/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw - VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

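/*
 * Worked example (illustrative): with gaw == 48 and VTD_PAGE_SHIFT == 12,
 * __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1. On 64-bit that fits in an
 * unsigned long, so DOMAIN_MAX_PFN(48) is the same value; on 32-bit the
 * min_t() above clamps it to ULONG_MAX.
 */
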
/* IO virtual address start page frame number */
#define IOVA_START_PFN		(1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size was a power-of-two multiple of a 4KiB page
 * and that the mapping had natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are a power-of-two multiple of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)

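/*
 * Illustrative reading of the bitmap: ~0xFFFUL has every bit from 12
 * upwards set, so bit 12 (4KiB), bit 13 (8KiB), bit 14 (16KiB) and so
 * on are all advertised, i.e. every power-of-two size that is a
 * multiple of 4KiB, per the comment above.
 */
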
static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

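/*
 * Worked example (illustrative): for a 48-bit domain,
 * width_to_agaw(48) == DIV_ROUND_UP(48 - 30, 9) == 2,
 * agaw_to_level(2) == 4 (a 4-level page table), and
 * agaw_to_width(2) == min(30 + 2 * 9, 64) == 48 again.
 */
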
static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

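/*
 * Worked example (illustrative): at level 2,
 * level_to_offset_bits(2) == 9, so pfn_level_offset() extracts pfn
 * bits 9-17 as the table index, level_size(2) == 512 pfns (one 2MiB
 * superpage) and level_mask(2) clears those low 9 bits.
 */
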
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

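/*
 * Illustrative note: on x86, PAGE_SHIFT == VTD_PAGE_SHIFT == 12 and the
 * conversions above are identities. On a configuration with larger MM
 * pages (e.g. IA-64 built with 16KiB pages) one MM pfn corresponds to
 * several VT-d pfns, hence the shifts.
 */
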
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d can't be enabled successfully
 * (used when the kernel is launched with TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val &= ~VTD_PAGE_MASK;
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ?
		 phys_to_virt(root->val & VTD_PAGE_MASK) : NULL);
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

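/*
 * Illustrative sketch (not the verbatim mapping path, which lives
 * elsewhere in this file): programming a context entry for a device
 * combines the helpers above roughly as
 *
 *	context_set_domain_id(context, domain->id);
 *	context_set_address_width(context, domain->agaw);
 *	context_set_address_root(context, virt_to_phys(domain->pgd));
 *	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
 *	context_set_fault_enable(context);
 *	context_set_present(context);
 *
 * mirroring the bit layout documented above (CONTEXT_TT_MULTI_LEVEL
 * comes from linux/intel-iommu.h).
 */
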
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

/*
 * This domain is a static identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct device *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	u64	base_address;		/* reserved base address */
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
	struct page *freelist[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev);
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static const struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

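/*
 * Example (illustrative) kernel command line usage, as parsed by the
 * comma-splitting loop above:
 *
 *	intel_iommu=on,strict	enable the IOMMU, unbatched IOTLB flush
 *	intel_iommu=on,sp_off	enable the IOMMU, no superpages
 */
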
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

static inline int domain_type_is_vm(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}

static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
	return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
				DOMAIN_FLAG_STATIC_IDENTITY);
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
				       unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}

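/*
 * Worked example (illustrative): with agaw == 2, agaw_to_width()
 * returns 48, so addr_width == 36 and domain_pfn_supported() rejects
 * any pfn at or above 1UL << 36, i.e. any address beyond the 48-bit
 * guest address space.
 */
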
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate agaw for each iommu.
 * "SAGAW" may be different across iommus: use a default agaw, and fall
 * back to a smaller supported agaw for iommus that don't support the
 * default one.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain_type_is_vm_or_si(domain));
	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int i, found = 0;

	domain->iommu_coherency = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		found = 1;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	if (found)
		return;

	/* No hardware attached; use lowest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_coherent(iommu->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	rcu_read_unlock();
}

static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ret = 1;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			if (!ecap_sc_support(iommu->ecap)) {
				ret = 0;
				break;
			}
		}
	}
	rcu_read_unlock();

	return ret;
}

static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		return 0;
	}

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			mask &= cap_super_page_val(iommu->cap);
			if (!mask)
				break;
		}
	}
	rcu_read_unlock();

	return fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain->iommu_snooping = domain_update_iommu_snooping(NULL);
	domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}

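/*
 * Illustrative: cap_super_page_val() yields a 4-bit field in which bit 0
 * means 2MiB superpages are supported and bit 1 means 1GiB. If one iommu
 * reports 0x3 and another 0x1, the common mask is 0x1 and fls(0x1) == 1,
 * i.e. the domain may use 2MiB superpages only (matching the
 * iommu_superpage levels documented in struct dmar_domain above).
 */
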
static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	struct pci_dev *ptmp, *pdev = NULL;
	u16 segment = 0;
	int i;

	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		segment = pci_domain_nr(pdev->bus);
	} else if (ACPI_COMPANION(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				*bus = drhd->devices[i].bus;
				*devfn = drhd->devices[i].devfn;
				goto out;
			}

			if (!pdev || !dev_is_pci(tmp))
				continue;

			ptmp = to_pci_dev(tmp);
			if (ptmp->subordinate &&
			    ptmp->subordinate->number <= pdev->bus->number &&
			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
			got_pdev:
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;
			goto out;
		}
	}
	iommu = NULL;
 out:
	rcu_read_unlock();

	return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
				    sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (!domain_pfn_supported(domain, pfn))
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}

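/*
 * Illustrative walk: for a 4-level table (agaw == 2) and
 * *target_level == 1, the loop above descends through levels 4, 3 and 2,
 * allocating any missing lower-level tables on the way; the cmpxchg64()
 * means two CPUs racing to populate the same slot both end up using a
 * single page, with the loser freeing its speculative allocation.
 */
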
/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (dma_pte_superpage(pte)) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte; a tlb flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}

static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level - 1);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	pte = page_address(pg);
	do {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
		pte++;
	} while (!first_pte_in_page(pte));

	return freelist;
}

static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}

/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
struct page *domain_unmap(struct dmar_domain *domain,
			  unsigned long start_pfn,
			  unsigned long last_pfn)
{
	struct page *freelist = NULL;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
				       domain->pgd, 0, start_pfn, last_pfn, NULL);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		struct page *pgd_page = virt_to_page(domain->pgd);
		pgd_page->freelist = freelist;
		freelist = pgd_page;

		domain->pgd = NULL;
	}

	return freelist;
}

void dma_free_pagelist(struct page *freelist)
{
	struct page *pg;

	while ((pg = freelist)) {
		freelist = pg->freelist;
		free_pgtable_page(page_address(pg));
	}
}

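/*
 * Typical unmap sequence (an illustrative sketch; callers elsewhere in
 * this file do the equivalent):
 *
 *	freelist = domain_unmap(domain, start_pfn, last_pfn);
 *	... flush the IOTLB for the affected range ...
 *	dma_free_pagelist(freelist);
 *
 * i.e. page-table pages go back to the allocator only after the flush
 * guarantees the hardware page-walker can no longer reach them.
 */
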
/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root) {
		pr_err("IOMMU: allocating root entry for %s failed\n",
			iommu->name);
		return -ENOMEM;
	}

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* The return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* The return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* a global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* IH bit is passed in as part of address */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably only needed to be extra safe; it looks like
	 * we can ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			 (unsigned long long)DMA_TLB_IIRG(type),
			 (unsigned long long)DMA_TLB_IAIG(val));
}

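/*
 * Illustrative PSI encoding: flushing 16 pages at pfn 0x1000 uses
 * size_order == 4, so val_iva == (0x1000ULL << VTD_PAGE_SHIFT) | 4;
 * the address must be naturally aligned to the 2^4-page region, and
 * the order occupies the low bits the page-aligned address leaves free.
 */
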
David Woodhouse64ae8922014-03-09 12:52:30 -07001266static struct device_domain_info *
1267iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1268 u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001269{
Yu Zhao93a23a72009-05-18 13:51:37 +08001270 int found = 0;
1271 unsigned long flags;
1272 struct device_domain_info *info;
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001273 struct pci_dev *pdev;
Yu Zhao93a23a72009-05-18 13:51:37 +08001274
1275 if (!ecap_dev_iotlb_support(iommu->ecap))
1276 return NULL;
1277
1278 if (!iommu->qi)
1279 return NULL;
1280
1281 spin_lock_irqsave(&device_domain_lock, flags);
1282 list_for_each_entry(info, &domain->devices, link)
Jiang Liuc3b497c2014-07-11 14:19:25 +08001283 if (info->iommu == iommu && info->bus == bus &&
1284 info->devfn == devfn) {
Yu Zhao93a23a72009-05-18 13:51:37 +08001285 found = 1;
1286 break;
1287 }
1288 spin_unlock_irqrestore(&device_domain_lock, flags);
1289
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001290 if (!found || !info->dev || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001291 return NULL;
1292
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001293 pdev = to_pci_dev(info->dev);
1294
1295 if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
Yu Zhao93a23a72009-05-18 13:51:37 +08001296 return NULL;
1297
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001298 if (!dmar_find_matched_atsr_unit(pdev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001299 return NULL;
1300
Yu Zhao93a23a72009-05-18 13:51:37 +08001301 return info;
1302}
1303
static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info || !dev_is_pci(info->dev))
		return;

	pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !dev_is_pci(info->dev) ||
	    !pci_ats_enabled(to_pci_dev(info->dev)))
		return;

	pci_disable_ats(to_pci_dev(info->dev));
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		struct pci_dev *pdev;

		if (!info->dev || !dev_is_pci(info->dev))
			continue;

		pdev = to_pci_dev(info->dev);
		if (!pci_ats_enabled(pdev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(pdev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
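
/*
 * Illustrative sketch (not driver code, compiled out): how the 16-bit
 * source-id used above is packed from a PCI requester. For a device at
 * bus 0x3a, device 2, function 0 (devfn = 0x10):
 * sid = 0x3a << 8 | 0x10 = 0x3a10. "example_make_sid" is a
 * hypothetical helper, added only to show the layout.
 */
#if 0
static u16 example_make_sid(u8 bus, u8 devfn)
{
	return ((u16)bus << 8) | devfn;	/* bus in 15:8, dev/fn in 7:0 */
}
#endif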

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages,
				  int ih, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	if (ih)
		ih = 1 << 6;
	/*
	 * Fall back to domain-selective flush if there is no PSI support
	 * or the size is too big.
	 * PSI requires the page count to be 2 ^ x, and the base address
	 * is naturally aligned to the size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
					 DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
					 DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present
	 * require a flush. However, the device IOTLB doesn't need to be
	 * flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
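
/*
 * Worked example for the PSI mask above (illustrative): a request for
 * pages = 5 gives __roundup_pow_of_two(5) = 8, so mask = ilog2(8) = 3
 * and the hardware invalidates one naturally aligned 8-page (32KiB
 * with 4KiB VT-d pages) region covering the request. A mask larger
 * than cap_max_amask_val() falls back to the domain-selective flush.
 */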

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
		 iommu->seq_id, ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation schemes for future chips
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		pr_err("IOMMU%d: allocating domain id array failed\n",
		       iommu->seq_id);
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
				 GFP_KERNEL);
	if (!iommu->domains) {
		pr_err("IOMMU%d: allocating domain array failed\n",
		       iommu->seq_id);
		kfree(iommu->domain_ids);
		iommu->domain_ids = NULL;
		return -ENOMEM;
	}

	/*
	 * If caching mode is set, then invalid translations are tagged
	 * with domain id 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}
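
/*
 * Sizing sketch (illustrative): with cap_ndoms() == 256,
 * BITS_TO_LONGS(256) == 4 on a 64-bit kernel, so the id bitmap costs
 * 32 bytes while the domain pointer array costs 256 * 8 = 2KiB; the
 * "64K domains" note above is why the pointer array may eventually
 * want a sparser allocation scheme.
 */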

static void disable_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			/*
			 * Domain id 0 is reserved for invalid translation
			 * if hardware supports caching mode.
			 */
			if (cap_caching_mode(iommu->cap) && i == 0)
				continue;

			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);
			if (domain_detach_iommu(domain, iommu) == 0 &&
			    !domain_type_is_vm(domain))
				domain_exit(domain);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);
}

static void free_dmar_iommu(struct intel_iommu *iommu)
{
	if ((iommu->domains) && (iommu->domain_ids)) {
		kfree(iommu->domains);
		kfree(iommu->domain_ids);
		iommu->domains = NULL;
		iommu->domain_ids = NULL;
	}

	g_iommus[iommu->seq_id] = NULL;

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(int flags)
{
	/* domain id for virtual machine, it won't be set in context */
	static atomic_t vm_domid = ATOMIC_INIT(0);
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(domain, 0, sizeof(*domain));
	domain->nid = -1;
	domain->flags = flags;
	spin_lock_init(&domain->iommu_lock);
	INIT_LIST_HEAD(&domain->devices);
	if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
		domain->id = atomic_inc_return(&vm_domid);

	return domain;
}

static int __iommu_attach_domain(struct dmar_domain *domain,
				 struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;

	ndomains = cap_ndoms(iommu->cap);
	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num < ndomains) {
		set_bit(num, iommu->domain_ids);
		iommu->domains[num] = domain;
	} else {
		num = -ENOSPC;
	}

	return num;
}
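
/*
 * Allocation sketch (illustrative): __iommu_attach_domain() is a
 * first-fit allocator over the id bitmap. With ids {0, 1, 3} in use,
 * find_first_zero_bit() returns 2, which is claimed with set_bit();
 * callers must hold iommu->lock, as iommu_attach_domain() does below.
 */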

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	num = __iommu_attach_domain(domain, iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);
	if (num < 0)
		pr_err("IOMMU: no free domain ids\n");

	return num;
}

static int iommu_attach_vm_domain(struct dmar_domain *domain,
				  struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;

	ndomains = cap_ndoms(iommu->cap);
	for_each_set_bit(num, iommu->domain_ids, ndomains)
		if (iommu->domains[num] == domain)
			return num;

	return __iommu_attach_domain(domain, iommu);
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;

	spin_lock_irqsave(&iommu->lock, flags);
	if (domain_type_is_vm_or_si(domain)) {
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == domain) {
				clear_bit(num, iommu->domain_ids);
				iommu->domains[num] = NULL;
				break;
			}
		}
	} else {
		clear_bit(domain->id, iommu->domain_ids);
		iommu->domains[domain->id] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void domain_attach_iommu(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
		domain->iommu_count++;
		if (domain->iommu_count == 1)
			domain->nid = iommu->node;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
}

static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	unsigned long flags;
	int count = INT_MAX;

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
		count = --domain->iommu_count;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);

	return count;
}
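
/*
 * Refcounting sketch (illustrative): attach/detach maintain a count
 * of IOMMUs referencing a domain. A VM domain spanning two DMAR units
 * has iommu_count == 2; detaching from both drops it to 0, which
 * callers such as disable_dmar_iommu() treat as "safe to destroy".
 */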

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, IOVA_START_PFN, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
			  &reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
			    IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova) {
		printk(KERN_ERR "Reserve IOAPIC range failed\n");
		return -ENODEV;
	}

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova) {
				printk(KERN_ERR "Reserve iova failed\n");
				return -ENODEV;
			}
		}
	}
	return 0;
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
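
/*
 * Worked example (illustrative): each page-table level resolves 9 bits
 * above the 12-bit page offset, so the width is rounded up to the next
 * 12 + 9*n boundary. gaw = 48 gives r = (48 - 12) % 9 = 0, so
 * agaw = 48; gaw = 40 gives r = 1, so agaw = 40 + 9 - 1 = 48 -- a
 * 4-level table either way.
 */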

static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, IOVA_START_PFN, DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	if (intel_iommu_superpage)
		domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	else
		domain->iommu_superpage = 0;

	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}
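
/*
 * Sketch (illustrative): cap_sagaw() is read here as a 5-bit mask of
 * supported adjusted guest address widths, where bit n is assumed to
 * mean a (30 + 9*n)-bit, i.e. (n + 2)-level, page table. If the
 * computed agaw bit is clear, find_next_bit() above selects the next
 * deeper supported table rather than failing outright.
 */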

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	struct page *freelist = NULL;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	/* Flush any lazy unmaps that may reference this domain */
	if (!intel_iommu_strict)
		flush_unmaps_timeout(0);

	/* remove associated devices */
	domain_remove_dev_info(domain);

	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* clear attached or cached domains */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd)
		iommu_detach_domain(domain, iommu);
	rcu_read_unlock();

	dma_free_pagelist(freelist);

	free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain,
				      struct intel_iommu *iommu,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct dma_pte *pgd;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain_type_is_vm_or_si(domain)) {
		if (domain_type_is_vm(domain)) {
			id = iommu_attach_vm_domain(domain, iommu);
			if (id < 0) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				pr_err("IOMMU: no free domain ids\n");
				return -EFAULT;
			}
		}

		/* Skip the top levels of the page tables for an iommu
		 * whose agaw is less than the default.
		 * Unnecessary for PT mode.
		 */
		if (translation != CONTEXT_TT_PASS_THROUGH) {
			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd)) {
					spin_unlock_irqrestore(&iommu->lock, flags);
					return -ENOMEM;
				}
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	domain_attach_iommu(domain, iommu);

	return 0;
}

struct domain_context_mapping_data {
	struct dmar_domain *domain;
	struct intel_iommu *iommu;
	int translation;
};

static int domain_context_mapping_cb(struct pci_dev *pdev,
				     u16 alias, void *opaque)
{
	struct domain_context_mapping_data *data = opaque;

	return domain_context_mapping_one(data->domain, data->iommu,
					  PCI_BUS_NUM(alias), alias & 0xff,
					  data->translation);
}

static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev,
		       int translation)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;
	struct domain_context_mapping_data data;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	if (!dev_is_pci(dev))
		return domain_context_mapping_one(domain, iommu, bus, devfn,
						  translation);

	data.domain = domain;
	data.iommu = iommu;
	data.translation = translation;

	return pci_for_each_dma_alias(to_pci_dev(dev),
				      &domain_context_mapping_cb, &data);
}
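
/*
 * Alias-walk sketch (illustrative): for a conventional PCI device
 * behind a PCIe-to-PCI bridge, pci_for_each_dma_alias() also invokes
 * domain_context_mapping_cb() for the bridge's alias, so every
 * requester ID the device may present gets its own context entry.
 */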
1914
1915static int domain_context_mapped_cb(struct pci_dev *pdev,
1916 u16 alias, void *opaque)
1917{
1918 struct intel_iommu *iommu = opaque;
1919
1920 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001921}
1922
David Woodhousee1f167f2014-03-09 15:24:46 -07001923static int domain_context_mapped(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001924{
Weidong Han5331fe62008-12-08 23:00:00 +08001925 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07001926 u8 bus, devfn;
Weidong Han5331fe62008-12-08 23:00:00 +08001927
David Woodhousee1f167f2014-03-09 15:24:46 -07001928 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001929 if (!iommu)
1930 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001931
Alex Williamson579305f2014-07-03 09:51:43 -06001932 if (!dev_is_pci(dev))
1933 return device_context_mapped(iommu, bus, devfn);
David Woodhousee1f167f2014-03-09 15:24:46 -07001934
Alex Williamson579305f2014-07-03 09:51:43 -06001935 return !pci_for_each_dma_alias(to_pci_dev(dev),
1936 domain_context_mapped_cb, iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001937}

/* Returns the number of VT-d pages, aligned up to the MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
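
/*
 * Worked example (illustrative, assuming 4KiB MM pages): for
 * host_addr = 0x1234 and size = 0x2000, the sub-page offset 0x234 is
 * kept and PAGE_ALIGN(0x234 + 0x2000) >> VTD_PAGE_SHIFT = 3, i.e. the
 * buffer spans three VT-d pages even though it is only two pages long.
 */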

/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
{
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= VTD_STRIDE_SHIFT;
		level++;
		support--;
	}
	return level;
}
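
/*
 * Worked example (illustrative): with domain->iommu_superpage == 2
 * (2MiB and 1GiB pages supported), iov_pfn = phy_pfn = 0x200 and
 * pages = 512 give pfnmerge = 0x200 with its low 9 bits clear, so the
 * loop runs once and returns level 2 -- a single 2MiB PTE suffices.
 * Any low bit set in either pfn keeps the result at level 1.
 */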

static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	unsigned long sg_res = 0;
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;

	BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (!sg) {
		sg_res = nr_pages;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages > 0) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
		}

		if (!pte) {
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
			if (!pte)
				return -ENOMEM;
			/* It is a large page */
			if (largepage_lvl > 1) {
				pteval |= DMA_PTE_LARGE_PAGE;
				lvl_pages = lvl_to_nr_pages(largepage_lvl);
				/*
				 * Ensure that old small page tables are
				 * removed to make room for the superpage,
				 * if they exist.
				 */
				dma_pte_free_pagetable(domain, iov_pfn,
						       iov_pfn + lvl_pages - 1);
			} else {
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
			}

		}
		/* We don't need a lock here; nobody else
		 * touches this iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/* If the next PTE would be the first in a new page, then we
		   need to flush the cache on the entries we've just written.
		   And then we'll need to recalculate 'pte', so clear it and
		   let it get set again in the if (!pte) block above.

		   If we're done (!nr_pages) we need to flush the cache too.

		   Also if we've been setting superpages, we may need to
		   recalculate 'pte' and switch back to smaller pages for the
		   end of the mapping, if the trailing size is not enough to
		   use another superpage (i.e. sg_res < lvl_pages). */
		pte++;
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}

		if (!sg_res && nr_pages)
			sg = sg_next(sg);
	}
	return 0;
}

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}
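
/*
 * Usage sketch (illustrative, compiled out): the wrappers select the
 * scatterlist or the contiguous path of __domain_mapping(). Mapping
 * one 2MiB window 1:1 (IOVA == physical) could look like this, where
 * "base_pfn" is a hypothetical, suitably aligned frame number:
 */
#if 0
	ret = domain_pfn_mapping(domain, base_pfn, base_pfn, 512,
				 DMA_PTE_READ | DMA_PTE_WRITE);
#endif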

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
				   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static inline void unlink_domain_info(struct device_domain_info *info)
{
	assert_spin_locked(&device_domain_lock);
	list_del(&info->link);
	list_del(&info->global);
	if (info->dev)
		info->dev->archdata.iommu = NULL;
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu_detach_dev(info->iommu, info->bus, info->devfn);

		if (domain_type_is_vm(domain)) {
			iommu_detach_dependent_devices(info->iommu, info->dev);
			domain_detach_iommu(domain, info->iommu);
		}

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
2138
2139/*
2140 * find_domain
David Woodhouse1525a292014-03-06 16:19:30 +00002141 * Note: we use struct device->archdata.iommu stores the info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002142 */
David Woodhouse1525a292014-03-06 16:19:30 +00002143static struct dmar_domain *find_domain(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002144{
2145 struct device_domain_info *info;
2146
2147 /* No lock here, assumes no domain exit in normal case */
David Woodhouse1525a292014-03-06 16:19:30 +00002148 info = dev->archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002149 if (info)
2150 return info->domain;
2151 return NULL;
2152}

static inline struct device_domain_info *
dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
{
	struct device_domain_info *info;

	list_for_each_entry(info, &device_domain_list, global)
		if (info->iommu->segment == segment && info->bus == bus &&
		    info->devfn == devfn)
			return info;

	return NULL;
}

static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
						int bus, int devfn,
						struct device *dev,
						struct dmar_domain *domain)
{
	struct dmar_domain *found = NULL;
	struct device_domain_info *info;
	unsigned long flags;

	info = alloc_devinfo_mem();
	if (!info)
		return NULL;

	info->bus = bus;
	info->devfn = devfn;
	info->dev = dev;
	info->domain = domain;
	info->iommu = iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	if (dev)
		found = find_domain(dev);
	else {
		struct device_domain_info *info2;

		info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
		if (info2)
			found = info2->domain;
	}
	if (found) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		free_devinfo_mem(info);
		/* Caller must free the original domain */
		return found;
	}

	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	if (dev)
		dev->archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return domain;
}

static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
{
	*(u16 *)opaque = alias;
	return 0;
}
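
/*
 * Usage sketch (illustrative): pci_for_each_dma_alias() calls this for
 * every requester ID the device may use; since it always returns 0 the
 * walk never stops early, so the u16 behind "opaque" is left holding
 * the last (topmost) alias -- exactly how get_domain_for_dev() below
 * obtains "dma_alias".
 */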

/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
{
	struct dmar_domain *domain, *tmp;
	struct intel_iommu *iommu;
	struct device_domain_info *info;
	u16 dma_alias;
	unsigned long flags;
	u8 bus, devfn;

	domain = find_domain(dev);
	if (domain)
		return domain;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return NULL;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);

		spin_lock_irqsave(&device_domain_lock, flags);
		info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
						      PCI_BUS_NUM(dma_alias),
						      dma_alias & 0xff);
		if (info) {
			iommu = info->iommu;
			domain = info->domain;
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);

		/* DMA alias already has a domain, use it */
		if (info)
			goto found_domain;
	}

	/* Allocate and initialize a new domain for the device */
	domain = alloc_domain(0);
	if (!domain)
		return NULL;
	domain->id = iommu_attach_domain(domain, iommu);
	if (domain->id < 0) {
		free_domain_mem(domain);
		return NULL;
	}
	domain_attach_iommu(domain, iommu);
	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		return NULL;
	}

	/* register PCI DMA alias device */
	if (dev_is_pci(dev)) {
		tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
					   dma_alias & 0xff, NULL, domain);

		if (!tmp || tmp != domain) {
			domain_exit(domain);
			domain = tmp;
		}

		if (!domain)
			return NULL;
	}

found_domain:
	tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);

	if (!tmp || tmp != domain) {
		domain_exit(domain);
		domain = tmp;
	}

	return domain;
}

static int iommu_identity_mapping;
#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * The RMRR range might overlap with an already mapped physical
	 * memory range, so clear it first.
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}
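
/*
 * Worked example (illustrative): for start = 0xd0000000 and
 * end = 0xd00fffff, first_vpfn = 0xd0000 and last_vpfn = 0xd00ff, so
 * 0x100 (256) pages are reserved in the domain's IOVA tree and then
 * mapped 1:1 with read/write permission after stale PTEs are cleared.
 */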

static int iommu_prepare_identity_map(struct device *dev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820 and so didn't get set up to
	   start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
		printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
		       dev_name(dev), start, end);
		return 0;
	}

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       dev_name(dev), start, end);

	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		        "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct device *dev)
{
	if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(dev, rmrr->base_address,
					  rmrr->end_address);
}
2396
Suresh Siddhad3f13812011-08-23 17:05:25 -07002397#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002398static inline void iommu_prepare_isa(void)
2399{
2400 struct pci_dev *pdev;
2401 int ret;
2402
2403 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2404 if (!pdev)
2405 return;
2406
David Woodhousec7ab48d2009-06-26 19:10:36 +01002407 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse0b9d9752014-03-09 15:48:15 -07002408 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002409
2410 if (ret)
David Woodhousec7ab48d2009-06-26 19:10:36 +01002411 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2412 "floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002413
Yijing Wang9b27e822014-05-20 20:37:52 +08002414 pci_dev_put(pdev);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002415}
2416#else
2417static inline void iommu_prepare_isa(void)
2418{
2419 return;
2420}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002421#endif /* !CONFIG_INTEL_IOMMU_FLPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002422
static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;
	bool first = true;

	si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
	if (!si_domain)
		return -EFAULT;

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret < 0) {
			domain_exit(si_domain);
			return -EFAULT;
		} else if (first) {
			si_domain->id = ret;
			first = false;
		} else if (si_domain->id != ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
		domain_attach_iommu(si_domain, iommu);
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	pr_debug("IOMMU: identity mapping domain is domain %d\n",
		 si_domain->id);

	if (hw)
		return 0;

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		int i;

		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			ret = iommu_domain_identity_map(si_domain,
					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int identity_mapping(struct device *dev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	info = dev->archdata.iommu;
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);

	return 0;
}

static int domain_add_dev_info(struct dmar_domain *domain,
			       struct device *dev, int translation)
{
	struct dmar_domain *ndomain;
	struct intel_iommu *iommu;
	u8 bus, devfn;
	int ret;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
	if (ndomain != domain)
		return -EBUSY;

	ret = domain_context_mapping(domain, dev, translation);
	if (ret) {
		domain_remove_one_dev_info(domain, dev);
		return ret;
	}

	return 0;
}
2515
David Woodhouse0b9d9752014-03-09 15:48:15 -07002516static bool device_has_rmrr(struct device *dev)
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002517{
2518 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002519 struct device *tmp;
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002520 int i;
2521
Jiang Liu0e242612014-02-19 14:07:34 +08002522 rcu_read_lock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002523 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002524 /*
2525 * Return TRUE if this RMRR contains the device that
2526 * is passed in.
2527 */
2528 for_each_active_dev_scope(rmrr->devices,
2529 rmrr->devices_cnt, i, tmp)
David Woodhouse0b9d9752014-03-09 15:48:15 -07002530 if (tmp == dev) {
Jiang Liu0e242612014-02-19 14:07:34 +08002531 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002532 return true;
Jiang Liub683b232014-02-19 14:07:32 +08002533 }
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002534 }
Jiang Liu0e242612014-02-19 14:07:34 +08002535 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002536 return false;
2537}
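/*
 * Note: the walk above runs under rcu_read_lock() because
 * dmar_rmrr_units is RCU-protected against concurrent updates
 * (see dmar_parse_one_rmrr() further down).
 */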
2538
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002539/*
2540 * There are a couple cases where we need to restrict the functionality of
2541 * devices associated with RMRRs. The first is when evaluating a device for
2542 * identity mapping because problems exist when devices are moved in and out
2543 * of domains and their respective RMRR information is lost. This means that
2544 * a device with associated RMRRs will never be in a "passthrough" domain.
2545 * The second is use of the device through the IOMMU API. This interface
2546 * expects to have full control of the IOVA space for the device. We cannot
2547 * satisfy both the requirement that RMRR access is maintained and have an
2548 * unencumbered IOVA space. We also have no ability to quiesce the device's
2549 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2550 * We therefore prevent devices associated with an RMRR from participating in
2551 * the IOMMU API, which eliminates them from device assignment.
2552 *
2553 * In both cases we assume that PCI USB devices with RMRRs have them largely
2554 * for historical reasons and that the RMRR space is not actively used post
2555 * boot. This exclusion may change if vendors begin to abuse it.
2556 */
2557static bool device_is_rmrr_locked(struct device *dev)
2558{
2559 if (!device_has_rmrr(dev))
2560 return false;
2561
2562 if (dev_is_pci(dev)) {
2563 struct pci_dev *pdev = to_pci_dev(dev);
2564
2565 if ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
2566 return false;
2567 }
2568
2569 return true;
2570}
2571
David Woodhouse3bdb2592014-03-09 16:03:08 -07002572static int iommu_should_identity_map(struct device *dev, int startup)
David Woodhouse6941af22009-07-04 18:24:27 +01002573{
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002574
David Woodhouse3bdb2592014-03-09 16:03:08 -07002575 if (dev_is_pci(dev)) {
2576 struct pci_dev *pdev = to_pci_dev(dev);
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002577
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002578 if (device_is_rmrr_locked(dev))
David Woodhouse3bdb2592014-03-09 16:03:08 -07002579 return 0;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002580
David Woodhouse3bdb2592014-03-09 16:03:08 -07002581 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2582 return 1;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002583
David Woodhouse3bdb2592014-03-09 16:03:08 -07002584 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2585 return 1;
2586
2587 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2588 return 0;
2589
2590 /*
2591 * We want to start off with all devices in the 1:1 domain, and
2592 * take them out later if we find they can't access all of memory.
2593 *
2594 * However, we can't do this for PCI devices behind bridges,
2595 * because all PCI devices behind the same bridge will end up
2596 * with the same source-id on their transactions.
2597 *
2598 * Practically speaking, we can't change things around for these
2599 * devices at run-time, because we can't be sure there'll be no
2600 * DMA transactions in flight for any of their siblings.
2601 *
2602 * So PCI devices (unless they're on the root bus) as well as
2603 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2604 * the 1:1 domain, just in _case_ one of their siblings turns out
2605 * not to be able to map all of memory.
2606 */
2607 if (!pci_is_pcie(pdev)) {
2608 if (!pci_is_root_bus(pdev->bus))
2609 return 0;
2610 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2611 return 0;
2612 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2613 return 0;
2614 } else {
2615 if (device_has_rmrr(dev))
2616 return 0;
2617 }
David Woodhouse6941af22009-07-04 18:24:27 +01002618
David Woodhouse3dfc8132009-07-04 19:11:08 +01002619 /*
David Woodhouse3dfc8132009-07-04 19:11:08 +01002620 * At boot time, we don't yet know if devices will be 64-bit capable.
David Woodhouse3bdb2592014-03-09 16:03:08 -07002621 * Assume that they will; if they turn out not to be, then we can
David Woodhouse3dfc8132009-07-04 19:11:08 +01002622 * take them out of the 1:1 domain later.
2623 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002624 if (!startup) {
2625 /*
2626 * If the device's dma_mask is less than the system's memory
2627 * size then this is not a candidate for identity mapping.
2628 */
David Woodhouse3bdb2592014-03-09 16:03:08 -07002629 u64 dma_mask = *dev->dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002630
David Woodhouse3bdb2592014-03-09 16:03:08 -07002631 if (dev->coherent_dma_mask &&
2632 dev->coherent_dma_mask < dma_mask)
2633 dma_mask = dev->coherent_dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002634
David Woodhouse3bdb2592014-03-09 16:03:08 -07002635 return dma_mask >= dma_get_required_mask(dev);
Chris Wright8fcc5372011-05-28 13:15:02 -05002636 }
David Woodhouse6941af22009-07-04 18:24:27 +01002637
2638 return 1;
2639}
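/*
 * A worked example of the mask check above, with illustrative values:
 * on a host with 16GB of RAM, dma_get_required_mask() reports a mask
 * of at least 34 bits. A device whose dma_mask is DMA_BIT_MASK(32)
 * then fails the comparison and is left out of (or dropped from) the
 * 1:1 domain, while a 64-bit capable device stays identity mapped.
 */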
2640
David Woodhousecf04eee2014-03-21 16:49:04 +00002641static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2642{
2643 int ret;
2644
2645 if (!iommu_should_identity_map(dev, 1))
2646 return 0;
2647
2648 ret = domain_add_dev_info(si_domain, dev,
2649 hw ? CONTEXT_TT_PASS_THROUGH :
2650 CONTEXT_TT_MULTI_LEVEL);
2651 if (!ret)
2652 pr_info("IOMMU: %s identity mapping for device %s\n",
2653 hw ? "hardware" : "software", dev_name(dev));
2654 else if (ret == -ENODEV)
2655 /* device not associated with an iommu */
2656 ret = 0;
2657
2658 return ret;
2659}
2660
2661
Matt Kraai071e1372009-08-23 22:30:22 -07002662static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002663{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002664 struct pci_dev *pdev = NULL;
David Woodhousecf04eee2014-03-21 16:49:04 +00002665 struct dmar_drhd_unit *drhd;
2666 struct intel_iommu *iommu;
2667 struct device *dev;
2668 int i;
2669 int ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002670
David Woodhouse19943b02009-08-04 16:19:20 +01002671 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002672 if (ret)
2673 return -EFAULT;
2674
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002675 for_each_pci_dev(pdev) {
David Woodhousecf04eee2014-03-21 16:49:04 +00002676 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2677 if (ret)
2678 return ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002679 }
2680
David Woodhousecf04eee2014-03-21 16:49:04 +00002681 for_each_active_iommu(iommu, drhd)
2682 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2683 struct acpi_device_physical_node *pn;
2684 struct acpi_device *adev;
2685
2686 if (dev->bus != &acpi_bus_type)
2687 continue;
2688
2689 adev = to_acpi_device(dev);
2690 mutex_lock(&adev->physical_node_lock);
2691 list_for_each_entry(pn, &adev->physical_node_list, node) {
2692 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2693 if (ret)
2694 break;
2695 }
2696 mutex_unlock(&adev->physical_node_lock);
2697 if (ret)
2698 return ret;
2699 }
2700
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002701 return 0;
2702}
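/*
 * Summary of the two passes above: the first covers every PCI device
 * in the system, the second covers ACPI-enumerated devices found in
 * DRHD scopes by walking each ACPI device's physical-node list.
 */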
2703
Jiang Liuffebeb42014-11-09 22:48:02 +08002704static void intel_iommu_init_qi(struct intel_iommu *iommu)
2705{
2706 /*
2707 * Start from a sane iommu hardware state.
2708 * If queued invalidation was already initialized by us
2709 * (for example, while enabling interrupt-remapping) then
2710 * things are already rolling from a sane state.
2711 */
2712 if (!iommu->qi) {
2713 /*
2714 * Clear any previous faults.
2715 */
2716 dmar_fault(-1, iommu);
2717 /*
2718 * Disable queued invalidation if supported and already enabled
2719 * before OS handover.
2720 */
2721 dmar_disable_qi(iommu);
2722 }
2723
2724 if (dmar_enable_qi(iommu)) {
2725 /*
2726 * Queued Invalidate not enabled, use Register Based Invalidate
2727 */
2728 iommu->flush.flush_context = __iommu_flush_context;
2729 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2730 pr_info("IOMMU: %s using Register based invalidation\n",
2731 iommu->name);
2732 } else {
2733 iommu->flush.flush_context = qi_flush_context;
2734 iommu->flush.flush_iotlb = qi_flush_iotlb;
2735 pr_info("IOMMU: %s using Queued invalidation\n", iommu->name);
2736 }
2737}
2738
Joseph Cihulab7792602011-05-03 00:08:37 -07002739static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002740{
2741 struct dmar_drhd_unit *drhd;
2742 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002743 struct device *dev;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002744 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002745 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002746
2747 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002748 * for each drhd
2749 * allocate root
2750 * initialize and program root entry to not present
2751 * endfor
2752 */
2753 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002754 /*
2755 * lock not needed as this is only incremented in the single
2756 * threaded kernel __init code path; all other accesses are
2757 * read only
2758 */
Jiang Liu78d8e702014-11-09 22:47:57 +08002759 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
Mike Travis1b198bb2012-03-05 15:05:16 -08002760 g_num_of_iommus++;
2761 continue;
2762 }
2763 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
Jiang Liu78d8e702014-11-09 22:47:57 +08002764 DMAR_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08002765 }
2766
Jiang Liuffebeb42014-11-09 22:48:02 +08002767 /* Preallocate enough resources for IOMMU hot-addition */
2768 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
2769 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
2770
Weidong Hand9630fe2008-12-08 11:06:32 +08002771 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2772 GFP_KERNEL);
2773 if (!g_iommus) {
2774 printk(KERN_ERR "Allocating global iommu array failed\n");
2775 ret = -ENOMEM;
2776 goto error;
2777 }
2778
mark gross80b20dd2008-04-18 13:53:58 -07002779 deferred_flush = kzalloc(g_num_of_iommus *
2780 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2781 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002782 ret = -ENOMEM;
Jiang Liu989d51f2014-02-19 14:07:21 +08002783 goto free_g_iommus;
mark gross5e0d2a62008-03-04 15:22:08 -08002784 }
2785
Jiang Liu7c919772014-01-06 14:18:18 +08002786 for_each_active_iommu(iommu, drhd) {
Weidong Hand9630fe2008-12-08 11:06:32 +08002787 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002788
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002789 ret = iommu_init_domains(iommu);
2790 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002791 goto free_iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002792
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002793 /*
2794 * TBD:
2795 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002796 * among all IOMMU's. Need to Split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002797 */
2798 ret = iommu_alloc_root_entry(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08002799 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002800 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002801 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002802 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002803 }
2804
Jiang Liuffebeb42014-11-09 22:48:02 +08002805 for_each_active_iommu(iommu, drhd)
2806 intel_iommu_init_qi(iommu);
Youquan Songa77b67d2008-10-16 16:31:56 -07002807
David Woodhouse19943b02009-08-04 16:19:20 +01002808 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07002809 iommu_identity_mapping |= IDENTMAP_ALL;
2810
Suresh Siddhad3f13812011-08-23 17:05:25 -07002811#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07002812 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002813#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07002814
2815 check_tylersburg_isoch();
2816
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002817 /*
2818 * If pass through is not set or not enabled, set up context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002819 * identity mappings for rmrr, gfx, and isa, which may fall back to static
2820 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002821 */
David Woodhouse19943b02009-08-04 16:19:20 +01002822 if (iommu_identity_mapping) {
2823 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2824 if (ret) {
2825 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08002826 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002827 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002828 }
David Woodhouse19943b02009-08-04 16:19:20 +01002829 /*
2830 * For each rmrr
2831 * for each dev attached to rmrr
2832 * do
2833 * locate drhd for dev, alloc domain for dev
2834 * allocate free domain
2835 * allocate page table entries for rmrr
2836 * if context not allocated for bus
2837 * allocate and init context
2838 * set present in root table for this bus
2839 * init context with domain, translation etc
2840 * endfor
2841 * endfor
2842 */
2843 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2844 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002845 /* some BIOSes list non-existent devices in the DMAR table. */
2846 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
David Woodhouse832bd852014-03-07 15:08:36 +00002847 i, dev) {
David Woodhouse0b9d9752014-03-09 15:48:15 -07002848 ret = iommu_prepare_rmrr_dev(rmrr, dev);
David Woodhouse19943b02009-08-04 16:19:20 +01002849 if (ret)
2850 printk(KERN_ERR
2851 "IOMMU: mapping reserved region failed\n");
2852 }
2853 }
2854
2855 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002856
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002857 /*
2858 * for each drhd
2859 * enable fault log
2860 * global invalidate context cache
2861 * global invalidate iotlb
2862 * enable translation
2863 */
Jiang Liu7c919772014-01-06 14:18:18 +08002864 for_each_iommu(iommu, drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07002865 if (drhd->ignored) {
2866 /*
2867 * we always have to disable PMRs or DMA may fail on
2868 * this device
2869 */
2870 if (force_on)
Jiang Liu7c919772014-01-06 14:18:18 +08002871 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002872 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07002873 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002874
2875 iommu_flush_write_buffer(iommu);
2876
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002877 ret = dmar_set_interrupt(iommu);
2878 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002879 goto free_iommu;
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002880
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002881 iommu_set_root_entry(iommu);
2882
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002883 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002884 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Jiang Liu2a41cce2014-07-11 14:19:33 +08002885 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07002886 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002887 }
2888
2889 return 0;
Jiang Liu989d51f2014-02-19 14:07:21 +08002890
2891free_iommu:
Jiang Liuffebeb42014-11-09 22:48:02 +08002892 for_each_active_iommu(iommu, drhd) {
2893 disable_dmar_iommu(iommu);
Jiang Liua868e6b2014-01-06 14:18:20 +08002894 free_dmar_iommu(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08002895 }
Jiang Liu9bdc5312014-01-06 14:18:27 +08002896 kfree(deferred_flush);
Jiang Liu989d51f2014-02-19 14:07:21 +08002897free_g_iommus:
Weidong Hand9630fe2008-12-08 11:06:32 +08002898 kfree(g_iommus);
Jiang Liu989d51f2014-02-19 14:07:21 +08002899error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002900 return ret;
2901}
2902
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002903/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002904static struct iova *intel_alloc_iova(struct device *dev,
2905 struct dmar_domain *domain,
2906 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002907{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002908 struct iova *iova = NULL;
2909
David Woodhouse875764d2009-06-28 21:20:51 +01002910 /* Restrict dma_mask to the width that the iommu can handle */
2911 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2912
2913 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002914 /*
2915 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002916 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002917 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002918 */
David Woodhouse875764d2009-06-28 21:20:51 +01002919 iova = alloc_iova(&domain->iovad, nrpages,
2920 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2921 if (iova)
2922 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002923 }
David Woodhouse875764d2009-06-28 21:20:51 +01002924 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2925 if (unlikely(!iova)) {
2926 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
David Woodhouse207e3592014-03-09 16:12:32 -07002927 nrpages, dev_name(dev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002928 return NULL;
2929 }
2930
2931 return iova;
2932}
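/*
 * Illustration of the two-stage policy above: for a device advertising
 * a 64-bit dma_mask, the IOVA is first carved out of the space below
 * 4GB, and only when that range is exhausted (or dmar_forcedac is set,
 * which skips the first attempt) does the allocation come from the
 * full width the IOMMU supports.
 */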
2933
David Woodhoused4b709f2014-03-09 16:07:40 -07002934static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002935{
2936 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002937 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002938
David Woodhoused4b709f2014-03-09 16:07:40 -07002939 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002940 if (!domain) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002941 printk(KERN_ERR "Allocating domain for %s failed\n",
2942 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002943 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002944 }
2945
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002946 /* make sure context mapping is ok */
David Woodhoused4b709f2014-03-09 16:07:40 -07002947 if (unlikely(!domain_context_mapped(dev))) {
2948 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002949 if (ret) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002950 printk(KERN_ERR "Domain context map for %s failed\n",
2951 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002952 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002953 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002954 }
2955
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002956 return domain;
2957}
2958
David Woodhoused4b709f2014-03-09 16:07:40 -07002959static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
David Woodhouse147202a2009-07-07 19:43:20 +01002960{
2961 struct device_domain_info *info;
2962
2963 /* No lock here, assumes no domain exit in normal case */
David Woodhoused4b709f2014-03-09 16:07:40 -07002964 info = dev->archdata.iommu;
David Woodhouse147202a2009-07-07 19:43:20 +01002965 if (likely(info))
2966 return info->domain;
2967
2968 return __get_valid_domain_for_dev(dev);
2969}
2970
David Woodhouse3d891942014-03-06 15:59:26 +00002971static int iommu_dummy(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002972{
David Woodhouse3d891942014-03-06 15:59:26 +00002973 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002974}
2975
David Woodhouseecb509e2014-03-09 16:29:55 -07002976/* Check if the dev needs to go through the non-identity map and unmap process. */
David Woodhouse73676832009-07-04 14:08:36 +01002977static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002978{
2979 int found;
2980
David Woodhouse3d891942014-03-06 15:59:26 +00002981 if (iommu_dummy(dev))
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002982 return 1;
2983
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002984 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002985 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002986
David Woodhouse9b226622014-03-09 14:03:28 -07002987 found = identity_mapping(dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002988 if (found) {
David Woodhouseecb509e2014-03-09 16:29:55 -07002989 if (iommu_should_identity_map(dev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002990 return 1;
2991 else {
2992 /*
2993 * a 32 bit DMA device is removed from si_domain and falls
2994 * back to non-identity mapping.
2995 */
David Woodhousebf9c9ed2014-03-09 16:19:13 -07002996 domain_remove_one_dev_info(si_domain, dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002997 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
David Woodhouseecb509e2014-03-09 16:29:55 -07002998 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002999 return 0;
3000 }
3001 } else {
3002 /*
3003 * If a 64 bit DMA device was detached from a VM, the device
3004 * is put into si_domain for identity mapping.
3005 */
David Woodhouseecb509e2014-03-09 16:29:55 -07003006 if (iommu_should_identity_map(dev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003007 int ret;
David Woodhouse5913c9b2014-03-09 16:27:31 -07003008 ret = domain_add_dev_info(si_domain, dev,
David Woodhouse5fe60f42009-08-09 10:53:41 +01003009 hw_pass_through ?
3010 CONTEXT_TT_PASS_THROUGH :
3011 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003012 if (!ret) {
3013 printk(KERN_INFO "64bit %s uses identity mapping\n",
David Woodhouseecb509e2014-03-09 16:29:55 -07003014 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003015 return 1;
3016 }
3017 }
3018 }
3019
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003020 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003021}
3022
David Woodhouse5040a912014-03-09 16:14:00 -07003023static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003024 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003025{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003026 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003027 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003028 struct iova *iova;
3029 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003030 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08003031 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07003032 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003033
3034 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003035
David Woodhouse5040a912014-03-09 16:14:00 -07003036 if (iommu_no_mapping(dev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003037 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003038
David Woodhouse5040a912014-03-09 16:14:00 -07003039 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003040 if (!domain)
3041 return 0;
3042
Weidong Han8c11e792008-12-08 15:29:22 +08003043 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01003044 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003045
David Woodhouse5040a912014-03-09 16:14:00 -07003046 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003047 if (!iova)
3048 goto error;
3049
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003050 /*
3051 * Check if DMAR supports zero-length reads on write only
3052 * mappings..
3053 */
3054 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003055 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003056 prot |= DMA_PTE_READ;
3057 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3058 prot |= DMA_PTE_WRITE;
3059 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003060 * paddr - (paddr + size) might be a partial page, so we should map the whole
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003061 * page. Note: if two parts of one page are separately mapped, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003062 * might have two guest_addr mappings to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003063 * is not a big problem
3064 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01003065 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07003066 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003067 if (ret)
3068 goto error;
3069
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003070 /* it's a non-present to present mapping. Only flush if caching mode */
3071 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003072 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003073 else
Weidong Han8c11e792008-12-08 15:29:22 +08003074 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003075
David Woodhouse03d6a242009-06-28 15:33:46 +01003076 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3077 start_paddr += paddr & ~PAGE_MASK;
3078 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003079
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003080error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003081 if (iova)
3082 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00003083 printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
David Woodhouse5040a912014-03-09 16:14:00 -07003084 dev_name(dev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003085 return 0;
3086}
3087
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003088static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3089 unsigned long offset, size_t size,
3090 enum dma_data_direction dir,
3091 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003092{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003093 return __intel_map_single(dev, page_to_phys(page) + offset, size,
David Woodhouse46333e32014-03-10 20:01:21 -07003094 dir, *dev->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003095}
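/*
 * A minimal driver-side sketch (illustration only, not part of this
 * file's logic): the generic DMA API dispatches to intel_map_page()
 * above through the intel_dma_ops table defined later in this file.
 * The function name below is hypothetical.
 */
#if 0
static int example_map_one_page(struct device *dev, struct page *page)
{
	dma_addr_t handle;

	/* ends up in intel_map_page() when VT-d owns this device */
	handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;
	/* ... device performs DMA against 'handle' here ... */
	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
	return 0;
}
#endif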
3096
mark gross5e0d2a62008-03-04 15:22:08 -08003097static void flush_unmaps(void)
3098{
mark gross80b20dd2008-04-18 13:53:58 -07003099 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08003100
mark gross5e0d2a62008-03-04 15:22:08 -08003101 timer_on = 0;
3102
3103 /* just flush them all */
3104 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08003105 struct intel_iommu *iommu = g_iommus[i];
3106 if (!iommu)
3107 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003108
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003109 if (!deferred_flush[i].next)
3110 continue;
3111
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003112 /* In caching mode, global flushes turn emulation expensive */
3113 if (!cap_caching_mode(iommu->cap))
3114 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08003115 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003116 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08003117 unsigned long mask;
3118 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003119 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08003120
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003121 /* On real hardware multiple invalidations are expensive */
3122 if (cap_caching_mode(iommu->cap))
3123 iommu_flush_iotlb_psi(iommu, domain->id,
Jiang Liua156ef92014-07-11 14:19:36 +08003124 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00003125 !deferred_flush[i].freelist[j], 0);
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003126 else {
Jiang Liua156ef92014-07-11 14:19:36 +08003127 mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003128 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3129 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3130 }
Yu Zhao93a23a72009-05-18 13:51:37 +08003131 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003132 if (deferred_flush[i].freelist[j])
3133 dma_free_pagelist(deferred_flush[i].freelist[j]);
mark gross80b20dd2008-04-18 13:53:58 -07003134 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003135 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003136 }
3137
mark gross5e0d2a62008-03-04 15:22:08 -08003138 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003139}
3140
3141static void flush_unmaps_timeout(unsigned long data)
3142{
mark gross80b20dd2008-04-18 13:53:58 -07003143 unsigned long flags;
3144
3145 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003146 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07003147 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003148}
3149
David Woodhouseea8ea462014-03-05 17:09:32 +00003150static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
mark gross5e0d2a62008-03-04 15:22:08 -08003151{
3152 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07003153 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08003154 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08003155
3156 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07003157 if (list_size == HIGH_WATER_MARK)
3158 flush_unmaps();
3159
Weidong Han8c11e792008-12-08 15:29:22 +08003160 iommu = domain_get_iommu(dom);
3161 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003162
mark gross80b20dd2008-04-18 13:53:58 -07003163 next = deferred_flush[iommu_id].next;
3164 deferred_flush[iommu_id].domain[next] = dom;
3165 deferred_flush[iommu_id].iova[next] = iova;
David Woodhouseea8ea462014-03-05 17:09:32 +00003166 deferred_flush[iommu_id].freelist[next] = freelist;
mark gross80b20dd2008-04-18 13:53:58 -07003167 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08003168
3169 if (!timer_on) {
3170 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3171 timer_on = 1;
3172 }
3173 list_size++;
3174 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3175}
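/*
 * Deferred-unmap batching in short: freed IOVAs queue up per-IOMMU in
 * add_unmap() rather than being flushed one at a time. flush_unmaps()
 * then drains every queue with at most one IOTLB flush per IOMMU,
 * triggered either by the 10ms timer armed above or synchronously once
 * list_size hits HIGH_WATER_MARK.
 */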
3176
Jiang Liud41a4ad2014-07-11 14:19:34 +08003177static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003178{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003179 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003180 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003181 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003182 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003183 struct page *freelist;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003184
David Woodhouse73676832009-07-04 14:08:36 +01003185 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003186 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003187
David Woodhouse1525a292014-03-06 16:19:30 +00003188 domain = find_domain(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003189 BUG_ON(!domain);
3190
Weidong Han8c11e792008-12-08 15:29:22 +08003191 iommu = domain_get_iommu(domain);
3192
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003193 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01003194 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3195 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003196 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003197
David Woodhoused794dc92009-06-28 00:27:49 +01003198 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3199 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003200
David Woodhoused794dc92009-06-28 00:27:49 +01003201 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003202 dev_name(dev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003203
David Woodhouseea8ea462014-03-05 17:09:32 +00003204 freelist = domain_unmap(domain, start_pfn, last_pfn);
David Woodhoused794dc92009-06-28 00:27:49 +01003205
mark gross5e0d2a62008-03-04 15:22:08 -08003206 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01003207 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
David Woodhouseea8ea462014-03-05 17:09:32 +00003208 last_pfn - start_pfn + 1, !freelist, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08003209 /* free iova */
3210 __free_iova(&domain->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003211 dma_free_pagelist(freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003212 } else {
David Woodhouseea8ea462014-03-05 17:09:32 +00003213 add_unmap(domain, iova, freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003214 /*
3215 * queue up the release of the unmap to save roughly 1/6th of
3216 * the cpu time used up by the iotlb flush operation...
3217 */
mark gross5e0d2a62008-03-04 15:22:08 -08003218 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003219}
3220
Jiang Liud41a4ad2014-07-11 14:19:34 +08003221static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3222 size_t size, enum dma_data_direction dir,
3223 struct dma_attrs *attrs)
3224{
3225 intel_unmap(dev, dev_addr);
3226}
3227
David Woodhouse5040a912014-03-09 16:14:00 -07003228static void *intel_alloc_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003229 dma_addr_t *dma_handle, gfp_t flags,
3230 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003231{
Akinobu Mita36746432014-06-04 16:06:51 -07003232 struct page *page = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003233 int order;
3234
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003235 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003236 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003237
David Woodhouse5040a912014-03-09 16:14:00 -07003238 if (!iommu_no_mapping(dev))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003239 flags &= ~(GFP_DMA | GFP_DMA32);
David Woodhouse5040a912014-03-09 16:14:00 -07003240 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3241 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003242 flags |= GFP_DMA;
3243 else
3244 flags |= GFP_DMA32;
3245 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003246
Akinobu Mita36746432014-06-04 16:06:51 -07003247 if (flags & __GFP_WAIT) {
3248 unsigned int count = size >> PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003249
Akinobu Mita36746432014-06-04 16:06:51 -07003250 page = dma_alloc_from_contiguous(dev, count, order);
3251 if (page && iommu_no_mapping(dev) &&
3252 page_to_phys(page) + size > dev->coherent_dma_mask) {
3253 dma_release_from_contiguous(dev, page, count);
3254 page = NULL;
3255 }
3256 }
3257
3258 if (!page)
3259 page = alloc_pages(flags, order);
3260 if (!page)
3261 return NULL;
3262 memset(page_address(page), 0, size);
3263
3264 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003265 DMA_BIDIRECTIONAL,
David Woodhouse5040a912014-03-09 16:14:00 -07003266 dev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003267 if (*dma_handle)
Akinobu Mita36746432014-06-04 16:06:51 -07003268 return page_address(page);
3269 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3270 __free_pages(page, order);
3271
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003272 return NULL;
3273}
3274
David Woodhouse5040a912014-03-09 16:14:00 -07003275static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003276 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003277{
3278 int order;
Akinobu Mita36746432014-06-04 16:06:51 -07003279 struct page *page = virt_to_page(vaddr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003280
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003281 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003282 order = get_order(size);
3283
Jiang Liud41a4ad2014-07-11 14:19:34 +08003284 intel_unmap(dev, dma_handle);
Akinobu Mita36746432014-06-04 16:06:51 -07003285 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3286 __free_pages(page, order);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003287}
3288
David Woodhouse5040a912014-03-09 16:14:00 -07003289static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003290 int nelems, enum dma_data_direction dir,
3291 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003292{
Jiang Liud41a4ad2014-07-11 14:19:34 +08003293 intel_unmap(dev, sglist[0].dma_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003294}
3295
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003296static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003297 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003298{
3299 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003300 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003301
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003302 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003303 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003304 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003305 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003306 }
3307 return nelems;
3308}
3309
David Woodhouse5040a912014-03-09 16:14:00 -07003310static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003311 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003312{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003313 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003314 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003315 size_t size = 0;
3316 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003317 struct iova *iova = NULL;
3318 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003319 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003320 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003321 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003322
3323 BUG_ON(dir == DMA_NONE);
David Woodhouse5040a912014-03-09 16:14:00 -07003324 if (iommu_no_mapping(dev))
3325 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003326
David Woodhouse5040a912014-03-09 16:14:00 -07003327 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003328 if (!domain)
3329 return 0;
3330
Weidong Han8c11e792008-12-08 15:29:22 +08003331 iommu = domain_get_iommu(domain);
3332
David Woodhouseb536d242009-06-28 14:49:31 +01003333 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003334 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003335
David Woodhouse5040a912014-03-09 16:14:00 -07003336 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3337 *dev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003338 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003339 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003340 return 0;
3341 }
3342
3343 /*
3344 * Check if DMAR supports zero-length reads on write only
3345 * mappings..
3346 */
3347 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003348 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003349 prot |= DMA_PTE_READ;
3350 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3351 prot |= DMA_PTE_WRITE;
3352
David Woodhouseb536d242009-06-28 14:49:31 +01003353 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003354
Fenghua Yuf5329592009-08-04 15:09:37 -07003355 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003356 if (unlikely(ret)) {
David Woodhousee1605492009-06-29 11:17:38 +01003357 dma_pte_free_pagetable(domain, start_vpfn,
3358 start_vpfn + size - 1);
David Woodhousee1605492009-06-29 11:17:38 +01003359 __free_iova(&domain->iovad, iova);
3360 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003361 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003362
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003363 /* it's a non-present to present mapping. Only flush if caching mode */
3364 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003365 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003366 else
Weidong Han8c11e792008-12-08 15:29:22 +08003367 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003368
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003369 return nelems;
3370}
3371
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003372static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3373{
3374 return !dma_addr;
3375}
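/*
 * A dma_addr_t of zero doubles as the error cookie here: both
 * __intel_map_single() and intel_map_sg() return 0 on failure, so
 * dma_mapping_error() reduces to the !dma_addr test above.
 */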
3376
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003377struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003378 .alloc = intel_alloc_coherent,
3379 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003380 .map_sg = intel_map_sg,
3381 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003382 .map_page = intel_map_page,
3383 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003384 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003385};
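/*
 * This ops table is what the generic dma_map_*() helpers dispatch
 * through once it is installed during IOMMU initialization, so every
 * streaming and coherent DMA mapping on a VT-d translated device
 * funnels into the functions above.
 */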
3386
3387static inline int iommu_domain_cache_init(void)
3388{
3389 int ret = 0;
3390
3391 iommu_domain_cache = kmem_cache_create("iommu_domain",
3392 sizeof(struct dmar_domain),
3393 0,
3394 SLAB_HWCACHE_ALIGN,
3395 NULL);
3397 if (!iommu_domain_cache) {
3398 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3399 ret = -ENOMEM;
3400 }
3401
3402 return ret;
3403}
3404
3405static inline int iommu_devinfo_cache_init(void)
3406{
3407 int ret = 0;
3408
3409 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3410 sizeof(struct device_domain_info),
3411 0,
3412 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003413 NULL);
3414 if (!iommu_devinfo_cache) {
3415 printk(KERN_ERR "Couldn't create devinfo cache\n");
3416 ret = -ENOMEM;
3417 }
3418
3419 return ret;
3420}
3421
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003422static int __init iommu_init_mempool(void)
3423{
3424 int ret;
3425 ret = iommu_iova_cache_init();
3426 if (ret)
3427 return ret;
3428
3429 ret = iommu_domain_cache_init();
3430 if (ret)
3431 goto domain_error;
3432
3433 ret = iommu_devinfo_cache_init();
3434 if (!ret)
3435 return ret;
3436
3437 kmem_cache_destroy(iommu_domain_cache);
3438domain_error:
Robin Murphy85b45452015-01-12 17:51:14 +00003439 iommu_iova_cache_destroy();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003440
3441 return -ENOMEM;
3442}
3443
3444static void __init iommu_exit_mempool(void)
3445{
3446 kmem_cache_destroy(iommu_devinfo_cache);
3447 kmem_cache_destroy(iommu_domain_cache);
Robin Murphy85b45452015-01-12 17:51:14 +00003448 iommu_iova_cache_destroy();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003449}
3450
Dan Williams556ab452010-07-23 15:47:56 -07003451static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3452{
3453 struct dmar_drhd_unit *drhd;
3454 u32 vtbar;
3455 int rc;
3456
3457 /* We know that this device on this chipset has its own IOMMU.
3458 * If we find it under a different IOMMU, then the BIOS is lying
3459 * to us. Hope that the IOMMU for this device is actually
3460 * disabled, and it needs no translation...
3461 */
3462 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3463 if (rc) {
3464 /* "can't" happen */
3465 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3466 return;
3467 }
3468 vtbar &= 0xffff0000;
3469
3470 /* we know that this iommu should be at offset 0xa000 from vtbar */
3471 drhd = dmar_find_matched_drhd_unit(pdev);
3472 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3473 TAINT_FIRMWARE_WORKAROUND,
3474 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3475 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3476}
3477DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3478
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003479static void __init init_no_remapping_devices(void)
3480{
3481 struct dmar_drhd_unit *drhd;
David Woodhouse832bd852014-03-07 15:08:36 +00003482 struct device *dev;
Jiang Liub683b232014-02-19 14:07:32 +08003483 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003484
3485 for_each_drhd_unit(drhd) {
3486 if (!drhd->include_all) {
Jiang Liub683b232014-02-19 14:07:32 +08003487 for_each_active_dev_scope(drhd->devices,
3488 drhd->devices_cnt, i, dev)
3489 break;
David Woodhouse832bd852014-03-07 15:08:36 +00003490 /* ignore DMAR unit if no devices exist */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003491 if (i == drhd->devices_cnt)
3492 drhd->ignored = 1;
3493 }
3494 }
3495
Jiang Liu7c919772014-01-06 14:18:18 +08003496 for_each_active_drhd_unit(drhd) {
Jiang Liu7c919772014-01-06 14:18:18 +08003497 if (drhd->include_all)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003498 continue;
3499
Jiang Liub683b232014-02-19 14:07:32 +08003500 for_each_active_dev_scope(drhd->devices,
3501 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003502 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003503 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003504 if (i < drhd->devices_cnt)
3505 continue;
3506
David Woodhousec0771df2011-10-14 20:59:46 +01003507 /* This IOMMU has *only* gfx devices. Either bypass it or
3508 set the gfx_mapped flag, as appropriate */
3509 if (dmar_map_gfx) {
3510 intel_iommu_gfx_mapped = 1;
3511 } else {
3512 drhd->ignored = 1;
Jiang Liub683b232014-02-19 14:07:32 +08003513 for_each_active_dev_scope(drhd->devices,
3514 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003515 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003516 }
3517 }
3518}
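/*
 * Net result of init_no_remapping_devices(): DRHD units whose scope
 * contains no devices are ignored outright, and units that cover only
 * graphics devices are either kept (setting intel_iommu_gfx_mapped)
 * or ignored with their devices marked as dummy, depending on
 * dmar_map_gfx.
 */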
3519
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003520#ifdef CONFIG_SUSPEND
3521static int init_iommu_hw(void)
3522{
3523 struct dmar_drhd_unit *drhd;
3524 struct intel_iommu *iommu = NULL;
3525
3526 for_each_active_iommu(iommu, drhd)
3527 if (iommu->qi)
3528 dmar_reenable_qi(iommu);
3529
Joseph Cihulab7792602011-05-03 00:08:37 -07003530 for_each_iommu(iommu, drhd) {
3531 if (drhd->ignored) {
3532 /*
3533 * we always have to disable PMRs or DMA may fail on
3534 * this device
3535 */
3536 if (force_on)
3537 iommu_disable_protect_mem_regions(iommu);
3538 continue;
3539 }
3540
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003541 iommu_flush_write_buffer(iommu);
3542
3543 iommu_set_root_entry(iommu);
3544
3545 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003546 DMA_CCMD_GLOBAL_INVL);
Jiang Liu2a41cce2014-07-11 14:19:33 +08003547 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3548 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07003549 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003550 }
3551
3552 return 0;
3553}
3554
3555static void iommu_flush_all(void)
3556{
3557 struct dmar_drhd_unit *drhd;
3558 struct intel_iommu *iommu;
3559
3560 for_each_active_iommu(iommu, drhd) {
3561 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003562 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003563 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003564 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003565 }
3566}
3567
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003568static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003569{
3570 struct dmar_drhd_unit *drhd;
3571 struct intel_iommu *iommu = NULL;
3572 unsigned long flag;
3573
3574 for_each_active_iommu(iommu, drhd) {
3575 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3576 GFP_ATOMIC);
3577 if (!iommu->iommu_state)
3578 goto nomem;
3579 }
3580
3581 iommu_flush_all();
3582
3583 for_each_active_iommu(iommu, drhd) {
3584 iommu_disable_translation(iommu);
3585
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003586 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003587
3588 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3589 readl(iommu->reg + DMAR_FECTL_REG);
3590 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3591 readl(iommu->reg + DMAR_FEDATA_REG);
3592 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3593 readl(iommu->reg + DMAR_FEADDR_REG);
3594 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3595 readl(iommu->reg + DMAR_FEUADDR_REG);
3596
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003597 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003598 }
3599 return 0;
3600
3601nomem:
3602 for_each_active_iommu(iommu, drhd)
3603 kfree(iommu->iommu_state);
3604
3605 return -ENOMEM;
3606}
3607
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003608static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003609{
3610 struct dmar_drhd_unit *drhd;
3611 struct intel_iommu *iommu = NULL;
3612 unsigned long flag;
3613
3614 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003615 if (force_on)
3616 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3617 else
3618 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003619 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003620 }
3621
3622 for_each_active_iommu(iommu, drhd) {
3623
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003624 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003625
3626 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3627 iommu->reg + DMAR_FECTL_REG);
3628 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3629 iommu->reg + DMAR_FEDATA_REG);
3630 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3631 iommu->reg + DMAR_FEADDR_REG);
3632 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3633 iommu->reg + DMAR_FEUADDR_REG);
3634
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003635 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003636 }
3637
3638 for_each_active_iommu(iommu, drhd)
3639 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003640}
3641
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003642static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003643 .resume = iommu_resume,
3644 .suspend = iommu_suspend,
3645};
3646
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003647static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003648{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003649 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003650}
3651
3652#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003653static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003654#endif /* CONFIG_PM */
3655
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003656
Jiang Liuc2a0b532014-11-09 22:47:56 +08003657int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003658{
3659 struct acpi_dmar_reserved_memory *rmrr;
3660 struct dmar_rmrr_unit *rmrru;
3661
3662 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3663 if (!rmrru)
3664 return -ENOMEM;
3665
3666 rmrru->hdr = header;
3667 rmrr = (struct acpi_dmar_reserved_memory *)header;
3668 rmrru->base_address = rmrr->base_address;
3669 rmrru->end_address = rmrr->end_address;
Jiang Liu2e455282014-02-19 14:07:36 +08003670 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3671 ((void *)rmrr) + rmrr->header.length,
3672 &rmrru->devices_cnt);
3673 if (rmrru->devices_cnt && rmrru->devices == NULL) {
3674 kfree(rmrru);
3675 return -ENOMEM;
3676 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003677
Jiang Liu2e455282014-02-19 14:07:36 +08003678 list_add(&rmrru->list, &dmar_rmrr_units);
3679
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003680 return 0;
3681}
3682
static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
{
	struct dmar_atsr_unit *atsru;
	struct acpi_dmar_atsr *tmp;

	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
		tmp = (struct acpi_dmar_atsr *)atsru->hdr;
		if (atsr->segment != tmp->segment)
			continue;
		if (atsr->header.length != tmp->header.length)
			continue;
		if (memcmp(atsr, tmp, atsr->header.length) == 0)
			return atsru;
	}

	return NULL;
}

int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
		return 0;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (atsru)
		return 0;

	atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	/*
	 * If memory is allocated from slab by ACPI _DSM method, we need to
	 * copy the memory content because the memory buffer will be freed
	 * on return.
	 */
	atsru->hdr = (void *)(atsru + 1);
	memcpy(atsru->hdr, hdr, hdr->length);
	atsru->include_all = atsr->flags & 0x1;
	if (!atsru->include_all) {
		atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
				(void *)atsr + atsr->header.length,
				&atsru->devices_cnt);
		if (atsru->devices_cnt && atsru->devices == NULL) {
			kfree(atsru);
			return -ENOMEM;
		}
	}

	list_add_rcu(&atsru->list, &dmar_atsr_units);

	return 0;
}

static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
{
	dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
	kfree(atsru);
}

int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (atsru) {
		list_del_rcu(&atsru->list);
		synchronize_rcu();
		intel_iommu_free_atsr(atsru);
	}

	return 0;
}

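/*
 * Refuse removal of an ATSR unit that still has active devices in its
 * scope; returning -EBUSY makes the hot-removal request fail.
 */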
int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	int i;
	struct device *dev;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (!atsru)
		return 0;

	if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
		for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
					  i, dev)
			return -EBUSY;

	return 0;
}

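/*
 * Bring a hot-added DMAR unit online: verify that the new IOMMU offers
 * the capabilities the running configuration already relies on
 * (pass-through, snoop control, superpage sizes), then set up its
 * domains, root entry, queued invalidation and interrupt, and finally
 * enable translation.
 */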
static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
{
	int sp, ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (g_iommus[iommu->seq_id])
		return 0;

	if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
		pr_warn("IOMMU: %s doesn't support hardware pass through.\n",
			iommu->name);
		return -ENXIO;
	}
	if (!ecap_sc_support(iommu->ecap) &&
	    domain_update_iommu_snooping(iommu)) {
		pr_warn("IOMMU: %s doesn't support snooping.\n",
			iommu->name);
		return -ENXIO;
	}
	sp = domain_update_iommu_superpage(iommu) - 1;
	if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
		pr_warn("IOMMU: %s doesn't support large page.\n",
			iommu->name);
		return -ENXIO;
	}

	/*
	 * Disable translation if already enabled prior to OS handover.
	 */
	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	g_iommus[iommu->seq_id] = iommu;
	ret = iommu_init_domains(iommu);
	if (ret == 0)
		ret = iommu_alloc_root_entry(iommu);
	if (ret)
		goto out;

	if (dmaru->ignored) {
		/*
		 * we always have to disable PMRs or DMA may fail on this device
		 */
		if (force_on)
			iommu_disable_protect_mem_regions(iommu);
		return 0;
	}

	intel_iommu_init_qi(iommu);
	iommu_flush_write_buffer(iommu);
	ret = dmar_set_interrupt(iommu);
	if (ret)
		goto disable_iommu;

	iommu_set_root_entry(iommu);
	iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
	iommu_enable_translation(iommu);

	if (si_domain) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret < 0 || si_domain->id != ret)
			goto disable_iommu;
		domain_attach_iommu(si_domain, iommu);
	}

	iommu_disable_protect_mem_regions(iommu);
	return 0;

disable_iommu:
	disable_dmar_iommu(iommu);
out:
	free_dmar_iommu(iommu);
	return ret;
}

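/*
 * Entry point for DMAR unit hotplug: @insert selects between bringing
 * the unit online via intel_iommu_add() and tearing it down again.
 */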
int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	int ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (!intel_iommu_enabled)
		return 0;
	if (iommu == NULL)
		return -EINVAL;

	if (insert) {
		ret = intel_iommu_add(dmaru);
	} else {
		disable_dmar_iommu(iommu);
		free_dmar_iommu(iommu);
	}

	return ret;
}

static void intel_iommu_free_dmars(void)
{
	struct dmar_rmrr_unit *rmrru, *rmrr_n;
	struct dmar_atsr_unit *atsru, *atsr_n;

	list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
		list_del(&rmrru->list);
		dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
		kfree(rmrru);
	}

	list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
		list_del(&atsru->list);
		intel_iommu_free_atsr(atsru);
	}
}

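/*
 * Decide whether @dev may use ATS: walk up to the PCIe root port above
 * the device and return non-zero if an ATSR unit covers that root port,
 * either explicitly in its device scope or via the INCLUDE_ALL flag.
 */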
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i, ret = 1;
	struct pci_bus *bus;
	struct pci_dev *bridge = NULL;
	struct device *tmp;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);
	for (bus = dev->bus; bus; bus = bus->parent) {
		bridge = bus->self;
		if (!bridge || !pci_is_pcie(bridge) ||
		    pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
			break;
	}
	if (!bridge)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment != pci_domain_nr(dev->bus))
			continue;

		for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
			if (tmp == &bridge->dev)
				goto out;

		if (atsru->include_all)
			goto out;
	}
	ret = 0;
out:
	rcu_read_unlock();

	return ret;
}

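/*
 * Keep the RMRR and ATSR device-scope caches in sync with PCI device
 * hotplug: insert the new device into every matching scope on
 * BUS_NOTIFY_ADD_DEVICE and drop it again on BUS_NOTIFY_DEL_DEVICE.
 */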
int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_rmrr_unit *rmrru;
	struct dmar_atsr_unit *atsru;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_reserved_memory *rmrr;

	if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
		return 0;

	list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
		rmrr = container_of(rmrru->hdr,
				    struct acpi_dmar_reserved_memory, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
				((void *)rmrr) + rmrr->header.length,
				rmrr->segment, rmrru->devices,
				rmrru->devices_cnt);
			if (ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
			dmar_remove_dev_scope(info, rmrr->segment,
				rmrru->devices, rmrru->devices_cnt);
		}
	}

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		if (atsru->include_all)
			continue;

		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
					(void *)atsr + atsr->header.length,
					atsr->segment, atsru->devices,
					atsru->devices_cnt);
			if (ret > 0)
				break;
			else if (ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
			if (dmar_remove_dev_scope(info, atsr->segment,
					atsru->devices, atsru->devices_cnt))
				break;
		}
	}

	return 0;
}

/*
 * We only respond here to the removal of a device. A newly added
 * device is not attached to its DMAR domain at this point; that
 * happens lazily, when the device is first mapped to an iova.
 */
static int device_notifier(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	struct device *dev = data;
	struct dmar_domain *domain;

	if (iommu_dummy(dev))
		return 0;

	if (action != BUS_NOTIFY_REMOVED_DEVICE)
		return 0;

	domain = find_domain(dev);
	if (!domain)
		return 0;

	down_read(&dmar_global_lock);
	domain_remove_one_dev_info(domain, dev);
	if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
		domain_exit(domain);
	up_read(&dmar_global_lock);

	return 0;
}

static struct notifier_block device_nb = {
	.notifier_call = device_notifier,
};

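/*
 * Memory hotplug support for the pass-through (si_domain) case: newly
 * onlined ranges must be added to the identity map, while offlined
 * ranges must be unmapped, have their IOVAs released and their IOTLB
 * entries flushed on every active IOMMU.
 */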
static int intel_iommu_memory_notifier(struct notifier_block *nb,
				       unsigned long val, void *v)
{
	struct memory_notify *mhp = v;
	unsigned long long start, end;
	unsigned long start_vpfn, last_vpfn;

	switch (val) {
	case MEM_GOING_ONLINE:
		start = mhp->start_pfn << PAGE_SHIFT;
		end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
		if (iommu_domain_identity_map(si_domain, start, end)) {
			pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
				start, end);
			return NOTIFY_BAD;
		}
		break;

	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
		start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
		last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
		while (start_vpfn <= last_vpfn) {
			struct iova *iova;
			struct dmar_drhd_unit *drhd;
			struct intel_iommu *iommu;
			struct page *freelist;

			iova = find_iova(&si_domain->iovad, start_vpfn);
			if (iova == NULL) {
				pr_debug("dmar: failed get IOVA for PFN %lx\n",
					 start_vpfn);
				break;
			}

			iova = split_and_remove_iova(&si_domain->iovad, iova,
						     start_vpfn, last_vpfn);
			if (iova == NULL) {
				pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
					start_vpfn, last_vpfn);
				return NOTIFY_BAD;
			}

			freelist = domain_unmap(si_domain, iova->pfn_lo,
						iova->pfn_hi);

			rcu_read_lock();
			for_each_active_iommu(iommu, drhd)
				iommu_flush_iotlb_psi(iommu, si_domain->id,
					iova->pfn_lo, iova_size(iova),
					!freelist, 0);
			rcu_read_unlock();
			dma_free_pagelist(freelist);

			start_vpfn = iova->pfn_hi + 1;
			free_iova_mem(iova);
		}
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block intel_iommu_memory_nb = {
	.notifier_call = intel_iommu_memory_notifier,
	.priority = 0
};

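/*
 * Read-only sysfs attributes exposing each IOMMU's version register,
 * register base address and (extended) capability registers. With the
 * device names assigned by the DMAR code these typically appear as,
 * for example:
 *
 *	# cat /sys/class/iommu/dmar0/intel-iommu/version
 *	1:0
 *	# cat /sys/class/iommu/dmar0/intel-iommu/cap
 *	d2008c40660462
 *
 * (paths and values shown are illustrative, not guaranteed)
 */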
static ssize_t intel_iommu_show_version(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	u32 ver = readl(iommu->reg + DMAR_VER_REG);
	return sprintf(buf, "%d:%d\n",
		       DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
}
static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);

static ssize_t intel_iommu_show_address(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->reg_phys);
}
static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);

static ssize_t intel_iommu_show_cap(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);

static ssize_t intel_iommu_show_ecap(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->ecap);
}
static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);

static struct attribute *intel_iommu_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_address.attr,
	&dev_attr_cap.attr,
	&dev_attr_ecap.attr,
	NULL,
};

static struct attribute_group intel_iommu_group = {
	.name = "intel-iommu",
	.attrs = intel_iommu_attrs,
};

const struct attribute_group *intel_iommu_groups[] = {
	&intel_iommu_group,
	NULL,
};

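/*
 * Main initialization entry point, reached via the arch IOMMU init
 * hook: parse the DMAR table, build device scopes and the initial
 * domains, then install intel_dma_ops as the kernel's DMA API backend.
 * Under a TXT/tboot measured launch, failures here are fatal because
 * the launch requires working VT-d.
 */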
int __init intel_iommu_init(void)
{
	int ret = -ENODEV;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* VT-d is required for a TXT/tboot launch, so enforce that */
	force_on = tboot_force_iommu();

	if (iommu_init_mempool()) {
		if (force_on)
			panic("tboot: Failed to initialize iommu memory\n");
		return -ENOMEM;
	}

	down_write(&dmar_global_lock);
	if (dmar_table_init()) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR table\n");
		goto out_free_dmar;
	}

	/*
	 * Disable translation if already enabled prior to OS handover.
	 */
	for_each_active_iommu(iommu, drhd)
		if (iommu->gcmd & DMA_GCMD_TE)
			iommu_disable_translation(iommu);

	if (dmar_dev_scope_init() < 0) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR device scope\n");
		goto out_free_dmar;
	}

	if (no_iommu || dmar_disabled)
		goto out_free_dmar;

	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO "DMAR: No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO "DMAR: No ATSR found\n");

	if (dmar_init_reserved_ranges()) {
		if (force_on)
			panic("tboot: Failed to reserve iommu ranges\n");
		goto out_free_reserved_range;
	}

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		if (force_on)
			panic("tboot: Failed to initialize DMARs\n");
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		goto out_free_reserved_range;
	}
	up_write(&dmar_global_lock);
	printk(KERN_INFO
	       "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
	swiotlb = 0;
#endif
	dma_ops = &intel_dma_ops;

	init_iommu_pm_ops();

	for_each_active_iommu(iommu, drhd)
		iommu->iommu_dev = iommu_device_create(NULL, iommu,
						       intel_iommu_groups,
						       iommu->name);

	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
	bus_register_notifier(&pci_bus_type, &device_nb);
	if (si_domain && !hw_pass_through)
		register_memory_notifier(&intel_iommu_memory_nb);

	intel_iommu_enabled = 1;

	return 0;

out_free_reserved_range:
	put_iova_domain(&reserved_iova_list);
out_free_dmar:
	intel_iommu_free_dmars();
	up_write(&dmar_global_lock);
	iommu_exit_mempool();
	return ret;
}

static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct intel_iommu *iommu = opaque;

	iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
	return 0;
}

/*
 * NB - intel-iommu lacks any sort of reference counting for the users of
 * dependent devices. If multiple endpoints have intersecting dependent
 * devices, unbinding the driver from any one of them will possibly leave
 * the others unable to operate.
 */
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev)
{
	if (!iommu || !dev || !dev_is_pci(dev))
		return;

	pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev)
{
	struct device_domain_info *info, *tmp;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
		if (info->iommu == iommu && info->bus == bus &&
		    info->devfn == devfn) {
			unlink_domain_info(info);
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, dev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/*
		 * If no other device under the same iommu is owned by
		 * this domain, clear this iommu from the domain's
		 * iommu_bmp and update the iommu count and coherency.
		 */
		if (info->iommu == iommu)
			found = 1;
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (found == 0) {
		domain_detach_iommu(domain, iommu);
		if (!domain_type_is_vm_or_si(domain))
			iommu_detach_domain(domain, iommu);
	}
}

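/*
 * Minimal domain setup for externally managed (VM) domains created
 * through the IOMMU API: initialize the iova allocator, size the
 * address width and allocate the top-level page table, but leave all
 * per-IOMMU capability bits cleared until an IOMMU is attached.
 */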
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, IOVA_START_PFN, DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->iommu_superpage = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
	if (!dmar_domain) {
		printk(KERN_ERR
		       "intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
		       "intel_iommu_domain_init() failed\n");
		domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain_update_iommu_cap(dmar_domain);
	domain->priv = dmar_domain;

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
	domain->geometry.force_aperture = true;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	domain_exit(dmar_domain);
}

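/*
 * Attach @dev to an IOMMU API domain. A device that is already
 * context-mapped is first detached from its old domain, and the
 * domain's address width is clamped (dropping page-table levels if
 * necessary) to what the device's IOMMU can actually walk.
 */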
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct intel_iommu *iommu;
	int addr_width;
	u8 bus, devfn;

	if (device_is_rmrr_locked(dev)) {
		dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
		return -EPERM;
	}

	/* normally dev is not mapped */
	if (unlikely(domain_context_mapped(dev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(dev);
		if (old_domain) {
			if (domain_type_is_vm_or_si(dmar_domain))
				domain_remove_one_dev_info(old_domain, dev);
			else
				domain_remove_dev_info(old_domain);

			if (!domain_type_is_vm_or_si(old_domain) &&
			    list_empty(&old_domain->devices))
				domain_exit(old_domain);
		}
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		printk(KERN_ERR "%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain_remove_one_dev_info(dmar_domain, dev);
}

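/*
 * IOMMU API map callback: translate IOMMU_READ/WRITE/CACHE flags into
 * DMA PTE bits, grow the domain's recorded max_addr (checking that it
 * still fits the negotiated address width) and install the mapping.
 */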
static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

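/*
 * IOMMU API unmap callback: tear down the page-table entries for the
 * range, flush the IOTLB of every IOMMU the domain is attached to, and
 * only then free the page-table pages collected on the freelist.
 */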
static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct page *freelist = NULL;
	struct intel_iommu *iommu;
	unsigned long start_pfn, last_pfn;
	unsigned int npages;
	int iommu_id, num, ndomains, level = 0;

	/* Cope with horrid API which requires us to unmap more than the
	   size argument if it happens to be a large-page mapping. */
	if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
		BUG();

	if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
		size = VTD_PAGE_SIZE << level_to_offset_bits(level);

	start_pfn = iova >> VTD_PAGE_SHIFT;
	last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;

	freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);

	npages = last_pfn - start_pfn + 1;

	for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
		iommu = g_iommus[iommu_id];

		/*
		 * find bit position of dmar_domain
		 */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == dmar_domain)
				iommu_flush_iotlb_psi(iommu, num, start_pfn,
						      npages, !freelist, 0);
		}
	}

	dma_free_pagelist(freelist);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return size;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	int level = 0;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static bool intel_iommu_capable(enum iommu_cap cap)
{
	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return domain_update_iommu_snooping(NULL) == 1;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled == 1;

	return false;
}

static int intel_iommu_add_device(struct device *dev)
{
	struct intel_iommu *iommu;
	struct iommu_group *group;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	iommu_device_link(iommu->iommu_dev, dev);

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void intel_iommu_remove_device(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	iommu_group_remove_device(dev);

	iommu_device_unlink(iommu->iommu_dev, dev);
}

static const struct iommu_ops intel_iommu_ops = {
	.capable	= intel_iommu_capable,
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.map_sg		= default_iommu_map_sg,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.add_device	= intel_iommu_add_device,
	.remove_device	= intel_iommu_remove_device,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};

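/*
 * Device quirks. Several Intel integrated graphics chipsets either
 * misreport their DMAR capabilities or cannot be safely translated at
 * all; the fixups below disable or constrain the IOMMU for them.
 */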
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that.  We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}