Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001/*
David Woodhouseea8ea462014-03-05 17:09:32 +00002 * Copyright © 2006-2014 Intel Corporation.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
David Woodhouseea8ea462014-03-05 17:09:32 +000013 * Authors: David Woodhouse <dwmw2@infradead.org>,
14 * Ashok Raj <ashok.raj@intel.com>,
15 * Shaohua Li <shaohua.li@intel.com>,
16 * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17 * Fenghua Yu <fenghua.yu@intel.com>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070018 */
19
20#include <linux/init.h>
21#include <linux/bitmap.h>
mark gross5e0d2a62008-03-04 15:22:08 -080022#include <linux/debugfs.h>
Paul Gortmaker54485c32011-10-29 10:26:25 -040023#include <linux/export.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070024#include <linux/slab.h>
25#include <linux/irq.h>
26#include <linux/interrupt.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070027#include <linux/spinlock.h>
28#include <linux/pci.h>
29#include <linux/dmar.h>
30#include <linux/dma-mapping.h>
31#include <linux/mempool.h>
Jiang Liu75f05562014-02-19 14:07:37 +080032#include <linux/memory.h>
mark gross5e0d2a62008-03-04 15:22:08 -080033#include <linux/timer.h>
Kay, Allen M38717942008-09-09 18:37:29 +030034#include <linux/iova.h>
Joerg Roedel5d450802008-12-03 14:52:32 +010035#include <linux/iommu.h>
Kay, Allen M38717942008-09-09 18:37:29 +030036#include <linux/intel-iommu.h>
Rafael J. Wysocki134fac32011-03-23 22:16:14 +010037#include <linux/syscore_ops.h>
Shane Wang69575d32009-09-01 18:25:07 -070038#include <linux/tboot.h>
Stephen Rothwelladb2fe02009-08-31 15:24:23 +100039#include <linux/dmi.h>
Joerg Roedel5cdede22011-04-04 15:55:18 +020040#include <linux/pci-ats.h>
Tejun Heo0ee332c2011-12-08 10:22:09 -080041#include <linux/memblock.h>
Akinobu Mita36746432014-06-04 16:06:51 -070042#include <linux/dma-contiguous.h>
Suresh Siddha8a8f4222012-03-30 11:47:08 -070043#include <asm/irq_remapping.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070044#include <asm/cacheflush.h>
FUJITA Tomonori46a7fa22008-07-11 10:23:42 +090045#include <asm/iommu.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070046
Joerg Roedel078e1ee2012-09-26 12:44:43 +020047#include "irq_remapping.h"
48
Fenghua Yu5b6985c2008-10-16 18:02:32 -070049#define ROOT_SIZE VTD_PAGE_SIZE
50#define CONTEXT_SIZE VTD_PAGE_SIZE
51
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070052#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
David Woodhouse18436af2015-03-25 15:05:47 +000053#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070054#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
David Woodhousee0fc7e02009-09-30 09:12:17 -070055#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070056
57#define IOAPIC_RANGE_START (0xfee00000)
58#define IOAPIC_RANGE_END (0xfeefffff)
59#define IOVA_START_ADDR (0x1000)
60
61#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
62
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -070063#define MAX_AGAW_WIDTH 64
Jiang Liu5c645b32014-01-06 14:18:12 +080064#define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -070065
David Woodhouse2ebe3152009-09-19 07:34:04 -070066#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
67#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
68
69/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
70 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
71#define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
72 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
73#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
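/*
 * Worked example: with the default 48-bit guest address width,
 * __DOMAIN_MAX_PFN(48) is 2^36 - 1. On 64-bit builds DOMAIN_MAX_PFN(48)
 * keeps that value; on 32-bit builds it is clamped to ULONG_MAX (2^32 - 1)
 * so that PFNs always fit in an unsigned long, as the comment above notes.
 */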
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070074
Mark McLoughlinf27be032008-11-20 15:49:43 +000075#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
Yang Hongyang284901a2009-04-06 19:01:15 -070076#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
Yang Hongyang6a355282009-04-06 19:01:13 -070077#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
mark gross5e0d2a62008-03-04 15:22:08 -080078
Andrew Mortondf08cdc2010-09-22 13:05:11 -070079/* page table handling */
80#define LEVEL_STRIDE (9)
81#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
82
Ohad Ben-Cohen6d1c56a2011-11-10 11:32:30 +020083/*
 84 * This bitmap is used to advertise the page sizes our hardware supports
85 * to the IOMMU core, which will then use this information to split
86 * physically contiguous memory regions it is mapping into page sizes
87 * that we support.
88 *
89 * Traditionally the IOMMU core just handed us the mappings directly,
90 * after making sure the size is an order of a 4KiB page and that the
91 * mapping has natural alignment.
92 *
93 * To retain this behavior, we currently advertise that we support
94 * all page sizes that are an order of 4KiB.
95 *
96 * If at some point we'd like to utilize the IOMMU core's new behavior,
97 * we could change this to advertise the real page sizes we support.
98 */
99#define INTEL_IOMMU_PGSIZES (~0xFFFUL)
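/*
 * ~0xFFFUL leaves bits 0-11 clear and every bit from 12 upwards set, i.e.
 * it advertises every power-of-two size from 4KiB up (4KiB, 8KiB, 16KiB, ...),
 * matching the "all page sizes that are an order of 4KiB" policy above.
 */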
100
Andrew Mortondf08cdc2010-09-22 13:05:11 -0700101static inline int agaw_to_level(int agaw)
102{
103 return agaw + 2;
104}
105
106static inline int agaw_to_width(int agaw)
107{
Jiang Liu5c645b32014-01-06 14:18:12 +0800108 return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
Andrew Mortondf08cdc2010-09-22 13:05:11 -0700109}
110
111static inline int width_to_agaw(int width)
112{
Jiang Liu5c645b32014-01-06 14:18:12 +0800113 return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
Andrew Mortondf08cdc2010-09-22 13:05:11 -0700114}
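/*
 * Worked example for the agaw helpers above: a 48-bit address width gives
 * width_to_agaw(48) = DIV_ROUND_UP(18, 9) = 2, agaw_to_level(2) = 4
 * (a 4-level page table) and agaw_to_width(2) = 48. A 39-bit width maps
 * to agaw 1, i.e. a 3-level table.
 */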
115
116static inline unsigned int level_to_offset_bits(int level)
117{
118 return (level - 1) * LEVEL_STRIDE;
119}
120
121static inline int pfn_level_offset(unsigned long pfn, int level)
122{
123 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
124}
125
126static inline unsigned long level_mask(int level)
127{
128 return -1UL << level_to_offset_bits(level);
129}
130
131static inline unsigned long level_size(int level)
132{
133 return 1UL << level_to_offset_bits(level);
134}
135
136static inline unsigned long align_to_level(unsigned long pfn, int level)
137{
138 return (pfn + level_size(level) - 1) & level_mask(level);
139}
David Woodhousefd18de52009-05-10 23:57:41 +0100140
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100141static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
142{
Jiang Liu5c645b32014-01-06 14:18:12 +0800143 return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100144}
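/*
 * lvl_to_nr_pages() returns the number of 4KiB VT-d pages covered by one
 * PTE at the given level: level 1 -> 1 page (4KiB), level 2 -> 512 pages
 * (2MiB), level 3 -> 2^18 pages (1GiB), capped at MAX_AGAW_PFN_WIDTH.
 */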
145
David Woodhousedd4e8312009-06-27 16:21:20 +0100146/* VT-d pages must never be _larger_ than MM pages. Otherwise things
147 are never going to work. */
148static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
149{
150 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
151}
152
153static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
154{
155 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
156}
157static inline unsigned long page_to_dma_pfn(struct page *pg)
158{
159 return mm_to_dma_pfn(page_to_pfn(pg));
160}
161static inline unsigned long virt_to_dma_pfn(void *p)
162{
163 return page_to_dma_pfn(virt_to_page(p));
164}
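/*
 * On x86 with 4KiB kernel pages, PAGE_SHIFT and VTD_PAGE_SHIFT are both 12,
 * so the dma<->mm pfn conversions above shift by zero and are identities;
 * they would only do real work if MM pages were larger than VT-d pages.
 */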
165
Weidong Hand9630fe2008-12-08 11:06:32 +0800166/* global iommu list, set NULL for ignored DMAR units */
167static struct intel_iommu **g_iommus;
168
David Woodhousee0fc7e02009-09-30 09:12:17 -0700169static void __init check_tylersburg_isoch(void);
David Woodhouse9af88142009-02-13 23:18:03 +0000170static int rwbf_quirk;
171
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000172/*
Joseph Cihulab7792602011-05-03 00:08:37 -0700173 * set to 1 to panic the kernel if VT-d can't be successfully enabled
 174 * (used when the kernel is launched with TXT)
175 */
176static int force_on = 0;
177
178/*
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000179 * 0: Present
180 * 1-11: Reserved
181 * 12-63: Context Ptr (12 - (haw-1))
182 * 64-127: Reserved
183 */
184struct root_entry {
David Woodhouse03ecc322015-02-13 14:35:21 +0000185 u64 lo;
186 u64 hi;
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000187};
188#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
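/*
 * A root table is one 4KiB page of 16-byte root entries, so ROOT_ENTRY_NR
 * is 256: one root entry per PCI bus number, each pointing at a context
 * table that is indexed by devfn.
 */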
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000189
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000190
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000191/*
192 * low 64 bits:
193 * 0: present
194 * 1: fault processing disable
195 * 2-3: translation type
196 * 12-63: address space root
197 * high 64 bits:
198 * 0-2: address width
 199 * 3-6: avail
200 * 8-23: domain id
201 */
202struct context_entry {
203 u64 lo;
204 u64 hi;
205};
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000206
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000207static inline bool context_present(struct context_entry *context)
208{
209 return (context->lo & 1);
210}
211static inline void context_set_present(struct context_entry *context)
212{
213 context->lo |= 1;
214}
215
216static inline void context_set_fault_enable(struct context_entry *context)
217{
218 context->lo &= (((u64)-1) << 2) | 1;
219}
220
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000221static inline void context_set_translation_type(struct context_entry *context,
222 unsigned long value)
223{
224 context->lo &= (((u64)-1) << 4) | 3;
225 context->lo |= (value & 3) << 2;
226}
227
228static inline void context_set_address_root(struct context_entry *context,
229 unsigned long value)
230{
Li, Zhen-Hua1a2262f2014-11-05 15:30:19 +0800231 context->lo &= ~VTD_PAGE_MASK;
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000232 context->lo |= value & VTD_PAGE_MASK;
233}
234
235static inline void context_set_address_width(struct context_entry *context,
236 unsigned long value)
237{
238 context->hi |= value & 7;
239}
240
241static inline void context_set_domain_id(struct context_entry *context,
242 unsigned long value)
243{
244 context->hi |= (value & ((1 << 16) - 1)) << 8;
245}
246
247static inline void context_clear_entry(struct context_entry *context)
248{
249 context->lo = 0;
250 context->hi = 0;
251}
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000252
Mark McLoughlin622ba122008-11-20 15:49:46 +0000253/*
254 * 0: readable
255 * 1: writable
256 * 2-6: reserved
257 * 7: super page
Sheng Yang9cf066972009-03-18 15:33:07 +0800258 * 8-10: available
259 * 11: snoop behavior
Mark McLoughlin622ba122008-11-20 15:49:46 +0000260 * 12-63: Host physical address
261 */
262struct dma_pte {
263 u64 val;
264};
Mark McLoughlin622ba122008-11-20 15:49:46 +0000265
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000266static inline void dma_clear_pte(struct dma_pte *pte)
267{
268 pte->val = 0;
269}
270
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000271static inline u64 dma_pte_addr(struct dma_pte *pte)
272{
David Woodhousec85994e2009-07-01 19:21:24 +0100273#ifdef CONFIG_64BIT
274 return pte->val & VTD_PAGE_MASK;
275#else
276 /* Must have a full atomic 64-bit read */
David Woodhouse1a8bd482010-08-10 01:38:53 +0100277 return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
David Woodhousec85994e2009-07-01 19:21:24 +0100278#endif
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000279}
280
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000281static inline bool dma_pte_present(struct dma_pte *pte)
282{
283 return (pte->val & 3) != 0;
284}
Mark McLoughlin622ba122008-11-20 15:49:46 +0000285
Allen Kay4399c8b2011-10-14 12:32:46 -0700286static inline bool dma_pte_superpage(struct dma_pte *pte)
287{
Joerg Roedelc3c75eb2014-07-04 11:19:10 +0200288 return (pte->val & DMA_PTE_LARGE_PAGE);
Allen Kay4399c8b2011-10-14 12:32:46 -0700289}
290
David Woodhouse75e6bf92009-07-02 11:21:16 +0100291static inline int first_pte_in_page(struct dma_pte *pte)
292{
293 return !((unsigned long)pte & ~VTD_PAGE_MASK);
294}
295
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700296/*
 297 * This domain is a static identity mapping domain.
 298 * 1. This domain creates a static 1:1 mapping to all usable memory.
 299 * 2. It maps to each iommu if successful.
 300 * 3. Each iommu maps to this domain if successful.
301 */
David Woodhouse19943b02009-08-04 16:19:20 +0100302static struct dmar_domain *si_domain;
303static int hw_pass_through = 1;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700304
Weidong Han1ce28fe2008-12-08 16:35:39 +0800305/* domain represents a virtual machine; more than one device
 306 * across iommus may be owned by one domain, e.g. a kvm guest.
307 */
Jiang Liuab8dfe22014-07-11 14:19:27 +0800308#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 0)
Weidong Han1ce28fe2008-12-08 16:35:39 +0800309
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700310/* si_domain contains multiple devices */
Jiang Liuab8dfe22014-07-11 14:19:27 +0800311#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700312
Mark McLoughlin99126f72008-11-20 15:49:47 +0000313struct dmar_domain {
314 int id; /* domain id */
Suresh Siddha4c923d42009-10-02 11:01:24 -0700315 int nid; /* node id */
Jiang Liu78d8e702014-11-09 22:47:57 +0800316 DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
Mike Travis1b198bb2012-03-05 15:05:16 -0800317 /* bitmap of iommus this domain uses*/
Mark McLoughlin99126f72008-11-20 15:49:47 +0000318
319 struct list_head devices; /* all devices' list */
320 struct iova_domain iovad; /* iova's that belong to this domain */
321
322 struct dma_pte *pgd; /* virtual address */
Mark McLoughlin99126f72008-11-20 15:49:47 +0000323 int gaw; /* max guest address width */
324
325 /* adjusted guest address width, 0 is level 2 30-bit */
326 int agaw;
327
Weidong Han3b5410e2008-12-08 09:17:15 +0800328 int flags; /* flags to find out type of domain */
Weidong Han8e6040972008-12-08 15:49:06 +0800329
330 int iommu_coherency;/* indicate coherency of iommu access */
Sheng Yang58c610b2009-03-18 15:33:05 +0800331 int iommu_snooping; /* indicate snooping control feature*/
Weidong Hanc7151a82008-12-08 22:51:37 +0800332 int iommu_count; /* reference count of iommu */
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100333 int iommu_superpage;/* Level of superpages supported:
334 0 == 4KiB (no superpages), 1 == 2MiB,
 335 2 == 1GiB, 3 == 512GiB, 4 == 256TiB */
Weidong Hanc7151a82008-12-08 22:51:37 +0800336 spinlock_t iommu_lock; /* protect iommu set in domain */
Weidong Hanfe40f1e2008-12-08 23:10:23 +0800337 u64 max_addr; /* maximum mapped address */
Mark McLoughlin99126f72008-11-20 15:49:47 +0000338};
339
Mark McLoughlina647dac2008-11-20 15:49:48 +0000340/* PCI domain-device relationship */
341struct device_domain_info {
342 struct list_head link; /* link to domain siblings */
343 struct list_head global; /* link to global list */
David Woodhouse276dbf992009-04-04 01:45:37 +0100344 u8 bus; /* PCI bus number */
Mark McLoughlina647dac2008-11-20 15:49:48 +0000345 u8 devfn; /* PCI devfn number */
David Woodhouse0bcb3e22014-03-06 17:12:03 +0000346 struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
Yu Zhao93a23a72009-05-18 13:51:37 +0800347 struct intel_iommu *iommu; /* IOMMU used by this device */
Mark McLoughlina647dac2008-11-20 15:49:48 +0000348 struct dmar_domain *domain; /* pointer to domain */
349};
350
Jiang Liub94e4112014-02-19 14:07:25 +0800351struct dmar_rmrr_unit {
352 struct list_head list; /* list of rmrr units */
353 struct acpi_dmar_header *hdr; /* ACPI header */
354 u64 base_address; /* reserved base address*/
355 u64 end_address; /* reserved end address */
David Woodhouse832bd852014-03-07 15:08:36 +0000356 struct dmar_dev_scope *devices; /* target devices */
Jiang Liub94e4112014-02-19 14:07:25 +0800357 int devices_cnt; /* target device count */
358};
359
360struct dmar_atsr_unit {
361 struct list_head list; /* list of ATSR units */
362 struct acpi_dmar_header *hdr; /* ACPI header */
David Woodhouse832bd852014-03-07 15:08:36 +0000363 struct dmar_dev_scope *devices; /* target devices */
Jiang Liub94e4112014-02-19 14:07:25 +0800364 int devices_cnt; /* target device count */
365 u8 include_all:1; /* include all ports */
366};
367
368static LIST_HEAD(dmar_atsr_units);
369static LIST_HEAD(dmar_rmrr_units);
370
371#define for_each_rmrr_units(rmrr) \
372 list_for_each_entry(rmrr, &dmar_rmrr_units, list)
373
mark gross5e0d2a62008-03-04 15:22:08 -0800374static void flush_unmaps_timeout(unsigned long data);
375
Jiang Liub707cb02014-01-06 14:18:26 +0800376static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
mark gross5e0d2a62008-03-04 15:22:08 -0800377
mark gross80b20dd2008-04-18 13:53:58 -0700378#define HIGH_WATER_MARK 250
379struct deferred_flush_tables {
380 int next;
381 struct iova *iova[HIGH_WATER_MARK];
382 struct dmar_domain *domain[HIGH_WATER_MARK];
David Woodhouseea8ea462014-03-05 17:09:32 +0000383 struct page *freelist[HIGH_WATER_MARK];
mark gross80b20dd2008-04-18 13:53:58 -0700384};
385
386static struct deferred_flush_tables *deferred_flush;
387
mark gross5e0d2a62008-03-04 15:22:08 -0800388/* bitmap for indexing intel_iommus */
mark gross5e0d2a62008-03-04 15:22:08 -0800389static int g_num_of_iommus;
390
391static DEFINE_SPINLOCK(async_umap_flush_lock);
392static LIST_HEAD(unmaps_to_do);
393
394static int timer_on;
395static long list_size;
mark gross5e0d2a62008-03-04 15:22:08 -0800396
Jiang Liu92d03cc2014-02-19 14:07:28 +0800397static void domain_exit(struct dmar_domain *domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700398static void domain_remove_dev_info(struct dmar_domain *domain);
Jiang Liub94e4112014-02-19 14:07:25 +0800399static void domain_remove_one_dev_info(struct dmar_domain *domain,
David Woodhousebf9c9ed2014-03-09 16:19:13 -0700400 struct device *dev);
Jiang Liu92d03cc2014-02-19 14:07:28 +0800401static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
David Woodhouse0bcb3e22014-03-06 17:12:03 +0000402 struct device *dev);
Jiang Liu2a46ddf2014-07-11 14:19:30 +0800403static int domain_detach_iommu(struct dmar_domain *domain,
404 struct intel_iommu *iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700405
Suresh Siddhad3f13812011-08-23 17:05:25 -0700406#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800407int dmar_disabled = 0;
408#else
409int dmar_disabled = 1;
Suresh Siddhad3f13812011-08-23 17:05:25 -0700410#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800411
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -0200412int intel_iommu_enabled = 0;
413EXPORT_SYMBOL_GPL(intel_iommu_enabled);
414
David Woodhouse2d9e6672010-06-15 10:57:57 +0100415static int dmar_map_gfx = 1;
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700416static int dmar_forcedac;
mark gross5e0d2a62008-03-04 15:22:08 -0800417static int intel_iommu_strict;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100418static int intel_iommu_superpage = 1;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700419
David Woodhousec0771df2011-10-14 20:59:46 +0100420int intel_iommu_gfx_mapped;
421EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
422
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700423#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
424static DEFINE_SPINLOCK(device_domain_lock);
425static LIST_HEAD(device_domain_list);
426
Thierry Redingb22f6432014-06-27 09:03:12 +0200427static const struct iommu_ops intel_iommu_ops;
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +0100428
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700429static int __init intel_iommu_setup(char *str)
430{
431 if (!str)
432 return -EINVAL;
433 while (*str) {
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800434 if (!strncmp(str, "on", 2)) {
435 dmar_disabled = 0;
436 printk(KERN_INFO "Intel-IOMMU: enabled\n");
437 } else if (!strncmp(str, "off", 3)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700438 dmar_disabled = 1;
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800439 printk(KERN_INFO "Intel-IOMMU: disabled\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700440 } else if (!strncmp(str, "igfx_off", 8)) {
441 dmar_map_gfx = 0;
442 printk(KERN_INFO
443 "Intel-IOMMU: disable GFX device mapping\n");
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700444 } else if (!strncmp(str, "forcedac", 8)) {
mark gross5e0d2a62008-03-04 15:22:08 -0800445 printk(KERN_INFO
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700446 "Intel-IOMMU: Forcing DAC for PCI devices\n");
447 dmar_forcedac = 1;
mark gross5e0d2a62008-03-04 15:22:08 -0800448 } else if (!strncmp(str, "strict", 6)) {
449 printk(KERN_INFO
450 "Intel-IOMMU: disable batched IOTLB flush\n");
451 intel_iommu_strict = 1;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100452 } else if (!strncmp(str, "sp_off", 6)) {
453 printk(KERN_INFO
454 "Intel-IOMMU: disable supported super page\n");
455 intel_iommu_superpage = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700456 }
457
458 str += strcspn(str, ",");
459 while (*str == ',')
460 str++;
461 }
462 return 0;
463}
464__setup("intel_iommu=", intel_iommu_setup);
465
466static struct kmem_cache *iommu_domain_cache;
467static struct kmem_cache *iommu_devinfo_cache;
468static struct kmem_cache *iommu_iova_cache;
469
Suresh Siddha4c923d42009-10-02 11:01:24 -0700470static inline void *alloc_pgtable_page(int node)
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700471{
Suresh Siddha4c923d42009-10-02 11:01:24 -0700472 struct page *page;
473 void *vaddr = NULL;
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700474
Suresh Siddha4c923d42009-10-02 11:01:24 -0700475 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
476 if (page)
477 vaddr = page_address(page);
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700478 return vaddr;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700479}
480
481static inline void free_pgtable_page(void *vaddr)
482{
483 free_page((unsigned long)vaddr);
484}
485
486static inline void *alloc_domain_mem(void)
487{
KOSAKI Motohiro354bb652009-11-17 16:21:09 +0900488 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700489}
490
Kay, Allen M38717942008-09-09 18:37:29 +0300491static void free_domain_mem(void *vaddr)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700492{
493 kmem_cache_free(iommu_domain_cache, vaddr);
494}
495
496static inline void * alloc_devinfo_mem(void)
497{
KOSAKI Motohiro354bb652009-11-17 16:21:09 +0900498 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700499}
500
501static inline void free_devinfo_mem(void *vaddr)
502{
503 kmem_cache_free(iommu_devinfo_cache, vaddr);
504}
505
506struct iova *alloc_iova_mem(void)
507{
KOSAKI Motohiro354bb652009-11-17 16:21:09 +0900508 return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700509}
510
511void free_iova_mem(struct iova *iova)
512{
513 kmem_cache_free(iommu_iova_cache, iova);
514}
515
Jiang Liuab8dfe22014-07-11 14:19:27 +0800516static inline int domain_type_is_vm(struct dmar_domain *domain)
517{
518 return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
519}
520
521static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
522{
523 return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
524 DOMAIN_FLAG_STATIC_IDENTITY);
525}
Weidong Han1b573682008-12-08 15:34:06 +0800526
Jiang Liu162d1b12014-07-11 14:19:35 +0800527static inline int domain_pfn_supported(struct dmar_domain *domain,
528 unsigned long pfn)
529{
530 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
531
532 return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
533}
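/*
 * Example: with the default agaw of 2 (48-bit width), addr_width is 36,
 * so any pfn with bits set at or above bit 36 is rejected as beyond the
 * domain's addressing capability.
 */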
534
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700535static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
Weidong Han1b573682008-12-08 15:34:06 +0800536{
537 unsigned long sagaw;
538 int agaw = -1;
539
540 sagaw = cap_sagaw(iommu->cap);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700541 for (agaw = width_to_agaw(max_gaw);
Weidong Han1b573682008-12-08 15:34:06 +0800542 agaw >= 0; agaw--) {
543 if (test_bit(agaw, &sagaw))
544 break;
545 }
546
547 return agaw;
548}
549
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700550/*
551 * Calculate max SAGAW for each iommu.
552 */
553int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
554{
555 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
556}
557
558/*
559 * calculate agaw for each iommu.
560 * "SAGAW" may be different across iommus, use a default agaw, and
561 * get a supported less agaw for iommus that don't support the default agaw.
562 */
563int iommu_calculate_agaw(struct intel_iommu *iommu)
564{
565 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
566}
567
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700568/* This function only returns a single iommu in a domain */
Weidong Han8c11e792008-12-08 15:29:22 +0800569static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
570{
571 int iommu_id;
572
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700573 /* si_domain and vm domain should not get here. */
Jiang Liuab8dfe22014-07-11 14:19:27 +0800574 BUG_ON(domain_type_is_vm_or_si(domain));
Mike Travis1b198bb2012-03-05 15:05:16 -0800575 iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
Weidong Han8c11e792008-12-08 15:29:22 +0800576 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
577 return NULL;
578
579 return g_iommus[iommu_id];
580}
581
Weidong Han8e6040972008-12-08 15:49:06 +0800582static void domain_update_iommu_coherency(struct dmar_domain *domain)
583{
David Woodhoused0501962014-03-11 17:10:29 -0700584 struct dmar_drhd_unit *drhd;
585 struct intel_iommu *iommu;
586 int i, found = 0;
Weidong Han8e6040972008-12-08 15:49:06 +0800587
David Woodhoused0501962014-03-11 17:10:29 -0700588 domain->iommu_coherency = 1;
Weidong Han8e6040972008-12-08 15:49:06 +0800589
Mike Travis1b198bb2012-03-05 15:05:16 -0800590 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
David Woodhoused0501962014-03-11 17:10:29 -0700591 found = 1;
Weidong Han8e6040972008-12-08 15:49:06 +0800592 if (!ecap_coherent(g_iommus[i]->ecap)) {
593 domain->iommu_coherency = 0;
594 break;
595 }
Weidong Han8e6040972008-12-08 15:49:06 +0800596 }
David Woodhoused0501962014-03-11 17:10:29 -0700597 if (found)
598 return;
599
600 /* No hardware attached; use lowest common denominator */
601 rcu_read_lock();
602 for_each_active_iommu(iommu, drhd) {
603 if (!ecap_coherent(iommu->ecap)) {
604 domain->iommu_coherency = 0;
605 break;
606 }
607 }
608 rcu_read_unlock();
Weidong Han8e6040972008-12-08 15:49:06 +0800609}
610
Jiang Liu161f6932014-07-11 14:19:37 +0800611static int domain_update_iommu_snooping(struct intel_iommu *skip)
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100612{
Allen Kay8140a952011-10-14 12:32:17 -0700613 struct dmar_drhd_unit *drhd;
Jiang Liu161f6932014-07-11 14:19:37 +0800614 struct intel_iommu *iommu;
615 int ret = 1;
616
617 rcu_read_lock();
618 for_each_active_iommu(iommu, drhd) {
619 if (iommu != skip) {
620 if (!ecap_sc_support(iommu->ecap)) {
621 ret = 0;
622 break;
623 }
624 }
625 }
626 rcu_read_unlock();
627
628 return ret;
629}
630
631static int domain_update_iommu_superpage(struct intel_iommu *skip)
632{
633 struct dmar_drhd_unit *drhd;
634 struct intel_iommu *iommu;
Allen Kay8140a952011-10-14 12:32:17 -0700635 int mask = 0xf;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100636
637 if (!intel_iommu_superpage) {
Jiang Liu161f6932014-07-11 14:19:37 +0800638 return 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100639 }
640
Allen Kay8140a952011-10-14 12:32:17 -0700641 /* set iommu_superpage to the smallest common denominator */
Jiang Liu0e242612014-02-19 14:07:34 +0800642 rcu_read_lock();
Allen Kay8140a952011-10-14 12:32:17 -0700643 for_each_active_iommu(iommu, drhd) {
Jiang Liu161f6932014-07-11 14:19:37 +0800644 if (iommu != skip) {
645 mask &= cap_super_page_val(iommu->cap);
646 if (!mask)
647 break;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100648 }
649 }
Jiang Liu0e242612014-02-19 14:07:34 +0800650 rcu_read_unlock();
651
Jiang Liu161f6932014-07-11 14:19:37 +0800652 return fls(mask);
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100653}
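/*
 * The result is the largest superpage level supported by every active
 * iommu (other than 'skip'): e.g. if all of them report 2MiB+1GiB support
 * the common mask is 0x3 and fls() gives 2 (1GiB); if one only reports
 * 2MiB the result is 1; if any reports none, superpages are disabled (0).
 */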
654
Sheng Yang58c610b2009-03-18 15:33:05 +0800655/* Some capabilities may be different across iommus */
656static void domain_update_iommu_cap(struct dmar_domain *domain)
657{
658 domain_update_iommu_coherency(domain);
Jiang Liu161f6932014-07-11 14:19:37 +0800659 domain->iommu_snooping = domain_update_iommu_snooping(NULL);
660 domain->iommu_superpage = domain_update_iommu_superpage(NULL);
Sheng Yang58c610b2009-03-18 15:33:05 +0800661}
662
David Woodhouse03ecc322015-02-13 14:35:21 +0000663static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
664 u8 bus, u8 devfn, int alloc)
665{
666 struct root_entry *root = &iommu->root_entry[bus];
667 struct context_entry *context;
668 u64 *entry;
669
670 if (ecap_ecs(iommu->ecap)) {
671 if (devfn >= 0x80) {
672 devfn -= 0x80;
673 entry = &root->hi;
674 }
675 devfn *= 2;
676 }
677 entry = &root->lo;
678 if (*entry & 1)
679 context = phys_to_virt(*entry & VTD_PAGE_MASK);
680 else {
681 unsigned long phy_addr;
682 if (!alloc)
683 return NULL;
684
685 context = alloc_pgtable_page(iommu->node);
686 if (!context)
687 return NULL;
688
689 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
690 phy_addr = virt_to_phys((void *)context);
691 *entry = phy_addr | 1;
692 __iommu_flush_cache(iommu, entry, sizeof(*entry));
693 }
694 return &context[devfn];
695}
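/*
 * A note on the extended (ECS) layout handled above: with extended context
 * support, each bus's root entry carries two context-table pointers, 'lo'
 * covering devfns 0x00-0x7f and 'hi' covering devfns 0x80-0xff, and extended
 * context entries are twice the size of legacy ones, which is why devfn is
 * doubled before indexing. Without ECS, 'lo' alone covers all 256 devfns.
 */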
696
David Woodhouse156baca2014-03-09 14:00:57 -0700697static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
Weidong Hanc7151a82008-12-08 22:51:37 +0800698{
699 struct dmar_drhd_unit *drhd = NULL;
Jiang Liub683b232014-02-19 14:07:32 +0800700 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -0700701 struct device *tmp;
702 struct pci_dev *ptmp, *pdev = NULL;
Yijing Wangaa4d0662014-05-26 20:14:06 +0800703 u16 segment = 0;
Weidong Hanc7151a82008-12-08 22:51:37 +0800704 int i;
705
David Woodhouse156baca2014-03-09 14:00:57 -0700706 if (dev_is_pci(dev)) {
707 pdev = to_pci_dev(dev);
708 segment = pci_domain_nr(pdev->bus);
709 } else if (ACPI_COMPANION(dev))
710 dev = &ACPI_COMPANION(dev)->dev;
711
Jiang Liu0e242612014-02-19 14:07:34 +0800712 rcu_read_lock();
Jiang Liub683b232014-02-19 14:07:32 +0800713 for_each_active_iommu(iommu, drhd) {
David Woodhouse156baca2014-03-09 14:00:57 -0700714 if (pdev && segment != drhd->segment)
David Woodhouse276dbf992009-04-04 01:45:37 +0100715 continue;
Weidong Hanc7151a82008-12-08 22:51:37 +0800716
Jiang Liub683b232014-02-19 14:07:32 +0800717 for_each_active_dev_scope(drhd->devices,
David Woodhouse156baca2014-03-09 14:00:57 -0700718 drhd->devices_cnt, i, tmp) {
719 if (tmp == dev) {
720 *bus = drhd->devices[i].bus;
721 *devfn = drhd->devices[i].devfn;
722 goto out;
723 }
724
725 if (!pdev || !dev_is_pci(tmp))
David Woodhouse832bd852014-03-07 15:08:36 +0000726 continue;
David Woodhouse156baca2014-03-09 14:00:57 -0700727
728 ptmp = to_pci_dev(tmp);
729 if (ptmp->subordinate &&
730 ptmp->subordinate->number <= pdev->bus->number &&
731 ptmp->subordinate->busn_res.end >= pdev->bus->number)
732 goto got_pdev;
David Woodhouse924b6232009-04-04 00:39:25 +0100733 }
Weidong Hanc7151a82008-12-08 22:51:37 +0800734
David Woodhouse156baca2014-03-09 14:00:57 -0700735 if (pdev && drhd->include_all) {
736 got_pdev:
737 *bus = pdev->bus->number;
738 *devfn = pdev->devfn;
Jiang Liub683b232014-02-19 14:07:32 +0800739 goto out;
David Woodhouse156baca2014-03-09 14:00:57 -0700740 }
Weidong Hanc7151a82008-12-08 22:51:37 +0800741 }
Jiang Liub683b232014-02-19 14:07:32 +0800742 iommu = NULL;
David Woodhouse156baca2014-03-09 14:00:57 -0700743 out:
Jiang Liu0e242612014-02-19 14:07:34 +0800744 rcu_read_unlock();
Weidong Hanc7151a82008-12-08 22:51:37 +0800745
Jiang Liub683b232014-02-19 14:07:32 +0800746 return iommu;
Weidong Hanc7151a82008-12-08 22:51:37 +0800747}
748
Weidong Han5331fe62008-12-08 23:00:00 +0800749static void domain_flush_cache(struct dmar_domain *domain,
750 void *addr, int size)
751{
752 if (!domain->iommu_coherency)
753 clflush_cache_range(addr, size);
754}
755
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700756static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
757{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700758 struct context_entry *context;
David Woodhouse03ecc322015-02-13 14:35:21 +0000759 int ret = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700760 unsigned long flags;
761
762 spin_lock_irqsave(&iommu->lock, flags);
David Woodhouse03ecc322015-02-13 14:35:21 +0000763 context = iommu_context_addr(iommu, bus, devfn, 0);
764 if (context)
765 ret = context_present(context);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700766 spin_unlock_irqrestore(&iommu->lock, flags);
767 return ret;
768}
769
770static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
771{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700772 struct context_entry *context;
773 unsigned long flags;
774
775 spin_lock_irqsave(&iommu->lock, flags);
David Woodhouse03ecc322015-02-13 14:35:21 +0000776 context = iommu_context_addr(iommu, bus, devfn, 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700777 if (context) {
David Woodhouse03ecc322015-02-13 14:35:21 +0000778 context_clear_entry(context);
779 __iommu_flush_cache(iommu, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700780 }
781 spin_unlock_irqrestore(&iommu->lock, flags);
782}
783
784static void free_context_table(struct intel_iommu *iommu)
785{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700786 int i;
787 unsigned long flags;
788 struct context_entry *context;
789
790 spin_lock_irqsave(&iommu->lock, flags);
791 if (!iommu->root_entry) {
792 goto out;
793 }
794 for (i = 0; i < ROOT_ENTRY_NR; i++) {
David Woodhouse03ecc322015-02-13 14:35:21 +0000795 context = iommu_context_addr(iommu, i, 0, 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700796 if (context)
797 free_pgtable_page(context);
David Woodhouse03ecc322015-02-13 14:35:21 +0000798
799 if (!ecap_ecs(iommu->ecap))
800 continue;
801
802 context = iommu_context_addr(iommu, i, 0x80, 0);
803 if (context)
804 free_pgtable_page(context);
805
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700806 }
807 free_pgtable_page(iommu->root_entry);
808 iommu->root_entry = NULL;
809out:
810 spin_unlock_irqrestore(&iommu->lock, flags);
811}
812
David Woodhouseb026fd22009-06-28 10:37:25 +0100813static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
David Woodhouse5cf0a762014-03-19 16:07:49 +0000814 unsigned long pfn, int *target_level)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700815{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700816 struct dma_pte *parent, *pte = NULL;
817 int level = agaw_to_level(domain->agaw);
Allen Kay4399c8b2011-10-14 12:32:46 -0700818 int offset;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700819
820 BUG_ON(!domain->pgd);
Julian Stecklinaf9423602013-10-09 10:03:52 +0200821
Jiang Liu162d1b12014-07-11 14:19:35 +0800822 if (!domain_pfn_supported(domain, pfn))
Julian Stecklinaf9423602013-10-09 10:03:52 +0200823 /* Address beyond IOMMU's addressing capabilities. */
824 return NULL;
825
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700826 parent = domain->pgd;
827
David Woodhouse5cf0a762014-03-19 16:07:49 +0000828 while (1) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700829 void *tmp_page;
830
David Woodhouseb026fd22009-06-28 10:37:25 +0100831 offset = pfn_level_offset(pfn, level);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700832 pte = &parent[offset];
David Woodhouse5cf0a762014-03-19 16:07:49 +0000833 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100834 break;
David Woodhouse5cf0a762014-03-19 16:07:49 +0000835 if (level == *target_level)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700836 break;
837
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000838 if (!dma_pte_present(pte)) {
David Woodhousec85994e2009-07-01 19:21:24 +0100839 uint64_t pteval;
840
Suresh Siddha4c923d42009-10-02 11:01:24 -0700841 tmp_page = alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700842
David Woodhouse206a73c12009-07-01 19:30:28 +0100843 if (!tmp_page)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700844 return NULL;
David Woodhouse206a73c12009-07-01 19:30:28 +0100845
David Woodhousec85994e2009-07-01 19:21:24 +0100846 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
Benjamin LaHaise64de5af2009-09-16 21:05:55 -0400847 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
Yijing Wangeffad4b2014-05-26 20:13:47 +0800848 if (cmpxchg64(&pte->val, 0ULL, pteval))
David Woodhousec85994e2009-07-01 19:21:24 +0100849 /* Someone else set it while we were thinking; use theirs. */
850 free_pgtable_page(tmp_page);
Yijing Wangeffad4b2014-05-26 20:13:47 +0800851 else
David Woodhousec85994e2009-07-01 19:21:24 +0100852 domain_flush_cache(domain, pte, sizeof(*pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700853 }
David Woodhouse5cf0a762014-03-19 16:07:49 +0000854 if (level == 1)
855 break;
856
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000857 parent = phys_to_virt(dma_pte_addr(pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700858 level--;
859 }
860
David Woodhouse5cf0a762014-03-19 16:07:49 +0000861 if (!*target_level)
862 *target_level = level;
863
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700864 return pte;
865}
866
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100867
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700868/* return address's pte at specific level */
David Woodhouse90dcfb52009-06-27 17:14:59 +0100869static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
870 unsigned long pfn,
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100871 int level, int *large_page)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700872{
873 struct dma_pte *parent, *pte = NULL;
874 int total = agaw_to_level(domain->agaw);
875 int offset;
876
877 parent = domain->pgd;
878 while (level <= total) {
David Woodhouse90dcfb52009-06-27 17:14:59 +0100879 offset = pfn_level_offset(pfn, total);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700880 pte = &parent[offset];
881 if (level == total)
882 return pte;
883
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100884 if (!dma_pte_present(pte)) {
885 *large_page = total;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700886 break;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100887 }
888
Yijing Wange16922a2014-05-20 20:37:51 +0800889 if (dma_pte_superpage(pte)) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100890 *large_page = total;
891 return pte;
892 }
893
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000894 parent = phys_to_virt(dma_pte_addr(pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700895 total--;
896 }
897 return NULL;
898}
899
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700900/* clear last level pte, a tlb flush should follow */
David Woodhouse5cf0a762014-03-19 16:07:49 +0000901static void dma_pte_clear_range(struct dmar_domain *domain,
David Woodhouse595badf2009-06-27 22:09:11 +0100902 unsigned long start_pfn,
903 unsigned long last_pfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700904{
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100905 unsigned int large_page = 1;
David Woodhouse310a5ab2009-06-28 18:52:20 +0100906 struct dma_pte *first_pte, *pte;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700907
Jiang Liu162d1b12014-07-11 14:19:35 +0800908 BUG_ON(!domain_pfn_supported(domain, start_pfn));
909 BUG_ON(!domain_pfn_supported(domain, last_pfn));
David Woodhouse59c36282009-09-19 07:36:28 -0700910 BUG_ON(start_pfn > last_pfn);
David Woodhouse66eae842009-06-27 19:00:32 +0100911
David Woodhouse04b18e62009-06-27 19:15:01 +0100912 /* we don't need lock here; nobody else touches the iova range */
David Woodhouse59c36282009-09-19 07:36:28 -0700913 do {
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100914 large_page = 1;
915 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
David Woodhouse310a5ab2009-06-28 18:52:20 +0100916 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100917 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
David Woodhouse310a5ab2009-06-28 18:52:20 +0100918 continue;
919 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100920 do {
David Woodhouse310a5ab2009-06-28 18:52:20 +0100921 dma_clear_pte(pte);
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100922 start_pfn += lvl_to_nr_pages(large_page);
David Woodhouse310a5ab2009-06-28 18:52:20 +0100923 pte++;
David Woodhouse75e6bf92009-07-02 11:21:16 +0100924 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
925
David Woodhouse310a5ab2009-06-28 18:52:20 +0100926 domain_flush_cache(domain, first_pte,
927 (void *)pte - (void *)first_pte);
David Woodhouse59c36282009-09-19 07:36:28 -0700928
929 } while (start_pfn && start_pfn <= last_pfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700930}
931
Alex Williamson3269ee02013-06-15 10:27:19 -0600932static void dma_pte_free_level(struct dmar_domain *domain, int level,
933 struct dma_pte *pte, unsigned long pfn,
934 unsigned long start_pfn, unsigned long last_pfn)
935{
936 pfn = max(start_pfn, pfn);
937 pte = &pte[pfn_level_offset(pfn, level)];
938
939 do {
940 unsigned long level_pfn;
941 struct dma_pte *level_pte;
942
943 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
944 goto next;
945
946 level_pfn = pfn & level_mask(level - 1);
947 level_pte = phys_to_virt(dma_pte_addr(pte));
948
949 if (level > 2)
950 dma_pte_free_level(domain, level - 1, level_pte,
951 level_pfn, start_pfn, last_pfn);
952
953 /* If range covers entire pagetable, free it */
954 if (!(start_pfn > level_pfn ||
Alex Williamson08336fd2014-01-21 15:48:18 -0800955 last_pfn < level_pfn + level_size(level) - 1)) {
Alex Williamson3269ee02013-06-15 10:27:19 -0600956 dma_clear_pte(pte);
957 domain_flush_cache(domain, pte, sizeof(*pte));
958 free_pgtable_page(level_pte);
959 }
960next:
961 pfn += level_size(level);
962 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
963}
964
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700965/* free page table pages. last level pte should already be cleared */
966static void dma_pte_free_pagetable(struct dmar_domain *domain,
David Woodhoused794dc92009-06-28 00:27:49 +0100967 unsigned long start_pfn,
968 unsigned long last_pfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700969{
Jiang Liu162d1b12014-07-11 14:19:35 +0800970 BUG_ON(!domain_pfn_supported(domain, start_pfn));
971 BUG_ON(!domain_pfn_supported(domain, last_pfn));
David Woodhouse59c36282009-09-19 07:36:28 -0700972 BUG_ON(start_pfn > last_pfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700973
Jiang Liud41a4ad2014-07-11 14:19:34 +0800974 dma_pte_clear_range(domain, start_pfn, last_pfn);
975
David Woodhousef3a0a522009-06-30 03:40:07 +0100976 /* We don't need lock here; nobody else touches the iova range */
Alex Williamson3269ee02013-06-15 10:27:19 -0600977 dma_pte_free_level(domain, agaw_to_level(domain->agaw),
978 domain->pgd, 0, start_pfn, last_pfn);
David Woodhouse6660c632009-06-27 22:41:00 +0100979
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700980 /* free pgd */
David Woodhoused794dc92009-06-28 00:27:49 +0100981 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700982 free_pgtable_page(domain->pgd);
983 domain->pgd = NULL;
984 }
985}
986
David Woodhouseea8ea462014-03-05 17:09:32 +0000987/* When a page at a given level is being unlinked from its parent, we don't
988 need to *modify* it at all. All we need to do is make a list of all the
989 pages which can be freed just as soon as we've flushed the IOTLB and we
990 know the hardware page-walk will no longer touch them.
991 The 'pte' argument is the *parent* PTE, pointing to the page that is to
992 be freed. */
993static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
994 int level, struct dma_pte *pte,
995 struct page *freelist)
996{
997 struct page *pg;
998
999 pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1000 pg->freelist = freelist;
1001 freelist = pg;
1002
1003 if (level == 1)
1004 return freelist;
1005
Jiang Liuadeb2592014-04-09 10:20:39 +08001006 pte = page_address(pg);
1007 do {
David Woodhouseea8ea462014-03-05 17:09:32 +00001008 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1009 freelist = dma_pte_list_pagetables(domain, level - 1,
1010 pte, freelist);
Jiang Liuadeb2592014-04-09 10:20:39 +08001011 pte++;
1012 } while (!first_pte_in_page(pte));
David Woodhouseea8ea462014-03-05 17:09:32 +00001013
1014 return freelist;
1015}
1016
1017static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1018 struct dma_pte *pte, unsigned long pfn,
1019 unsigned long start_pfn,
1020 unsigned long last_pfn,
1021 struct page *freelist)
1022{
1023 struct dma_pte *first_pte = NULL, *last_pte = NULL;
1024
1025 pfn = max(start_pfn, pfn);
1026 pte = &pte[pfn_level_offset(pfn, level)];
1027
1028 do {
1029 unsigned long level_pfn;
1030
1031 if (!dma_pte_present(pte))
1032 goto next;
1033
1034 level_pfn = pfn & level_mask(level);
1035
1036 /* If range covers entire pagetable, free it */
1037 if (start_pfn <= level_pfn &&
1038 last_pfn >= level_pfn + level_size(level) - 1) {
 1039 /* These subordinate page tables are going away entirely. Don't
1040 bother to clear them; we're just going to *free* them. */
1041 if (level > 1 && !dma_pte_superpage(pte))
1042 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1043
1044 dma_clear_pte(pte);
1045 if (!first_pte)
1046 first_pte = pte;
1047 last_pte = pte;
1048 } else if (level > 1) {
1049 /* Recurse down into a level that isn't *entirely* obsolete */
1050 freelist = dma_pte_clear_level(domain, level - 1,
1051 phys_to_virt(dma_pte_addr(pte)),
1052 level_pfn, start_pfn, last_pfn,
1053 freelist);
1054 }
1055next:
1056 pfn += level_size(level);
1057 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1058
1059 if (first_pte)
1060 domain_flush_cache(domain, first_pte,
1061 (void *)++last_pte - (void *)first_pte);
1062
1063 return freelist;
1064}
1065
1066/* We can't just free the pages because the IOMMU may still be walking
1067 the page tables, and may have cached the intermediate levels. The
1068 pages can only be freed after the IOTLB flush has been done. */
1069struct page *domain_unmap(struct dmar_domain *domain,
1070 unsigned long start_pfn,
1071 unsigned long last_pfn)
1072{
David Woodhouseea8ea462014-03-05 17:09:32 +00001073 struct page *freelist = NULL;
1074
Jiang Liu162d1b12014-07-11 14:19:35 +08001075 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1076 BUG_ON(!domain_pfn_supported(domain, last_pfn));
David Woodhouseea8ea462014-03-05 17:09:32 +00001077 BUG_ON(start_pfn > last_pfn);
1078
1079 /* we don't need lock here; nobody else touches the iova range */
1080 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1081 domain->pgd, 0, start_pfn, last_pfn, NULL);
1082
1083 /* free pgd */
1084 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1085 struct page *pgd_page = virt_to_page(domain->pgd);
1086 pgd_page->freelist = freelist;
1087 freelist = pgd_page;
1088
1089 domain->pgd = NULL;
1090 }
1091
1092 return freelist;
1093}
1094
1095void dma_free_pagelist(struct page *freelist)
1096{
1097 struct page *pg;
1098
1099 while ((pg = freelist)) {
1100 freelist = pg->freelist;
1101 free_pgtable_page(page_address(pg));
1102 }
1103}
1104
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001105/* iommu handling */
1106static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1107{
1108 struct root_entry *root;
1109 unsigned long flags;
1110
Suresh Siddha4c923d42009-10-02 11:01:24 -07001111 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
Jiang Liuffebeb42014-11-09 22:48:02 +08001112 if (!root) {
1113 pr_err("IOMMU: allocating root entry for %s failed\n",
1114 iommu->name);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001115 return -ENOMEM;
Jiang Liuffebeb42014-11-09 22:48:02 +08001116 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001117
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001118 __iommu_flush_cache(iommu, root, ROOT_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001119
1120 spin_lock_irqsave(&iommu->lock, flags);
1121 iommu->root_entry = root;
1122 spin_unlock_irqrestore(&iommu->lock, flags);
1123
1124 return 0;
1125}
1126
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001127static void iommu_set_root_entry(struct intel_iommu *iommu)
1128{
David Woodhouse03ecc322015-02-13 14:35:21 +00001129 u64 addr;
David Woodhousec416daa2009-05-10 20:30:58 +01001130 u32 sts;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001131 unsigned long flag;
1132
David Woodhouse03ecc322015-02-13 14:35:21 +00001133 addr = virt_to_phys(iommu->root_entry);
1134 if (ecap_ecs(iommu->ecap))
1135 addr |= DMA_RTADDR_RTT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001136
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001137 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse03ecc322015-02-13 14:35:21 +00001138 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001139
David Woodhousec416daa2009-05-10 20:30:58 +01001140 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001141
1142 /* Make sure hardware complete it */
1143 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001144 readl, (sts & DMA_GSTS_RTPS), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001145
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001146 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001147}
1148
1149static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1150{
1151 u32 val;
1152 unsigned long flag;
1153
David Woodhouse9af88142009-02-13 23:18:03 +00001154 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001155 return;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001156
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001157 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse462b60f2009-05-10 20:18:18 +01001158 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001159
1160 /* Make sure hardware complete it */
1161 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001162 readl, (!(val & DMA_GSTS_WBFS)), val);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001163
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001164 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001165}
1166
 1167/* return value determines if we need a write buffer flush */
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001168static void __iommu_flush_context(struct intel_iommu *iommu,
1169 u16 did, u16 source_id, u8 function_mask,
1170 u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001171{
1172 u64 val = 0;
1173 unsigned long flag;
1174
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001175 switch (type) {
1176 case DMA_CCMD_GLOBAL_INVL:
1177 val = DMA_CCMD_GLOBAL_INVL;
1178 break;
1179 case DMA_CCMD_DOMAIN_INVL:
1180 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1181 break;
1182 case DMA_CCMD_DEVICE_INVL:
1183 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1184 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1185 break;
1186 default:
1187 BUG();
1188 }
1189 val |= DMA_CCMD_ICC;
1190
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001191 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001192 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1193
1194 /* Make sure hardware complete it */
1195 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1196 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1197
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001198 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001199}
1200
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001201/* return value determines if we need a write buffer flush */
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001202static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1203 u64 addr, unsigned int size_order, u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001204{
1205 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1206 u64 val = 0, val_iva = 0;
1207 unsigned long flag;
1208
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001209 switch (type) {
1210 case DMA_TLB_GLOBAL_FLUSH:
 1211 /* global flush doesn't need to set IVA_REG */
1212 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1213 break;
1214 case DMA_TLB_DSI_FLUSH:
1215 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1216 break;
1217 case DMA_TLB_PSI_FLUSH:
1218 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
David Woodhouseea8ea462014-03-05 17:09:32 +00001219 /* IH bit is passed in as part of address */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001220 val_iva = size_order | addr;
1221 break;
1222 default:
1223 BUG();
1224 }
1225 /* Note: set drain read/write */
1226#if 0
1227 /*
 1228 * This is probably meant to be extra safe. It looks like we can
1229 * ignore it without any impact.
1230 */
1231 if (cap_read_drain(iommu->cap))
1232 val |= DMA_TLB_READ_DRAIN;
1233#endif
1234 if (cap_write_drain(iommu->cap))
1235 val |= DMA_TLB_WRITE_DRAIN;
1236
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001237 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001238 /* Note: Only uses first TLB reg currently */
1239 if (val_iva)
1240 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1241 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1242
1243 /* Make sure hardware complete it */
1244 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1245 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1246
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001247 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001248
1249 /* check IOTLB invalidation granularity */
1250 if (DMA_TLB_IAIG(val) == 0)
1251 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
1252 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1253 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001254 (unsigned long long)DMA_TLB_IIRG(type),
1255 (unsigned long long)DMA_TLB_IAIG(val));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001256}
1257
David Woodhouse64ae8922014-03-09 12:52:30 -07001258static struct device_domain_info *
1259iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1260 u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001261{
Yu Zhao93a23a72009-05-18 13:51:37 +08001262 int found = 0;
1263 unsigned long flags;
1264 struct device_domain_info *info;
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001265 struct pci_dev *pdev;
Yu Zhao93a23a72009-05-18 13:51:37 +08001266
1267 if (!ecap_dev_iotlb_support(iommu->ecap))
1268 return NULL;
1269
1270 if (!iommu->qi)
1271 return NULL;
1272
1273 spin_lock_irqsave(&device_domain_lock, flags);
1274 list_for_each_entry(info, &domain->devices, link)
Jiang Liuc3b497c2014-07-11 14:19:25 +08001275 if (info->iommu == iommu && info->bus == bus &&
1276 info->devfn == devfn) {
Yu Zhao93a23a72009-05-18 13:51:37 +08001277 found = 1;
1278 break;
1279 }
1280 spin_unlock_irqrestore(&device_domain_lock, flags);
1281
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001282 if (!found || !info->dev || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001283 return NULL;
1284
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001285 pdev = to_pci_dev(info->dev);
1286
1287 if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
Yu Zhao93a23a72009-05-18 13:51:37 +08001288 return NULL;
1289
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001290 if (!dmar_find_matched_atsr_unit(pdev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001291 return NULL;
1292
Yu Zhao93a23a72009-05-18 13:51:37 +08001293 return info;
1294}
1295
1296static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1297{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001298 if (!info || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001299 return;
1300
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001301 pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
Yu Zhao93a23a72009-05-18 13:51:37 +08001302}
1303
1304static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1305{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001306 if (!info->dev || !dev_is_pci(info->dev) ||
1307 !pci_ats_enabled(to_pci_dev(info->dev)))
Yu Zhao93a23a72009-05-18 13:51:37 +08001308 return;
1309
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001310 pci_disable_ats(to_pci_dev(info->dev));
Yu Zhao93a23a72009-05-18 13:51:37 +08001311}
1312
1313static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1314 u64 addr, unsigned mask)
1315{
1316 u16 sid, qdep;
1317 unsigned long flags;
1318 struct device_domain_info *info;
1319
1320 spin_lock_irqsave(&device_domain_lock, flags);
1321 list_for_each_entry(info, &domain->devices, link) {
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001322 struct pci_dev *pdev;
1323 if (!info->dev || !dev_is_pci(info->dev))
1324 continue;
1325
1326 pdev = to_pci_dev(info->dev);
1327 if (!pci_ats_enabled(pdev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001328 continue;
1329
1330 sid = info->bus << 8 | info->devfn;
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001331 qdep = pci_ats_queue_depth(pdev);
Yu Zhao93a23a72009-05-18 13:51:37 +08001332 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1333 }
1334 spin_unlock_irqrestore(&device_domain_lock, flags);
1335}
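
/*
 * Illustrative example (annotation, not from the source): a device at
 * 02:03.0 has info->bus == 0x02 and info->devfn == 0x18, so the source-id
 * passed to qi_flush_dev_iotlb() above is
 *
 *	sid = 0x02 << 8 | 0x18 = 0x0218
 *
 * while qdep is whatever ATS invalidation queue depth that device advertises.
 */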
1336
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001337static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
David Woodhouseea8ea462014-03-05 17:09:32 +00001338 unsigned long pfn, unsigned int pages, int ih, int map)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001339{
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001340 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
David Woodhouse03d6a242009-06-28 15:33:46 +01001341 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001342
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001343 BUG_ON(pages == 0);
1344
David Woodhouseea8ea462014-03-05 17:09:32 +00001345 if (ih)
1346 ih = 1 << 6;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001347 /*
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001348 * Fall back to a domain-selective flush if there is no PSI support or
 1349 * the size is too big.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001350 * PSI requires the page count to be a power of two, and the base address
 1351 * to be naturally aligned to that size.
1352 */
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001353 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1354 iommu->flush.flush_iotlb(iommu, did, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001355 DMA_TLB_DSI_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001356 else
David Woodhouseea8ea462014-03-05 17:09:32 +00001357 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001358 DMA_TLB_PSI_FLUSH);
Yu Zhaobf92df32009-06-29 11:31:45 +08001359
1360 /*
Nadav Amit82653632010-04-01 13:24:40 +03001361 * In caching mode, changes of pages from non-present to present require
 1362 * a flush. However, the device IOTLB doesn't need to be flushed in this case.
Yu Zhaobf92df32009-06-29 11:31:45 +08001363 */
Nadav Amit82653632010-04-01 13:24:40 +03001364 if (!cap_caching_mode(iommu->cap) || !map)
Yu Zhao93a23a72009-05-18 13:51:37 +08001365 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001366}
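
/*
 * Worked example (annotation): asking the function above to flush 5 pages
 * gives mask = ilog2(__roundup_pow_of_two(5)) = ilog2(8) = 3, so the PSI
 * actually covers a naturally aligned block of 8 pages; if that mask ever
 * exceeds cap_max_amask_val() the code falls back to a DSI flush instead.
 */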
1367
mark grossf8bab732008-02-08 04:18:38 -08001368static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1369{
1370 u32 pmen;
1371 unsigned long flags;
1372
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001373 raw_spin_lock_irqsave(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001374 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1375 pmen &= ~DMA_PMEN_EPM;
1376 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1377
1378 /* wait for the protected region status bit to clear */
1379 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1380 readl, !(pmen & DMA_PMEN_PRS), pmen);
1381
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001382 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001383}
1384
Jiang Liu2a41cce2014-07-11 14:19:33 +08001385static void iommu_enable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001386{
1387 u32 sts;
1388 unsigned long flags;
1389
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001390 raw_spin_lock_irqsave(&iommu->register_lock, flags);
David Woodhousec416daa2009-05-10 20:30:58 +01001391 iommu->gcmd |= DMA_GCMD_TE;
1392 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001393
 1394 /* Make sure hardware completes it */
1395 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001396 readl, (sts & DMA_GSTS_TES), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001397
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001398 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001399}
1400
Jiang Liu2a41cce2014-07-11 14:19:33 +08001401static void iommu_disable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001402{
1403 u32 sts;
1404 unsigned long flag;
1405
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001406 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001407 iommu->gcmd &= ~DMA_GCMD_TE;
1408 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1409
 1410 /* Make sure hardware completes it */
1411 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001412 readl, (!(sts & DMA_GSTS_TES)), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001413
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001414 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001415}
1416
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001417
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001418static int iommu_init_domains(struct intel_iommu *iommu)
1419{
1420 unsigned long ndomains;
1421 unsigned long nlongs;
1422
1423 ndomains = cap_ndoms(iommu->cap);
Jiang Liu852bdb02014-01-06 14:18:11 +08001424 pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
1425 iommu->seq_id, ndomains);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001426 nlongs = BITS_TO_LONGS(ndomains);
1427
Donald Dutile94a91b52009-08-20 16:51:34 -04001428 spin_lock_init(&iommu->lock);
1429
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001430 /* TBD: there might be 64K domains;
 1431 * consider another allocation scheme for future chips
1432 */
1433 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1434 if (!iommu->domain_ids) {
Jiang Liu852bdb02014-01-06 14:18:11 +08001435 pr_err("IOMMU%d: allocating domain id array failed\n",
1436 iommu->seq_id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001437 return -ENOMEM;
1438 }
1439 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1440 GFP_KERNEL);
1441 if (!iommu->domains) {
Jiang Liu852bdb02014-01-06 14:18:11 +08001442 pr_err("IOMMU%d: allocating domain array failed\n",
1443 iommu->seq_id);
1444 kfree(iommu->domain_ids);
1445 iommu->domain_ids = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001446 return -ENOMEM;
1447 }
1448
1449 /*
 1450 * If caching mode is set, then invalid translations are tagged
 1451 * with domain id 0. Hence we need to pre-allocate it.
1452 */
1453 if (cap_caching_mode(iommu->cap))
1454 set_bit(0, iommu->domain_ids);
1455 return 0;
1456}
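
/*
 * Sizing note (annotation): with cap_ndoms(iommu->cap) == 256, the code
 * above allocates BITS_TO_LONGS(256) == 4 unsigned longs (on a 64-bit
 * kernel) for domain_ids plus 256 struct dmar_domain pointers for the
 * domains array.
 */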
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001457
Jiang Liuffebeb42014-11-09 22:48:02 +08001458static void disable_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001459{
1460 struct dmar_domain *domain;
Jiang Liu2a46ddf2014-07-11 14:19:30 +08001461 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001462
Donald Dutile94a91b52009-08-20 16:51:34 -04001463 if ((iommu->domains) && (iommu->domain_ids)) {
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001464 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
Jiang Liua4eaa862014-02-19 14:07:30 +08001465 /*
1466 * Domain id 0 is reserved for invalid translation
1467 * if hardware supports caching mode.
1468 */
1469 if (cap_caching_mode(iommu->cap) && i == 0)
1470 continue;
1471
Donald Dutile94a91b52009-08-20 16:51:34 -04001472 domain = iommu->domains[i];
1473 clear_bit(i, iommu->domain_ids);
Jiang Liu129ad282014-07-11 14:19:31 +08001474 if (domain_detach_iommu(domain, iommu) == 0 &&
1475 !domain_type_is_vm(domain))
Jiang Liu92d03cc2014-02-19 14:07:28 +08001476 domain_exit(domain);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001477 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001478 }
1479
1480 if (iommu->gcmd & DMA_GCMD_TE)
1481 iommu_disable_translation(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08001482}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001483
Jiang Liuffebeb42014-11-09 22:48:02 +08001484static void free_dmar_iommu(struct intel_iommu *iommu)
1485{
1486 if ((iommu->domains) && (iommu->domain_ids)) {
1487 kfree(iommu->domains);
1488 kfree(iommu->domain_ids);
1489 iommu->domains = NULL;
1490 iommu->domain_ids = NULL;
1491 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001492
Weidong Hand9630fe2008-12-08 11:06:32 +08001493 g_iommus[iommu->seq_id] = NULL;
1494
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001495 /* free context mapping */
1496 free_context_table(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001497}
1498
Jiang Liuab8dfe22014-07-11 14:19:27 +08001499static struct dmar_domain *alloc_domain(int flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001500{
Jiang Liu92d03cc2014-02-19 14:07:28 +08001501 /* domain id for a virtual machine; it won't be set in a context entry */
1502 static atomic_t vm_domid = ATOMIC_INIT(0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001503 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001504
1505 domain = alloc_domain_mem();
1506 if (!domain)
1507 return NULL;
1508
Jiang Liuab8dfe22014-07-11 14:19:27 +08001509 memset(domain, 0, sizeof(*domain));
Suresh Siddha4c923d42009-10-02 11:01:24 -07001510 domain->nid = -1;
Jiang Liuab8dfe22014-07-11 14:19:27 +08001511 domain->flags = flags;
Jiang Liu92d03cc2014-02-19 14:07:28 +08001512 spin_lock_init(&domain->iommu_lock);
1513 INIT_LIST_HEAD(&domain->devices);
Jiang Liuab8dfe22014-07-11 14:19:27 +08001514 if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
Jiang Liu92d03cc2014-02-19 14:07:28 +08001515 domain->id = atomic_inc_return(&vm_domid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001516
1517 return domain;
1518}
1519
Jiang Liufb170fb2014-07-11 14:19:28 +08001520static int __iommu_attach_domain(struct dmar_domain *domain,
1521 struct intel_iommu *iommu)
1522{
1523 int num;
1524 unsigned long ndomains;
1525
1526 ndomains = cap_ndoms(iommu->cap);
1527 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1528 if (num < ndomains) {
1529 set_bit(num, iommu->domain_ids);
1530 iommu->domains[num] = domain;
1531 } else {
1532 num = -ENOSPC;
1533 }
1534
1535 return num;
1536}
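
/*
 * Annotation: the domain-id is simply the first clear bit in
 * iommu->domain_ids, so if ids 0 and 1 are already taken the next attach
 * returns 2; once all cap_ndoms() ids are in use the caller sees -ENOSPC.
 */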
1537
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001538static int iommu_attach_domain(struct dmar_domain *domain,
1539 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001540{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001541 int num;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001542 unsigned long flags;
1543
Weidong Han8c11e792008-12-08 15:29:22 +08001544 spin_lock_irqsave(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001545 num = __iommu_attach_domain(domain, iommu);
Jiang Liu44bde612014-07-11 14:19:29 +08001546 spin_unlock_irqrestore(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001547 if (num < 0)
1548 pr_err("IOMMU: no free domain ids\n");
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001549
Jiang Liufb170fb2014-07-11 14:19:28 +08001550 return num;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001551}
1552
Jiang Liu44bde612014-07-11 14:19:29 +08001553static int iommu_attach_vm_domain(struct dmar_domain *domain,
1554 struct intel_iommu *iommu)
1555{
1556 int num;
1557 unsigned long ndomains;
1558
1559 ndomains = cap_ndoms(iommu->cap);
1560 for_each_set_bit(num, iommu->domain_ids, ndomains)
1561 if (iommu->domains[num] == domain)
1562 return num;
1563
1564 return __iommu_attach_domain(domain, iommu);
1565}
1566
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001567static void iommu_detach_domain(struct dmar_domain *domain,
1568 struct intel_iommu *iommu)
1569{
1570 unsigned long flags;
1571 int num, ndomains;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001572
1573 spin_lock_irqsave(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001574 if (domain_type_is_vm_or_si(domain)) {
1575 ndomains = cap_ndoms(iommu->cap);
1576 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1577 if (iommu->domains[num] == domain) {
1578 clear_bit(num, iommu->domain_ids);
1579 iommu->domains[num] = NULL;
1580 break;
1581 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001582 }
Jiang Liufb170fb2014-07-11 14:19:28 +08001583 } else {
1584 clear_bit(domain->id, iommu->domain_ids);
1585 iommu->domains[domain->id] = NULL;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001586 }
Weidong Han8c11e792008-12-08 15:29:22 +08001587 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001588}
1589
Jiang Liufb170fb2014-07-11 14:19:28 +08001590static void domain_attach_iommu(struct dmar_domain *domain,
1591 struct intel_iommu *iommu)
1592{
1593 unsigned long flags;
1594
1595 spin_lock_irqsave(&domain->iommu_lock, flags);
1596 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
1597 domain->iommu_count++;
1598 if (domain->iommu_count == 1)
1599 domain->nid = iommu->node;
1600 domain_update_iommu_cap(domain);
1601 }
1602 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1603}
1604
1605static int domain_detach_iommu(struct dmar_domain *domain,
1606 struct intel_iommu *iommu)
1607{
1608 unsigned long flags;
1609 int count = INT_MAX;
1610
1611 spin_lock_irqsave(&domain->iommu_lock, flags);
1612 if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
1613 count = --domain->iommu_count;
1614 domain_update_iommu_cap(domain);
1615 }
1616 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1617
1618 return count;
1619}
1620
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001621static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001622static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001623
Joseph Cihula51a63e62011-03-21 11:04:24 -07001624static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001625{
1626 struct pci_dev *pdev = NULL;
1627 struct iova *iova;
1628 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001629
David Millerf6611972008-02-06 01:36:23 -08001630 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001631
Mark Gross8a443df2008-03-04 14:59:31 -08001632 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1633 &reserved_rbtree_key);
1634
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001635 /* IOAPIC ranges shouldn't be accessed by DMA */
1636 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1637 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001638 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001639 printk(KERN_ERR "Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001640 return -ENODEV;
1641 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001642
1643 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1644 for_each_pci_dev(pdev) {
1645 struct resource *r;
1646
1647 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1648 r = &pdev->resource[i];
1649 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1650 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001651 iova = reserve_iova(&reserved_iova_list,
1652 IOVA_PFN(r->start),
1653 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001654 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001655 printk(KERN_ERR "Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001656 return -ENODEV;
1657 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001658 }
1659 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001660 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001661}
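
/*
 * Example (annotation): the IOAPIC reservation above covers the pfn range
 * IOVA_PFN(0xfee00000)..IOVA_PFN(0xfeefffff) == 0xfee00..0xfeeff, so the
 * allocator can never hand out an IOVA inside the interrupt-delivery window.
 */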
1662
1663static void domain_reserve_special_ranges(struct dmar_domain *domain)
1664{
1665 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1666}
1667
1668static inline int guestwidth_to_adjustwidth(int gaw)
1669{
1670 int agaw;
1671 int r = (gaw - 12) % 9;
1672
1673 if (r == 0)
1674 agaw = gaw;
1675 else
1676 agaw = gaw + 9 - r;
1677 if (agaw > 64)
1678 agaw = 64;
1679 return agaw;
1680}
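
/*
 * Examples (annotation): guestwidth_to_adjustwidth(39) == 39 while
 * guestwidth_to_adjustwidth(40) == 48, i.e. the guest width is rounded up
 * to the next 12 + 9*n boundary that a whole page-table level can cover.
 */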
1681
1682static int domain_init(struct dmar_domain *domain, int guest_width)
1683{
1684 struct intel_iommu *iommu;
1685 int adjust_width, agaw;
1686 unsigned long sagaw;
1687
David Millerf6611972008-02-06 01:36:23 -08001688 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001689 domain_reserve_special_ranges(domain);
1690
1691 /* calculate AGAW */
Weidong Han8c11e792008-12-08 15:29:22 +08001692 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001693 if (guest_width > cap_mgaw(iommu->cap))
1694 guest_width = cap_mgaw(iommu->cap);
1695 domain->gaw = guest_width;
1696 adjust_width = guestwidth_to_adjustwidth(guest_width);
1697 agaw = width_to_agaw(adjust_width);
1698 sagaw = cap_sagaw(iommu->cap);
1699 if (!test_bit(agaw, &sagaw)) {
1700 /* hardware doesn't support it, choose a bigger one */
1701 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1702 agaw = find_next_bit(&sagaw, 5, agaw);
1703 if (agaw >= 5)
1704 return -ENODEV;
1705 }
1706 domain->agaw = agaw;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001707
Weidong Han8e6040972008-12-08 15:49:06 +08001708 if (ecap_coherent(iommu->ecap))
1709 domain->iommu_coherency = 1;
1710 else
1711 domain->iommu_coherency = 0;
1712
Sheng Yang58c610b2009-03-18 15:33:05 +08001713 if (ecap_sc_support(iommu->ecap))
1714 domain->iommu_snooping = 1;
1715 else
1716 domain->iommu_snooping = 0;
1717
David Woodhouse214e39a2014-03-19 10:38:49 +00001718 if (intel_iommu_superpage)
1719 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1720 else
1721 domain->iommu_superpage = 0;
1722
Suresh Siddha4c923d42009-10-02 11:01:24 -07001723 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001724
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001725 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001726 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001727 if (!domain->pgd)
1728 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001729 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001730 return 0;
1731}
1732
1733static void domain_exit(struct dmar_domain *domain)
1734{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001735 struct dmar_drhd_unit *drhd;
1736 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00001737 struct page *freelist = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001738
 1739 /* Domain 0 is reserved, so don't process it */
1740 if (!domain)
1741 return;
1742
Alex Williamson7b668352011-05-24 12:02:41 +01001743 /* Flush any lazy unmaps that may reference this domain */
1744 if (!intel_iommu_strict)
1745 flush_unmaps_timeout(0);
1746
Jiang Liu92d03cc2014-02-19 14:07:28 +08001747 /* remove associated devices */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001748 domain_remove_dev_info(domain);
Jiang Liu92d03cc2014-02-19 14:07:28 +08001749
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001750 /* destroy iovas */
1751 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001752
David Woodhouseea8ea462014-03-05 17:09:32 +00001753 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001754
Jiang Liu92d03cc2014-02-19 14:07:28 +08001755 /* clear attached or cached domains */
Jiang Liu0e242612014-02-19 14:07:34 +08001756 rcu_read_lock();
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001757 for_each_active_iommu(iommu, drhd)
Jiang Liufb170fb2014-07-11 14:19:28 +08001758 iommu_detach_domain(domain, iommu);
Jiang Liu0e242612014-02-19 14:07:34 +08001759 rcu_read_unlock();
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001760
David Woodhouseea8ea462014-03-05 17:09:32 +00001761 dma_free_pagelist(freelist);
1762
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001763 free_domain_mem(domain);
1764}
1765
David Woodhouse64ae8922014-03-09 12:52:30 -07001766static int domain_context_mapping_one(struct dmar_domain *domain,
1767 struct intel_iommu *iommu,
1768 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001769{
1770 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001771 unsigned long flags;
Weidong Hanea6606b2008-12-08 23:08:15 +08001772 struct dma_pte *pgd;
Weidong Hanea6606b2008-12-08 23:08:15 +08001773 int id;
1774 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001775 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001776
1777 pr_debug("Set context mapping for %02x:%02x.%d\n",
1778 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001779
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001780 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001781 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1782 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001783
David Woodhouse03ecc322015-02-13 14:35:21 +00001784 spin_lock_irqsave(&iommu->lock, flags);
1785 context = iommu_context_addr(iommu, bus, devfn, 1);
1786 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001787 if (!context)
1788 return -ENOMEM;
1789 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001790 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001791 spin_unlock_irqrestore(&iommu->lock, flags);
1792 return 0;
1793 }
1794
Weidong Hanea6606b2008-12-08 23:08:15 +08001795 id = domain->id;
1796 pgd = domain->pgd;
1797
Jiang Liuab8dfe22014-07-11 14:19:27 +08001798 if (domain_type_is_vm_or_si(domain)) {
Jiang Liu44bde612014-07-11 14:19:29 +08001799 if (domain_type_is_vm(domain)) {
1800 id = iommu_attach_vm_domain(domain, iommu);
Jiang Liufb170fb2014-07-11 14:19:28 +08001801 if (id < 0) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001802 spin_unlock_irqrestore(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001803 pr_err("IOMMU: no free domain ids\n");
Weidong Hanea6606b2008-12-08 23:08:15 +08001804 return -EFAULT;
1805 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001806 }
1807
 1808 /* Skip the top levels of the page table for an
 1809 * iommu whose agaw is smaller than the domain's agaw.
Chris Wright1672af12009-12-02 12:06:34 -08001810 * Unnecessary for PT mode.
Weidong Hanea6606b2008-12-08 23:08:15 +08001811 */
Chris Wright1672af12009-12-02 12:06:34 -08001812 if (translation != CONTEXT_TT_PASS_THROUGH) {
1813 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1814 pgd = phys_to_virt(dma_pte_addr(pgd));
1815 if (!dma_pte_present(pgd)) {
1816 spin_unlock_irqrestore(&iommu->lock, flags);
1817 return -ENOMEM;
1818 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001819 }
1820 }
1821 }
1822
1823 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001824
Yu Zhao93a23a72009-05-18 13:51:37 +08001825 if (translation != CONTEXT_TT_PASS_THROUGH) {
David Woodhouse64ae8922014-03-09 12:52:30 -07001826 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
Yu Zhao93a23a72009-05-18 13:51:37 +08001827 translation = info ? CONTEXT_TT_DEV_IOTLB :
1828 CONTEXT_TT_MULTI_LEVEL;
1829 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001830 /*
 1831 * In pass-through mode, AW must be programmed to indicate the largest
 1832 * AGAW value supported by hardware, and ASR is ignored by hardware.
1833 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001834 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001835 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001836 else {
1837 context_set_address_root(context, virt_to_phys(pgd));
1838 context_set_address_width(context, iommu->agaw);
1839 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001840
1841 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001842 context_set_fault_enable(context);
1843 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001844 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001845
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001846 /*
1847 * It's a non-present to present mapping. If hardware doesn't cache
 1848 * non-present entries we only need to flush the write-buffer. If it
 1849 * _does_ cache non-present entries, then it does so in the special
1850 * domain #0, which we have to flush:
1851 */
1852 if (cap_caching_mode(iommu->cap)) {
1853 iommu->flush.flush_context(iommu, 0,
1854 (((u16)bus) << 8) | devfn,
1855 DMA_CCMD_MASK_NOBIT,
1856 DMA_CCMD_DEVICE_INVL);
Jiang Liu18fd7792014-07-11 14:19:26 +08001857 iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001858 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001859 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001860 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001861 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001862 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001863
Jiang Liufb170fb2014-07-11 14:19:28 +08001864 domain_attach_iommu(domain, iommu);
1865
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001866 return 0;
1867}
1868
Alex Williamson579305f2014-07-03 09:51:43 -06001869struct domain_context_mapping_data {
1870 struct dmar_domain *domain;
1871 struct intel_iommu *iommu;
1872 int translation;
1873};
1874
1875static int domain_context_mapping_cb(struct pci_dev *pdev,
1876 u16 alias, void *opaque)
1877{
1878 struct domain_context_mapping_data *data = opaque;
1879
1880 return domain_context_mapping_one(data->domain, data->iommu,
1881 PCI_BUS_NUM(alias), alias & 0xff,
1882 data->translation);
1883}
1884
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001885static int
David Woodhousee1f167f2014-03-09 15:24:46 -07001886domain_context_mapping(struct dmar_domain *domain, struct device *dev,
1887 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001888{
David Woodhouse64ae8922014-03-09 12:52:30 -07001889 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07001890 u8 bus, devfn;
Alex Williamson579305f2014-07-03 09:51:43 -06001891 struct domain_context_mapping_data data;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001892
David Woodhousee1f167f2014-03-09 15:24:46 -07001893 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse64ae8922014-03-09 12:52:30 -07001894 if (!iommu)
1895 return -ENODEV;
1896
Alex Williamson579305f2014-07-03 09:51:43 -06001897 if (!dev_is_pci(dev))
1898 return domain_context_mapping_one(domain, iommu, bus, devfn,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001899 translation);
Alex Williamson579305f2014-07-03 09:51:43 -06001900
1901 data.domain = domain;
1902 data.iommu = iommu;
1903 data.translation = translation;
1904
1905 return pci_for_each_dma_alias(to_pci_dev(dev),
1906 &domain_context_mapping_cb, &data);
1907}
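
/*
 * Annotation: pci_for_each_dma_alias() invokes the helper above once for
 * the device's own requester-id and once for every alias the topology may
 * generate (for example a PCIe-to-PCI bridge that takes ownership of its
 * secondary bus), so each possible request-id gets a context entry pointing
 * at the same domain.
 */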
1908
1909static int domain_context_mapped_cb(struct pci_dev *pdev,
1910 u16 alias, void *opaque)
1911{
1912 struct intel_iommu *iommu = opaque;
1913
1914 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001915}
1916
David Woodhousee1f167f2014-03-09 15:24:46 -07001917static int domain_context_mapped(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001918{
Weidong Han5331fe62008-12-08 23:00:00 +08001919 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07001920 u8 bus, devfn;
Weidong Han5331fe62008-12-08 23:00:00 +08001921
David Woodhousee1f167f2014-03-09 15:24:46 -07001922 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001923 if (!iommu)
1924 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001925
Alex Williamson579305f2014-07-03 09:51:43 -06001926 if (!dev_is_pci(dev))
1927 return device_context_mapped(iommu, bus, devfn);
David Woodhousee1f167f2014-03-09 15:24:46 -07001928
Alex Williamson579305f2014-07-03 09:51:43 -06001929 return !pci_for_each_dma_alias(to_pci_dev(dev),
1930 domain_context_mapped_cb, iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001931}
1932
Fenghua Yuf5329592009-08-04 15:09:37 -07001933/* Returns the number of VT-d pages, but aligned to the MM page size */
1934static inline unsigned long aligned_nrpages(unsigned long host_addr,
1935 size_t size)
1936{
1937 host_addr &= ~PAGE_MASK;
1938 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1939}
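
/*
 * Example (annotation): aligned_nrpages(0x1234, 0x1000) reduces the host
 * address to its page offset (0x234) and returns
 * PAGE_ALIGN(0x234 + 0x1000) >> VTD_PAGE_SHIFT == 2, because a one-page
 * buffer that starts mid-page straddles two 4KiB VT-d pages.
 */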
1940
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001941/* Return largest possible superpage level for a given mapping */
1942static inline int hardware_largepage_caps(struct dmar_domain *domain,
1943 unsigned long iov_pfn,
1944 unsigned long phy_pfn,
1945 unsigned long pages)
1946{
1947 int support, level = 1;
1948 unsigned long pfnmerge;
1949
1950 support = domain->iommu_superpage;
1951
1952 /* To use a large page, the virtual *and* physical addresses
1953 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1954 of them will mean we have to use smaller pages. So just
1955 merge them and check both at once. */
1956 pfnmerge = iov_pfn | phy_pfn;
1957
1958 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1959 pages >>= VTD_STRIDE_SHIFT;
1960 if (!pages)
1961 break;
1962 pfnmerge >>= VTD_STRIDE_SHIFT;
1963 level++;
1964 support--;
1965 }
1966 return level;
1967}
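
/*
 * Sketch of the intent (annotation): with domain->iommu_superpage == 2
 * (2MiB and 1GiB supported), an iov_pfn and phy_pfn that are both
 * 2MiB-aligned (low 9 bits clear) and at least 512 remaining pages give
 * level 2, so __domain_mapping() below can install 2MiB PTEs; a misaligned
 * bit in either pfn keeps it at level 1 (plain 4KiB pages).
 */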
1968
David Woodhouse9051aa02009-06-29 12:30:54 +01001969static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1970 struct scatterlist *sg, unsigned long phys_pfn,
1971 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01001972{
1973 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01001974 phys_addr_t uninitialized_var(pteval);
Jiang Liucc4f14a2014-11-26 09:42:10 +08001975 unsigned long sg_res = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001976 unsigned int largepage_lvl = 0;
1977 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01001978
Jiang Liu162d1b12014-07-11 14:19:35 +08001979 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
David Woodhousee1605492009-06-29 11:17:38 +01001980
1981 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1982 return -EINVAL;
1983
1984 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1985
Jiang Liucc4f14a2014-11-26 09:42:10 +08001986 if (!sg) {
1987 sg_res = nr_pages;
David Woodhouse9051aa02009-06-29 12:30:54 +01001988 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1989 }
1990
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001991 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01001992 uint64_t tmp;
1993
David Woodhousee1605492009-06-29 11:17:38 +01001994 if (!sg_res) {
Fenghua Yuf5329592009-08-04 15:09:37 -07001995 sg_res = aligned_nrpages(sg->offset, sg->length);
David Woodhousee1605492009-06-29 11:17:38 +01001996 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1997 sg->dma_length = sg->length;
1998 pteval = page_to_phys(sg_page(sg)) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001999 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01002000 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002001
David Woodhousee1605492009-06-29 11:17:38 +01002002 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002003 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2004
David Woodhouse5cf0a762014-03-19 16:07:49 +00002005 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01002006 if (!pte)
2007 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002008 /* It is a large page */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002009 if (largepage_lvl > 1) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002010 pteval |= DMA_PTE_LARGE_PAGE;
Jiang Liud41a4ad2014-07-11 14:19:34 +08002011 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2012 /*
2013 * Ensure that old small page tables are
 2014 * removed to make room for the superpage,
2015 * if they exist.
2016 */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002017 dma_pte_free_pagetable(domain, iov_pfn,
Jiang Liud41a4ad2014-07-11 14:19:34 +08002018 iov_pfn + lvl_pages - 1);
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002019 } else {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002020 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002021 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002022
David Woodhousee1605492009-06-29 11:17:38 +01002023 }
 2024 /* We don't need a lock here; nobody else
 2025 * touches this iova range
2026 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01002027 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01002028 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01002029 static int dumps = 5;
David Woodhousec85994e2009-07-01 19:21:24 +01002030 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2031 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01002032 if (dumps) {
2033 dumps--;
2034 debug_dma_dump_mappings(NULL);
2035 }
2036 WARN_ON(1);
2037 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002038
2039 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2040
2041 BUG_ON(nr_pages < lvl_pages);
2042 BUG_ON(sg_res < lvl_pages);
2043
2044 nr_pages -= lvl_pages;
2045 iov_pfn += lvl_pages;
2046 phys_pfn += lvl_pages;
2047 pteval += lvl_pages * VTD_PAGE_SIZE;
2048 sg_res -= lvl_pages;
2049
2050 /* If the next PTE would be the first in a new page, then we
2051 need to flush the cache on the entries we've just written.
2052 And then we'll need to recalculate 'pte', so clear it and
2053 let it get set again in the if (!pte) block above.
2054
2055 If we're done (!nr_pages) we need to flush the cache too.
2056
2057 Also if we've been setting superpages, we may need to
2058 recalculate 'pte' and switch back to smaller pages for the
2059 end of the mapping, if the trailing size is not enough to
2060 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01002061 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002062 if (!nr_pages || first_pte_in_page(pte) ||
2063 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01002064 domain_flush_cache(domain, first_pte,
2065 (void *)pte - (void *)first_pte);
2066 pte = NULL;
2067 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002068
2069 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01002070 sg = sg_next(sg);
2071 }
2072 return 0;
2073}
2074
David Woodhouse9051aa02009-06-29 12:30:54 +01002075static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2076 struct scatterlist *sg, unsigned long nr_pages,
2077 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002078{
David Woodhouse9051aa02009-06-29 12:30:54 +01002079 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2080}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002081
David Woodhouse9051aa02009-06-29 12:30:54 +01002082static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2083 unsigned long phys_pfn, unsigned long nr_pages,
2084 int prot)
2085{
2086 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002087}
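
/*
 * Usage sketch (annotation, illustrative values): identity-mapping 16 pages
 * starting at pfn 0x800 with read/write permission would look like
 *
 *	domain_pfn_mapping(domain, 0x800, 0x800, 16,
 *			   DMA_PTE_READ | DMA_PTE_WRITE);
 *
 * which is the shape of call iommu_domain_identity_map() makes below.
 */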
2088
Weidong Hanc7151a82008-12-08 22:51:37 +08002089static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002090{
Weidong Hanc7151a82008-12-08 22:51:37 +08002091 if (!iommu)
2092 return;
Weidong Han8c11e792008-12-08 15:29:22 +08002093
2094 clear_context_table(iommu, bus, devfn);
2095 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002096 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002097 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002098}
2099
David Woodhouse109b9b02012-05-25 17:43:02 +01002100static inline void unlink_domain_info(struct device_domain_info *info)
2101{
2102 assert_spin_locked(&device_domain_lock);
2103 list_del(&info->link);
2104 list_del(&info->global);
2105 if (info->dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002106 info->dev->archdata.iommu = NULL;
David Woodhouse109b9b02012-05-25 17:43:02 +01002107}
2108
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002109static void domain_remove_dev_info(struct dmar_domain *domain)
2110{
Yijing Wang3a74ca02014-05-20 20:37:47 +08002111 struct device_domain_info *info, *tmp;
Jiang Liufb170fb2014-07-11 14:19:28 +08002112 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002113
2114 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wang3a74ca02014-05-20 20:37:47 +08002115 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
David Woodhouse109b9b02012-05-25 17:43:02 +01002116 unlink_domain_info(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002117 spin_unlock_irqrestore(&device_domain_lock, flags);
2118
Yu Zhao93a23a72009-05-18 13:51:37 +08002119 iommu_disable_dev_iotlb(info);
David Woodhouse7c7faa12014-03-09 13:33:06 -07002120 iommu_detach_dev(info->iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002121
Jiang Liuab8dfe22014-07-11 14:19:27 +08002122 if (domain_type_is_vm(domain)) {
David Woodhouse7c7faa12014-03-09 13:33:06 -07002123 iommu_detach_dependent_devices(info->iommu, info->dev);
Jiang Liufb170fb2014-07-11 14:19:28 +08002124 domain_detach_iommu(domain, info->iommu);
Jiang Liu92d03cc2014-02-19 14:07:28 +08002125 }
2126
2127 free_devinfo_mem(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002128 spin_lock_irqsave(&device_domain_lock, flags);
2129 }
2130 spin_unlock_irqrestore(&device_domain_lock, flags);
2131}
2132
2133/*
2134 * find_domain
David Woodhouse1525a292014-03-06 16:19:30 +00002135 * Note: struct device->archdata.iommu stores the info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002136 */
David Woodhouse1525a292014-03-06 16:19:30 +00002137static struct dmar_domain *find_domain(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002138{
2139 struct device_domain_info *info;
2140
 2141 /* No lock here; assumes no domain exit in the normal case */
David Woodhouse1525a292014-03-06 16:19:30 +00002142 info = dev->archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002143 if (info)
2144 return info->domain;
2145 return NULL;
2146}
2147
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002148static inline struct device_domain_info *
Jiang Liu745f2582014-02-19 14:07:26 +08002149dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2150{
2151 struct device_domain_info *info;
2152
2153 list_for_each_entry(info, &device_domain_list, global)
David Woodhouse41e80dca2014-03-09 13:55:54 -07002154 if (info->iommu->segment == segment && info->bus == bus &&
Jiang Liu745f2582014-02-19 14:07:26 +08002155 info->devfn == devfn)
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002156 return info;
Jiang Liu745f2582014-02-19 14:07:26 +08002157
2158 return NULL;
2159}
2160
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002161static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
David Woodhouse41e80dca2014-03-09 13:55:54 -07002162 int bus, int devfn,
David Woodhouseb718cd32014-03-09 13:11:33 -07002163 struct device *dev,
2164 struct dmar_domain *domain)
Jiang Liu745f2582014-02-19 14:07:26 +08002165{
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002166 struct dmar_domain *found = NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002167 struct device_domain_info *info;
2168 unsigned long flags;
2169
2170 info = alloc_devinfo_mem();
2171 if (!info)
David Woodhouseb718cd32014-03-09 13:11:33 -07002172 return NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002173
Jiang Liu745f2582014-02-19 14:07:26 +08002174 info->bus = bus;
2175 info->devfn = devfn;
2176 info->dev = dev;
2177 info->domain = domain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002178 info->iommu = iommu;
Jiang Liu745f2582014-02-19 14:07:26 +08002179
2180 spin_lock_irqsave(&device_domain_lock, flags);
2181 if (dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002182 found = find_domain(dev);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002183 else {
2184 struct device_domain_info *info2;
David Woodhouse41e80dca2014-03-09 13:55:54 -07002185 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002186 if (info2)
2187 found = info2->domain;
2188 }
Jiang Liu745f2582014-02-19 14:07:26 +08002189 if (found) {
2190 spin_unlock_irqrestore(&device_domain_lock, flags);
2191 free_devinfo_mem(info);
David Woodhouseb718cd32014-03-09 13:11:33 -07002192 /* Caller must free the original domain */
2193 return found;
Jiang Liu745f2582014-02-19 14:07:26 +08002194 }
2195
David Woodhouseb718cd32014-03-09 13:11:33 -07002196 list_add(&info->link, &domain->devices);
2197 list_add(&info->global, &device_domain_list);
2198 if (dev)
2199 dev->archdata.iommu = info;
2200 spin_unlock_irqrestore(&device_domain_lock, flags);
2201
2202 return domain;
Jiang Liu745f2582014-02-19 14:07:26 +08002203}
2204
Alex Williamson579305f2014-07-03 09:51:43 -06002205static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2206{
2207 *(u16 *)opaque = alias;
2208 return 0;
2209}
2210
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002211/* domain is initialized */
David Woodhouse146922e2014-03-09 15:44:17 -07002212static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002213{
Alex Williamson579305f2014-07-03 09:51:43 -06002214 struct dmar_domain *domain, *tmp;
2215 struct intel_iommu *iommu;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002216 struct device_domain_info *info;
Alex Williamson579305f2014-07-03 09:51:43 -06002217 u16 dma_alias;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002218 unsigned long flags;
Yijing Wangaa4d0662014-05-26 20:14:06 +08002219 u8 bus, devfn;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002220
David Woodhouse146922e2014-03-09 15:44:17 -07002221 domain = find_domain(dev);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002222 if (domain)
2223 return domain;
2224
David Woodhouse146922e2014-03-09 15:44:17 -07002225 iommu = device_to_iommu(dev, &bus, &devfn);
2226 if (!iommu)
Alex Williamson579305f2014-07-03 09:51:43 -06002227 return NULL;
2228
2229 if (dev_is_pci(dev)) {
2230 struct pci_dev *pdev = to_pci_dev(dev);
2231
2232 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2233
2234 spin_lock_irqsave(&device_domain_lock, flags);
2235 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2236 PCI_BUS_NUM(dma_alias),
2237 dma_alias & 0xff);
2238 if (info) {
2239 iommu = info->iommu;
2240 domain = info->domain;
2241 }
2242 spin_unlock_irqrestore(&device_domain_lock, flags);
2243
 2244 /* DMA alias already has a domain, use it */
2245 if (info)
2246 goto found_domain;
2247 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002248
David Woodhouse146922e2014-03-09 15:44:17 -07002249 /* Allocate and initialize new domain for the device */
Jiang Liuab8dfe22014-07-11 14:19:27 +08002250 domain = alloc_domain(0);
Jiang Liu745f2582014-02-19 14:07:26 +08002251 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002252 return NULL;
Jiang Liu44bde612014-07-11 14:19:29 +08002253 domain->id = iommu_attach_domain(domain, iommu);
2254 if (domain->id < 0) {
Alex Williamson2fe9723d2011-03-04 14:52:30 -07002255 free_domain_mem(domain);
Alex Williamson579305f2014-07-03 09:51:43 -06002256 return NULL;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002257 }
Jiang Liufb170fb2014-07-11 14:19:28 +08002258 domain_attach_iommu(domain, iommu);
Alex Williamson579305f2014-07-03 09:51:43 -06002259 if (domain_init(domain, gaw)) {
2260 domain_exit(domain);
2261 return NULL;
2262 }
2263
2264 /* register PCI DMA alias device */
2265 if (dev_is_pci(dev)) {
2266 tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2267 dma_alias & 0xff, NULL, domain);
2268
2269 if (!tmp || tmp != domain) {
2270 domain_exit(domain);
2271 domain = tmp;
2272 }
2273
David Woodhouseb718cd32014-03-09 13:11:33 -07002274 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002275 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002276 }
2277
2278found_domain:
Alex Williamson579305f2014-07-03 09:51:43 -06002279 tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2280
2281 if (!tmp || tmp != domain) {
2282 domain_exit(domain);
2283 domain = tmp;
2284 }
David Woodhouseb718cd32014-03-09 13:11:33 -07002285
2286 return domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002287}
2288
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002289static int iommu_identity_mapping;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002290#define IDENTMAP_ALL 1
2291#define IDENTMAP_GFX 2
2292#define IDENTMAP_AZALIA 4
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002293
David Woodhouseb2132032009-06-26 18:50:28 +01002294static int iommu_domain_identity_map(struct dmar_domain *domain,
2295 unsigned long long start,
2296 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002297{
David Woodhousec5395d52009-06-28 16:35:56 +01002298 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2299 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002300
David Woodhousec5395d52009-06-28 16:35:56 +01002301 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2302 dma_to_mm_pfn(last_vpfn))) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002303 printk(KERN_ERR "IOMMU: reserve iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002304 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002305 }
2306
David Woodhousec5395d52009-06-28 16:35:56 +01002307 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2308 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002309 /*
 2310 * The RMRR range might overlap with the physical memory range;
 2311 * clear it first
2312 */
David Woodhousec5395d52009-06-28 16:35:56 +01002313 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002314
David Woodhousec5395d52009-06-28 16:35:56 +01002315 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2316 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002317 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002318}
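
/*
 * Worked example (annotation): an RMRR spanning 0xA0000..0xFFFFF yields
 * first_vpfn == 0xA0 and last_vpfn == 0xFF, so the function above reserves
 * those 96 IOVA pfns and installs 1:1 translations for them in the domain.
 */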
2319
David Woodhouse0b9d9752014-03-09 15:48:15 -07002320static int iommu_prepare_identity_map(struct device *dev,
David Woodhouseb2132032009-06-26 18:50:28 +01002321 unsigned long long start,
2322 unsigned long long end)
2323{
2324 struct dmar_domain *domain;
2325 int ret;
2326
David Woodhouse0b9d9752014-03-09 15:48:15 -07002327 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01002328 if (!domain)
2329 return -ENOMEM;
2330
David Woodhouse19943b02009-08-04 16:19:20 +01002331 /* For _hardware_ passthrough, don't bother. But for software
2332 passthrough, we do it anyway -- it may indicate a memory
 2333 range which is reserved in E820 and so didn't get set
2334 up to start with in si_domain */
2335 if (domain == si_domain && hw_pass_through) {
2336 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
David Woodhouse0b9d9752014-03-09 15:48:15 -07002337 dev_name(dev), start, end);
David Woodhouse19943b02009-08-04 16:19:20 +01002338 return 0;
2339 }
2340
2341 printk(KERN_INFO
2342 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
David Woodhouse0b9d9752014-03-09 15:48:15 -07002343 dev_name(dev), start, end);
David Woodhouse2ff729f2009-08-26 14:25:41 +01002344
David Woodhouse5595b522009-12-02 09:21:55 +00002345 if (end < start) {
2346 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2347 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2348 dmi_get_system_info(DMI_BIOS_VENDOR),
2349 dmi_get_system_info(DMI_BIOS_VERSION),
2350 dmi_get_system_info(DMI_PRODUCT_VERSION));
2351 ret = -EIO;
2352 goto error;
2353 }
2354
David Woodhouse2ff729f2009-08-26 14:25:41 +01002355 if (end >> agaw_to_width(domain->agaw)) {
2356 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2357 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2358 agaw_to_width(domain->agaw),
2359 dmi_get_system_info(DMI_BIOS_VENDOR),
2360 dmi_get_system_info(DMI_BIOS_VERSION),
2361 dmi_get_system_info(DMI_PRODUCT_VERSION));
2362 ret = -EIO;
2363 goto error;
2364 }
David Woodhouse19943b02009-08-04 16:19:20 +01002365
David Woodhouseb2132032009-06-26 18:50:28 +01002366 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002367 if (ret)
2368 goto error;
2369
2370 /* context entry init */
David Woodhouse0b9d9752014-03-09 15:48:15 -07002371 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01002372 if (ret)
2373 goto error;
2374
2375 return 0;
2376
2377 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002378 domain_exit(domain);
2379 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002380}
2381
2382static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
David Woodhouse0b9d9752014-03-09 15:48:15 -07002383 struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002384{
David Woodhouse0b9d9752014-03-09 15:48:15 -07002385 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002386 return 0;
David Woodhouse0b9d9752014-03-09 15:48:15 -07002387 return iommu_prepare_identity_map(dev, rmrr->base_address,
2388 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002389}
2390
Suresh Siddhad3f13812011-08-23 17:05:25 -07002391#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002392static inline void iommu_prepare_isa(void)
2393{
2394 struct pci_dev *pdev;
2395 int ret;
2396
2397 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2398 if (!pdev)
2399 return;
2400
David Woodhousec7ab48d2009-06-26 19:10:36 +01002401 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse0b9d9752014-03-09 15:48:15 -07002402 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002403
2404 if (ret)
David Woodhousec7ab48d2009-06-26 19:10:36 +01002405 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2406 "floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002407
Yijing Wang9b27e822014-05-20 20:37:52 +08002408 pci_dev_put(pdev);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002409}
2410#else
2411static inline void iommu_prepare_isa(void)
2412{
2413 return;
2414}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002415#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002416
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002417static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002418
Matt Kraai071e1372009-08-23 22:30:22 -07002419static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002420{
2421 struct dmar_drhd_unit *drhd;
2422 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002423 int nid, ret = 0;
Jiang Liu44bde612014-07-11 14:19:29 +08002424 bool first = true;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002425
Jiang Liuab8dfe22014-07-11 14:19:27 +08002426 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002427 if (!si_domain)
2428 return -EFAULT;
2429
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002430 for_each_active_iommu(iommu, drhd) {
2431 ret = iommu_attach_domain(si_domain, iommu);
Jiang Liufb170fb2014-07-11 14:19:28 +08002432 if (ret < 0) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002433 domain_exit(si_domain);
2434 return -EFAULT;
Jiang Liu44bde612014-07-11 14:19:29 +08002435 } else if (first) {
2436 si_domain->id = ret;
2437 first = false;
2438 } else if (si_domain->id != ret) {
2439 domain_exit(si_domain);
2440 return -EFAULT;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002441 }
Jiang Liufb170fb2014-07-11 14:19:28 +08002442 domain_attach_iommu(si_domain, iommu);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002443 }
2444
2445 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2446 domain_exit(si_domain);
2447 return -EFAULT;
2448 }
2449
Jiang Liu9544c002014-01-06 14:18:13 +08002450 pr_debug("IOMMU: identity mapping domain is domain %d\n",
2451 si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002452
David Woodhouse19943b02009-08-04 16:19:20 +01002453 if (hw)
2454 return 0;
2455
David Woodhousec7ab48d2009-06-26 19:10:36 +01002456 for_each_online_node(nid) {
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002457 unsigned long start_pfn, end_pfn;
2458 int i;
2459
2460 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2461 ret = iommu_domain_identity_map(si_domain,
2462 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2463 if (ret)
2464 return ret;
2465 }
David Woodhousec7ab48d2009-06-26 19:10:36 +01002466 }
2467
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002468 return 0;
2469}
2470
David Woodhouse9b226622014-03-09 14:03:28 -07002471static int identity_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002472{
2473 struct device_domain_info *info;
2474
2475 if (likely(!iommu_identity_mapping))
2476 return 0;
2477
David Woodhouse9b226622014-03-09 14:03:28 -07002478 info = dev->archdata.iommu;
Mike Traviscb452a42011-05-28 13:15:03 -05002479 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2480 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002481
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002482 return 0;
2483}
2484
2485static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5913c9b2014-03-09 16:27:31 -07002486 struct device *dev, int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002487{
David Woodhouse0ac72662014-03-09 13:19:22 -07002488 struct dmar_domain *ndomain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002489 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002490 u8 bus, devfn;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002491 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002492
David Woodhouse5913c9b2014-03-09 16:27:31 -07002493 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002494 if (!iommu)
2495 return -ENODEV;
2496
David Woodhouse5913c9b2014-03-09 16:27:31 -07002497 ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
David Woodhouse0ac72662014-03-09 13:19:22 -07002498 if (ndomain != domain)
2499 return -EBUSY;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002500
David Woodhouse5913c9b2014-03-09 16:27:31 -07002501 ret = domain_context_mapping(domain, dev, translation);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002502 if (ret) {
David Woodhouse5913c9b2014-03-09 16:27:31 -07002503 domain_remove_one_dev_info(domain, dev);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002504 return ret;
2505 }
2506
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002507 return 0;
2508}
2509
David Woodhouse0b9d9752014-03-09 15:48:15 -07002510static bool device_has_rmrr(struct device *dev)
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002511{
2512 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002513 struct device *tmp;
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002514 int i;
2515
Jiang Liu0e242612014-02-19 14:07:34 +08002516 rcu_read_lock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002517 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002518 /*
2519 * Return TRUE if this RMRR contains the device that
2520 * is passed in.
2521 */
2522 for_each_active_dev_scope(rmrr->devices,
2523 rmrr->devices_cnt, i, tmp)
David Woodhouse0b9d9752014-03-09 15:48:15 -07002524 if (tmp == dev) {
Jiang Liu0e242612014-02-19 14:07:34 +08002525 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002526 return true;
Jiang Liub683b232014-02-19 14:07:32 +08002527 }
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002528 }
Jiang Liu0e242612014-02-19 14:07:34 +08002529 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002530 return false;
2531}
2532
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002533/*
2534 * There are a couple cases where we need to restrict the functionality of
2535 * devices associated with RMRRs. The first is when evaluating a device for
2536 * identity mapping because problems exist when devices are moved in and out
2537 * of domains and their respective RMRR information is lost. This means that
2538 * a device with associated RMRRs will never be in a "passthrough" domain.
2539 * The second is use of the device through the IOMMU API. This interface
2540 * expects to have full control of the IOVA space for the device. We cannot
2541 * satisfy both the requirement that RMRR access is maintained and have an
2542 * unencumbered IOVA space. We also have no ability to quiesce the device's
2543 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2544 * We therefore prevent devices associated with an RMRR from participating in
2545 * the IOMMU API, which eliminates them from device assignment.
2546 *
2547 * In both cases we assume that PCI USB devices with RMRRs have them largely
2548 * for historical reasons and that the RMRR space is not actively used post
2549 * boot. This exclusion may change if vendors begin to abuse it.
David Woodhouse18436af2015-03-25 15:05:47 +00002550 *
2551 * The same exception is made for graphics devices, with the requirement that
2552 * any use of the RMRR regions will be torn down before assigning the device
2553 * to a guest.
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002554 */
2555static bool device_is_rmrr_locked(struct device *dev)
2556{
2557 if (!device_has_rmrr(dev))
2558 return false;
2559
2560 if (dev_is_pci(dev)) {
2561 struct pci_dev *pdev = to_pci_dev(dev);
2562
David Woodhouse18436af2015-03-25 15:05:47 +00002563 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002564 return false;
2565 }
2566
2567 return true;
2568}
2569
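/*
 * Decide whether @dev should be put into the static 1:1 (identity) domain.
 * RMRR-locked devices never are; Azalia and graphics devices may be,
 * depending on the IDENTMAP_* flags; legacy PCI devices behind bridges are
 * kept out because all siblings share one source-id.  After boot
 * (@startup == 0) the device's DMA mask must also cover all of memory.
 */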
David Woodhouse3bdb2592014-03-09 16:03:08 -07002570static int iommu_should_identity_map(struct device *dev, int startup)
David Woodhouse6941af22009-07-04 18:24:27 +01002571{
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002572
David Woodhouse3bdb2592014-03-09 16:03:08 -07002573 if (dev_is_pci(dev)) {
2574 struct pci_dev *pdev = to_pci_dev(dev);
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002575
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002576 if (device_is_rmrr_locked(dev))
David Woodhouse3bdb2592014-03-09 16:03:08 -07002577 return 0;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002578
David Woodhouse3bdb2592014-03-09 16:03:08 -07002579 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2580 return 1;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002581
David Woodhouse3bdb2592014-03-09 16:03:08 -07002582 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2583 return 1;
2584
2585 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2586 return 0;
2587
2588 /*
2589 * We want to start off with all devices in the 1:1 domain, and
2590 * take them out later if we find they can't access all of memory.
2591 *
2592 * However, we can't do this for PCI devices behind bridges,
2593 * because all PCI devices behind the same bridge will end up
2594 * with the same source-id on their transactions.
2595 *
2596 * Practically speaking, we can't change things around for these
2597 * devices at run-time, because we can't be sure there'll be no
2598 * DMA transactions in flight for any of their siblings.
2599 *
2600 * So PCI devices (unless they're on the root bus) as well as
2601 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2602 * the 1:1 domain, just in _case_ one of their siblings turns out
2603 * not to be able to map all of memory.
2604 */
2605 if (!pci_is_pcie(pdev)) {
2606 if (!pci_is_root_bus(pdev->bus))
2607 return 0;
2608 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2609 return 0;
2610 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2611 return 0;
2612 } else {
2613 if (device_has_rmrr(dev))
2614 return 0;
2615 }
David Woodhouse6941af22009-07-04 18:24:27 +01002616
David Woodhouse3dfc8132009-07-04 19:11:08 +01002617 /*
David Woodhouse3dfc8132009-07-04 19:11:08 +01002618 * At boot time, we don't yet know if devices will be 64-bit capable.
David Woodhouse3bdb2592014-03-09 16:03:08 -07002619	 * Assume that they will -- if they turn out not to be, then we can
David Woodhouse3dfc8132009-07-04 19:11:08 +01002620 * take them out of the 1:1 domain later.
2621 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002622 if (!startup) {
2623 /*
2624 * If the device's dma_mask is less than the system's memory
2625 * size then this is not a candidate for identity mapping.
2626 */
David Woodhouse3bdb2592014-03-09 16:03:08 -07002627 u64 dma_mask = *dev->dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002628
David Woodhouse3bdb2592014-03-09 16:03:08 -07002629 if (dev->coherent_dma_mask &&
2630 dev->coherent_dma_mask < dma_mask)
2631 dma_mask = dev->coherent_dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002632
David Woodhouse3bdb2592014-03-09 16:03:08 -07002633 return dma_mask >= dma_get_required_mask(dev);
Chris Wright8fcc5372011-05-28 13:15:02 -05002634 }
David Woodhouse6941af22009-07-04 18:24:27 +01002635
2636 return 1;
2637}
2638
David Woodhousecf04eee2014-03-21 16:49:04 +00002639static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2640{
2641 int ret;
2642
2643 if (!iommu_should_identity_map(dev, 1))
2644 return 0;
2645
2646 ret = domain_add_dev_info(si_domain, dev,
2647 hw ? CONTEXT_TT_PASS_THROUGH :
2648 CONTEXT_TT_MULTI_LEVEL);
2649 if (!ret)
2650 pr_info("IOMMU: %s identity mapping for device %s\n",
2651 hw ? "hardware" : "software", dev_name(dev));
2652 else if (ret == -ENODEV)
2653 /* device not associated with an iommu */
2654 ret = 0;
2655
2656 return ret;
2657}
2658
2659
Matt Kraai071e1372009-08-23 22:30:22 -07002660static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002661{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002662 struct pci_dev *pdev = NULL;
David Woodhousecf04eee2014-03-21 16:49:04 +00002663 struct dmar_drhd_unit *drhd;
2664 struct intel_iommu *iommu;
2665 struct device *dev;
2666 int i;
2667 int ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002668
David Woodhouse19943b02009-08-04 16:19:20 +01002669 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002670 if (ret)
2671 return -EFAULT;
2672
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002673 for_each_pci_dev(pdev) {
David Woodhousecf04eee2014-03-21 16:49:04 +00002674 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2675 if (ret)
2676 return ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002677 }
2678
David Woodhousecf04eee2014-03-21 16:49:04 +00002679 for_each_active_iommu(iommu, drhd)
2680 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2681 struct acpi_device_physical_node *pn;
2682 struct acpi_device *adev;
2683
2684 if (dev->bus != &acpi_bus_type)
2685 continue;
2686
2687			adev = to_acpi_device(dev);
2688 mutex_lock(&adev->physical_node_lock);
2689 list_for_each_entry(pn, &adev->physical_node_list, node) {
2690 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2691 if (ret)
2692 break;
2693 }
2694 mutex_unlock(&adev->physical_node_lock);
2695 if (ret)
2696 return ret;
2697 }
2698
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002699 return 0;
2700}
2701
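/*
 * Pick the invalidation backend for @iommu: queued invalidation (QI) when
 * it can be enabled, otherwise register-based context/IOTLB flushing.
 */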
Jiang Liuffebeb42014-11-09 22:48:02 +08002702static void intel_iommu_init_qi(struct intel_iommu *iommu)
2703{
2704 /*
2705	 * Start from a sane iommu hardware state.
2706 * If the queued invalidation is already initialized by us
2707 * (for example, while enabling interrupt-remapping) then
2708 * we got the things already rolling from a sane state.
2709 */
2710 if (!iommu->qi) {
2711 /*
2712 * Clear any previous faults.
2713 */
2714 dmar_fault(-1, iommu);
2715 /*
2716 * Disable queued invalidation if supported and already enabled
2717 * before OS handover.
2718 */
2719 dmar_disable_qi(iommu);
2720 }
2721
2722 if (dmar_enable_qi(iommu)) {
2723 /*
2724		 * Queued invalidation is not enabled; use register-based invalidation
2725 */
2726 iommu->flush.flush_context = __iommu_flush_context;
2727 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2728 pr_info("IOMMU: %s using Register based invalidation\n",
2729 iommu->name);
2730 } else {
2731 iommu->flush.flush_context = qi_flush_context;
2732 iommu->flush.flush_iotlb = qi_flush_iotlb;
2733 pr_info("IOMMU: %s using Queued invalidation\n", iommu->name);
2734 }
2735}
2736
Joseph Cihulab7792602011-05-03 00:08:37 -07002737static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002738{
2739 struct dmar_drhd_unit *drhd;
2740 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002741 struct device *dev;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002742 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002743 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002744
2745 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002746 * for each drhd
2747 * allocate root
2748 * initialize and program root entry to not present
2749 * endfor
2750 */
2751 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002752		/*
2753		 * lock not needed as this is only incremented in the
2754		 * single-threaded kernel __init code path; all other
2755		 * accesses are read only
2756 */
Jiang Liu78d8e702014-11-09 22:47:57 +08002757 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
Mike Travis1b198bb2012-03-05 15:05:16 -08002758 g_num_of_iommus++;
2759 continue;
2760 }
2761 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
Jiang Liu78d8e702014-11-09 22:47:57 +08002762 DMAR_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08002763 }
2764
Jiang Liuffebeb42014-11-09 22:48:02 +08002765 /* Preallocate enough resources for IOMMU hot-addition */
2766 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
2767 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
2768
Weidong Hand9630fe2008-12-08 11:06:32 +08002769 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2770 GFP_KERNEL);
2771 if (!g_iommus) {
2772 printk(KERN_ERR "Allocating global iommu array failed\n");
2773 ret = -ENOMEM;
2774 goto error;
2775 }
2776
mark gross80b20dd2008-04-18 13:53:58 -07002777 deferred_flush = kzalloc(g_num_of_iommus *
2778 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2779 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002780 ret = -ENOMEM;
Jiang Liu989d51f2014-02-19 14:07:21 +08002781 goto free_g_iommus;
mark gross5e0d2a62008-03-04 15:22:08 -08002782 }
2783
Jiang Liu7c919772014-01-06 14:18:18 +08002784 for_each_active_iommu(iommu, drhd) {
Weidong Hand9630fe2008-12-08 11:06:32 +08002785 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002786
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002787 ret = iommu_init_domains(iommu);
2788 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002789 goto free_iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002790
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002791 /*
2792 * TBD:
2793 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002794		 * among all IOMMUs. Need to split them later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002795 */
2796 ret = iommu_alloc_root_entry(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08002797 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002798 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002799 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002800 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002801 }
2802
Jiang Liuffebeb42014-11-09 22:48:02 +08002803 for_each_active_iommu(iommu, drhd)
2804 intel_iommu_init_qi(iommu);
Youquan Songa77b67d2008-10-16 16:31:56 -07002805
David Woodhouse19943b02009-08-04 16:19:20 +01002806 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07002807 iommu_identity_mapping |= IDENTMAP_ALL;
2808
Suresh Siddhad3f13812011-08-23 17:05:25 -07002809#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07002810 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002811#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07002812
2813 check_tylersburg_isoch();
2814
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002815 /*
2816 * If pass through is not set or not enabled, setup context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002817 * identity mappings for rmrr, gfx, and isa and may fall back to static
2818 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002819 */
David Woodhouse19943b02009-08-04 16:19:20 +01002820 if (iommu_identity_mapping) {
2821 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2822 if (ret) {
2823 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08002824 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002825 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002826 }
David Woodhouse19943b02009-08-04 16:19:20 +01002827 /*
2828 * For each rmrr
2829 * for each dev attached to rmrr
2830 * do
2831 * locate drhd for dev, alloc domain for dev
2832 * allocate free domain
2833 * allocate page table entries for rmrr
2834 * if context not allocated for bus
2835 * allocate and init context
2836 * set present in root table for this bus
2837 * init context with domain, translation etc
2838 * endfor
2839 * endfor
2840 */
2841 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2842 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002843		/* some BIOSes list non-existent devices in the DMAR table. */
2844 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
David Woodhouse832bd852014-03-07 15:08:36 +00002845 i, dev) {
David Woodhouse0b9d9752014-03-09 15:48:15 -07002846 ret = iommu_prepare_rmrr_dev(rmrr, dev);
David Woodhouse19943b02009-08-04 16:19:20 +01002847 if (ret)
2848 printk(KERN_ERR
2849 "IOMMU: mapping reserved region failed\n");
2850 }
2851 }
2852
2853 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002854
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002855 /*
2856 * for each drhd
2857 * enable fault log
2858 * global invalidate context cache
2859 * global invalidate iotlb
2860 * enable translation
2861 */
Jiang Liu7c919772014-01-06 14:18:18 +08002862 for_each_iommu(iommu, drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07002863 if (drhd->ignored) {
2864 /*
2865 * we always have to disable PMRs or DMA may fail on
2866 * this device
2867 */
2868 if (force_on)
Jiang Liu7c919772014-01-06 14:18:18 +08002869 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002870 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07002871 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002872
2873 iommu_flush_write_buffer(iommu);
2874
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002875 ret = dmar_set_interrupt(iommu);
2876 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002877 goto free_iommu;
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002878
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002879 iommu_set_root_entry(iommu);
2880
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002881 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002882 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Jiang Liu2a41cce2014-07-11 14:19:33 +08002883 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07002884 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002885 }
2886
2887 return 0;
Jiang Liu989d51f2014-02-19 14:07:21 +08002888
2889free_iommu:
Jiang Liuffebeb42014-11-09 22:48:02 +08002890 for_each_active_iommu(iommu, drhd) {
2891 disable_dmar_iommu(iommu);
Jiang Liua868e6b2014-01-06 14:18:20 +08002892 free_dmar_iommu(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08002893 }
Jiang Liu9bdc5312014-01-06 14:18:27 +08002894 kfree(deferred_flush);
Jiang Liu989d51f2014-02-19 14:07:21 +08002895free_g_iommus:
Weidong Hand9630fe2008-12-08 11:06:32 +08002896 kfree(g_iommus);
Jiang Liu989d51f2014-02-19 14:07:21 +08002897error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002898 return ret;
2899}
2900
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002901/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002902static struct iova *intel_alloc_iova(struct device *dev,
2903 struct dmar_domain *domain,
2904 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002905{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002906 struct iova *iova = NULL;
2907
David Woodhouse875764d2009-06-28 21:20:51 +01002908 /* Restrict dma_mask to the width that the iommu can handle */
2909 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2910
2911 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002912 /*
2913		 * First try to allocate an I/O virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002914		 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002915		 * from the higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002916 */
David Woodhouse875764d2009-06-28 21:20:51 +01002917 iova = alloc_iova(&domain->iovad, nrpages,
2918 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2919 if (iova)
2920 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002921 }
David Woodhouse875764d2009-06-28 21:20:51 +01002922 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2923 if (unlikely(!iova)) {
2924		printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
David Woodhouse207e3592014-03-09 16:12:32 -07002925 nrpages, dev_name(dev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002926 return NULL;
2927 }
2928
2929 return iova;
2930}
2931
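/*
 * Slow path used when dev->archdata.iommu is not yet populated: find or
 * create the DMA domain for @dev and make sure its context entry is
 * programmed.
 */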
David Woodhoused4b709f2014-03-09 16:07:40 -07002932static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002933{
2934 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002935 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002936
David Woodhoused4b709f2014-03-09 16:07:40 -07002937 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002938 if (!domain) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002939		printk(KERN_ERR "Allocating domain for %s failed\n",
2940 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002941 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002942 }
2943
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002944 /* make sure context mapping is ok */
David Woodhoused4b709f2014-03-09 16:07:40 -07002945 if (unlikely(!domain_context_mapped(dev))) {
2946 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002947 if (ret) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002948			printk(KERN_ERR "Domain context map for %s failed\n",
2949 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002950 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002951 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002952 }
2953
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002954 return domain;
2955}
2956
David Woodhoused4b709f2014-03-09 16:07:40 -07002957static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
David Woodhouse147202a2009-07-07 19:43:20 +01002958{
2959 struct device_domain_info *info;
2960
2961 /* No lock here, assumes no domain exit in normal case */
David Woodhoused4b709f2014-03-09 16:07:40 -07002962 info = dev->archdata.iommu;
David Woodhouse147202a2009-07-07 19:43:20 +01002963 if (likely(info))
2964 return info->domain;
2965
2966 return __get_valid_domain_for_dev(dev);
2967}
2968
David Woodhouse3d891942014-03-06 15:59:26 +00002969static int iommu_dummy(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002970{
David Woodhouse3d891942014-03-06 15:59:26 +00002971 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002972}
2973
David Woodhouseecb509e2014-03-09 16:29:55 -07002974/* Check if the dev needs to go through the non-identity map and unmap process. */
David Woodhouse73676832009-07-04 14:08:36 +01002975static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002976{
2977 int found;
2978
David Woodhouse3d891942014-03-06 15:59:26 +00002979 if (iommu_dummy(dev))
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002980 return 1;
2981
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002982 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002983 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002984
David Woodhouse9b226622014-03-09 14:03:28 -07002985 found = identity_mapping(dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002986 if (found) {
David Woodhouseecb509e2014-03-09 16:29:55 -07002987 if (iommu_should_identity_map(dev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002988 return 1;
2989 else {
2990 /*
2991			 * A 32 bit DMA device is removed from si_domain and falls
2992			 * back to non-identity mapping.
2993 */
David Woodhousebf9c9ed2014-03-09 16:19:13 -07002994 domain_remove_one_dev_info(si_domain, dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002995 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
David Woodhouseecb509e2014-03-09 16:29:55 -07002996 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002997 return 0;
2998 }
2999 } else {
3000 /*
3001		 * In case of a 64 bit DMA device detached from a VM, the device
3002 * is put into si_domain for identity mapping.
3003 */
David Woodhouseecb509e2014-03-09 16:29:55 -07003004 if (iommu_should_identity_map(dev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003005 int ret;
David Woodhouse5913c9b2014-03-09 16:27:31 -07003006 ret = domain_add_dev_info(si_domain, dev,
David Woodhouse5fe60f42009-08-09 10:53:41 +01003007 hw_pass_through ?
3008 CONTEXT_TT_PASS_THROUGH :
3009 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003010 if (!ret) {
3011 printk(KERN_INFO "64bit %s uses identity mapping\n",
David Woodhouseecb509e2014-03-09 16:29:55 -07003012 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003013 return 1;
3014 }
3015 }
3016 }
3017
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003018 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003019}
3020
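/*
 * Map a physical range for streaming DMA: allocate an IOVA below the
 * device's DMA mask, install the page-table entries with the right
 * read/write permissions, then flush the IOTLB (caching mode) or the
 * write buffer.
 */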
David Woodhouse5040a912014-03-09 16:14:00 -07003021static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003022 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003023{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003024 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003025 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003026 struct iova *iova;
3027 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003028 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08003029 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07003030 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003031
3032 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003033
David Woodhouse5040a912014-03-09 16:14:00 -07003034 if (iommu_no_mapping(dev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003035 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003036
David Woodhouse5040a912014-03-09 16:14:00 -07003037 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003038 if (!domain)
3039 return 0;
3040
Weidong Han8c11e792008-12-08 15:29:22 +08003041 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01003042 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003043
David Woodhouse5040a912014-03-09 16:14:00 -07003044 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003045 if (!iova)
3046 goto error;
3047
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003048 /*
3049 * Check if DMAR supports zero-length reads on write only
3050 * mappings..
3051 */
3052 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003053 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003054 prot |= DMA_PTE_READ;
3055 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3056 prot |= DMA_PTE_WRITE;
3057 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003058	 * paddr..(paddr + size) might be a partial page; we should map the
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003059	 * whole page. Note: if two parts of one page are separately mapped, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003060	 * might have two guest addresses mapping to the same host paddr, but
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003061	 * this is not a big problem
3062 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01003063 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07003064 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003065 if (ret)
3066 goto error;
3067
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003068 /* it's a non-present to present mapping. Only flush if caching mode */
3069 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003070 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003071 else
Weidong Han8c11e792008-12-08 15:29:22 +08003072 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003073
David Woodhouse03d6a242009-06-28 15:33:46 +01003074 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3075 start_paddr += paddr & ~PAGE_MASK;
3076 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003077
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003078error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003079 if (iova)
3080 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00003081	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
David Woodhouse5040a912014-03-09 16:14:00 -07003082 dev_name(dev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003083 return 0;
3084}
3085
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003086static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3087 unsigned long offset, size_t size,
3088 enum dma_data_direction dir,
3089 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003090{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003091 return __intel_map_single(dev, page_to_phys(page) + offset, size,
David Woodhouse46333e32014-03-10 20:01:21 -07003092 dir, *dev->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003093}
3094
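/*
 * Drain every per-IOMMU deferred-unmap queue: flush the IOTLB (one global
 * flush on real hardware, per-range flushes in caching mode), then free
 * the queued IOVAs and page freelists.  Called with async_umap_flush_lock
 * held.
 */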
mark gross5e0d2a62008-03-04 15:22:08 -08003095static void flush_unmaps(void)
3096{
mark gross80b20dd2008-04-18 13:53:58 -07003097 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08003098
mark gross5e0d2a62008-03-04 15:22:08 -08003099 timer_on = 0;
3100
3101 /* just flush them all */
3102 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08003103 struct intel_iommu *iommu = g_iommus[i];
3104 if (!iommu)
3105 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003106
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003107 if (!deferred_flush[i].next)
3108 continue;
3109
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003110 /* In caching mode, global flushes turn emulation expensive */
3111 if (!cap_caching_mode(iommu->cap))
3112 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08003113 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003114 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08003115 unsigned long mask;
3116 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003117 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08003118
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003119 /* On real hardware multiple invalidations are expensive */
3120 if (cap_caching_mode(iommu->cap))
3121 iommu_flush_iotlb_psi(iommu, domain->id,
Jiang Liua156ef92014-07-11 14:19:36 +08003122 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00003123 !deferred_flush[i].freelist[j], 0);
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003124 else {
Jiang Liua156ef92014-07-11 14:19:36 +08003125 mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003126 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3127 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3128 }
Yu Zhao93a23a72009-05-18 13:51:37 +08003129 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003130 if (deferred_flush[i].freelist[j])
3131 dma_free_pagelist(deferred_flush[i].freelist[j]);
mark gross80b20dd2008-04-18 13:53:58 -07003132 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003133 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003134 }
3135
mark gross5e0d2a62008-03-04 15:22:08 -08003136 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003137}
3138
3139static void flush_unmaps_timeout(unsigned long data)
3140{
mark gross80b20dd2008-04-18 13:53:58 -07003141 unsigned long flags;
3142
3143 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003144 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07003145 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003146}
3147
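/*
 * Queue an IOVA (and its page freelist) for deferred freeing instead of
 * flushing the IOTLB synchronously.  The queue is drained once list_size
 * reaches HIGH_WATER_MARK or when the 10ms unmap_timer fires.
 */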
David Woodhouseea8ea462014-03-05 17:09:32 +00003148static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
mark gross5e0d2a62008-03-04 15:22:08 -08003149{
3150 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07003151 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08003152 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08003153
3154 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07003155 if (list_size == HIGH_WATER_MARK)
3156 flush_unmaps();
3157
Weidong Han8c11e792008-12-08 15:29:22 +08003158 iommu = domain_get_iommu(dom);
3159 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003160
mark gross80b20dd2008-04-18 13:53:58 -07003161 next = deferred_flush[iommu_id].next;
3162 deferred_flush[iommu_id].domain[next] = dom;
3163 deferred_flush[iommu_id].iova[next] = iova;
David Woodhouseea8ea462014-03-05 17:09:32 +00003164 deferred_flush[iommu_id].freelist[next] = freelist;
mark gross80b20dd2008-04-18 13:53:58 -07003165 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08003166
3167 if (!timer_on) {
3168 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3169 timer_on = 1;
3170 }
3171 list_size++;
3172 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3173}
3174
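/*
 * Common unmap path: look up the domain and IOVA behind @dev_addr, tear
 * down the page-table range, then either flush the IOTLB immediately
 * (intel_iommu_strict) or hand the IOVA to the deferred-flush machinery.
 */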
Jiang Liud41a4ad2014-07-11 14:19:34 +08003175static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003176{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003177 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003178 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003179 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003180 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003181 struct page *freelist;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003182
David Woodhouse73676832009-07-04 14:08:36 +01003183 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003184 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003185
David Woodhouse1525a292014-03-06 16:19:30 +00003186 domain = find_domain(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003187 BUG_ON(!domain);
3188
Weidong Han8c11e792008-12-08 15:29:22 +08003189 iommu = domain_get_iommu(domain);
3190
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003191 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01003192 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3193 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003194 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003195
David Woodhoused794dc92009-06-28 00:27:49 +01003196 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3197 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003198
David Woodhoused794dc92009-06-28 00:27:49 +01003199 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003200 dev_name(dev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003201
David Woodhouseea8ea462014-03-05 17:09:32 +00003202 freelist = domain_unmap(domain, start_pfn, last_pfn);
David Woodhoused794dc92009-06-28 00:27:49 +01003203
mark gross5e0d2a62008-03-04 15:22:08 -08003204 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01003205 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
David Woodhouseea8ea462014-03-05 17:09:32 +00003206 last_pfn - start_pfn + 1, !freelist, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08003207 /* free iova */
3208 __free_iova(&domain->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003209 dma_free_pagelist(freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003210 } else {
David Woodhouseea8ea462014-03-05 17:09:32 +00003211 add_unmap(domain, iova, freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003212 /*
3213 * queue up the release of the unmap to save the 1/6th of the
3214		 * cpu time used up by the iotlb flush operation...
3215 */
mark gross5e0d2a62008-03-04 15:22:08 -08003216 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003217}
3218
Jiang Liud41a4ad2014-07-11 14:19:34 +08003219static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3220 size_t size, enum dma_data_direction dir,
3221 struct dma_attrs *attrs)
3222{
3223 intel_unmap(dev, dev_addr);
3224}
3225
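/*
 * DMA-API coherent allocation: try the contiguous allocator for blocking
 * requests, fall back to alloc_pages(), then map the buffer through
 * __intel_map_single() (which simply returns the physical address for
 * identity-mapped devices).
 */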
David Woodhouse5040a912014-03-09 16:14:00 -07003226static void *intel_alloc_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003227 dma_addr_t *dma_handle, gfp_t flags,
3228 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003229{
Akinobu Mita36746432014-06-04 16:06:51 -07003230 struct page *page = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003231 int order;
3232
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003233 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003234 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003235
David Woodhouse5040a912014-03-09 16:14:00 -07003236 if (!iommu_no_mapping(dev))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003237 flags &= ~(GFP_DMA | GFP_DMA32);
David Woodhouse5040a912014-03-09 16:14:00 -07003238 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3239 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003240 flags |= GFP_DMA;
3241 else
3242 flags |= GFP_DMA32;
3243 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003244
Akinobu Mita36746432014-06-04 16:06:51 -07003245 if (flags & __GFP_WAIT) {
3246 unsigned int count = size >> PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003247
Akinobu Mita36746432014-06-04 16:06:51 -07003248 page = dma_alloc_from_contiguous(dev, count, order);
3249 if (page && iommu_no_mapping(dev) &&
3250 page_to_phys(page) + size > dev->coherent_dma_mask) {
3251 dma_release_from_contiguous(dev, page, count);
3252 page = NULL;
3253 }
3254 }
3255
3256 if (!page)
3257 page = alloc_pages(flags, order);
3258 if (!page)
3259 return NULL;
3260 memset(page_address(page), 0, size);
3261
3262 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003263 DMA_BIDIRECTIONAL,
David Woodhouse5040a912014-03-09 16:14:00 -07003264 dev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003265 if (*dma_handle)
Akinobu Mita36746432014-06-04 16:06:51 -07003266 return page_address(page);
3267 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3268 __free_pages(page, order);
3269
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003270 return NULL;
3271}
3272
David Woodhouse5040a912014-03-09 16:14:00 -07003273static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003274 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003275{
3276 int order;
Akinobu Mita36746432014-06-04 16:06:51 -07003277 struct page *page = virt_to_page(vaddr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003278
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003279 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003280 order = get_order(size);
3281
Jiang Liud41a4ad2014-07-11 14:19:34 +08003282 intel_unmap(dev, dma_handle);
Akinobu Mita36746432014-06-04 16:06:51 -07003283 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3284 __free_pages(page, order);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003285}
3286
David Woodhouse5040a912014-03-09 16:14:00 -07003287static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003288 int nelems, enum dma_data_direction dir,
3289 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003290{
Jiang Liud41a4ad2014-07-11 14:19:34 +08003291 intel_unmap(dev, sglist[0].dma_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003292}
3293
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003294static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003295 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003296{
3297 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003298 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003299
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003300 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003301 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003302 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003303 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003304 }
3305 return nelems;
3306}
3307
David Woodhouse5040a912014-03-09 16:14:00 -07003308static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003309 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003310{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003311 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003312 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003313 size_t size = 0;
3314 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003315 struct iova *iova = NULL;
3316 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003317 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003318 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003319 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003320
3321 BUG_ON(dir == DMA_NONE);
David Woodhouse5040a912014-03-09 16:14:00 -07003322 if (iommu_no_mapping(dev))
3323 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003324
David Woodhouse5040a912014-03-09 16:14:00 -07003325 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003326 if (!domain)
3327 return 0;
3328
Weidong Han8c11e792008-12-08 15:29:22 +08003329 iommu = domain_get_iommu(domain);
3330
David Woodhouseb536d242009-06-28 14:49:31 +01003331 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003332 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003333
David Woodhouse5040a912014-03-09 16:14:00 -07003334 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3335 *dev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003336 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003337 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003338 return 0;
3339 }
3340
3341 /*
3342 * Check if DMAR supports zero-length reads on write only
3343 * mappings..
3344 */
3345 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003346 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003347 prot |= DMA_PTE_READ;
3348 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3349 prot |= DMA_PTE_WRITE;
3350
David Woodhouseb536d242009-06-28 14:49:31 +01003351 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003352
Fenghua Yuf5329592009-08-04 15:09:37 -07003353 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003354 if (unlikely(ret)) {
David Woodhousee1605492009-06-29 11:17:38 +01003355 dma_pte_free_pagetable(domain, start_vpfn,
3356 start_vpfn + size - 1);
David Woodhousee1605492009-06-29 11:17:38 +01003357 __free_iova(&domain->iovad, iova);
3358 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003359 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003360
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003361 /* it's a non-present to present mapping. Only flush if caching mode */
3362 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003363 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003364 else
Weidong Han8c11e792008-12-08 15:29:22 +08003365 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003366
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003367 return nelems;
3368}
3369
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003370static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3371{
3372 return !dma_addr;
3373}
3374
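/*
 * These ops back the generic DMA API for devices behind VT-d.  As a rough
 * sketch (assuming a driver-owned "dev" and buffer "buf"), a call such as
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 * resolves through get_dma_ops(dev) to intel_map_page() below, which
 * allocates an IOVA and installs the IOMMU page-table entries for it.
 */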
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003375struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003376 .alloc = intel_alloc_coherent,
3377 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003378 .map_sg = intel_map_sg,
3379 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003380 .map_page = intel_map_page,
3381 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003382 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003383};
3384
3385static inline int iommu_domain_cache_init(void)
3386{
3387 int ret = 0;
3388
3389 iommu_domain_cache = kmem_cache_create("iommu_domain",
3390 sizeof(struct dmar_domain),
3391 0,
3392					 SLAB_HWCACHE_ALIGN,
3394					 NULL);
3395 if (!iommu_domain_cache) {
3396 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3397 ret = -ENOMEM;
3398 }
3399
3400 return ret;
3401}
3402
3403static inline int iommu_devinfo_cache_init(void)
3404{
3405 int ret = 0;
3406
3407 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3408 sizeof(struct device_domain_info),
3409 0,
3410 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003411 NULL);
3412 if (!iommu_devinfo_cache) {
3413 printk(KERN_ERR "Couldn't create devinfo cache\n");
3414 ret = -ENOMEM;
3415 }
3416
3417 return ret;
3418}
3419
3420static inline int iommu_iova_cache_init(void)
3421{
3422 int ret = 0;
3423
3424 iommu_iova_cache = kmem_cache_create("iommu_iova",
3425 sizeof(struct iova),
3426 0,
3427 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003428 NULL);
3429 if (!iommu_iova_cache) {
3430 printk(KERN_ERR "Couldn't create iova cache\n");
3431 ret = -ENOMEM;
3432 }
3433
3434 return ret;
3435}
3436
3437static int __init iommu_init_mempool(void)
3438{
3439 int ret;
3440 ret = iommu_iova_cache_init();
3441 if (ret)
3442 return ret;
3443
3444 ret = iommu_domain_cache_init();
3445 if (ret)
3446 goto domain_error;
3447
3448 ret = iommu_devinfo_cache_init();
3449 if (!ret)
3450 return ret;
3451
3452 kmem_cache_destroy(iommu_domain_cache);
3453domain_error:
3454 kmem_cache_destroy(iommu_iova_cache);
3455
3456 return -ENOMEM;
3457}
3458
3459static void __init iommu_exit_mempool(void)
3460{
3461 kmem_cache_destroy(iommu_devinfo_cache);
3462 kmem_cache_destroy(iommu_domain_cache);
3463 kmem_cache_destroy(iommu_iova_cache);
3464
3465}
3466
Dan Williams556ab452010-07-23 15:47:56 -07003467static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3468{
3469 struct dmar_drhd_unit *drhd;
3470 u32 vtbar;
3471 int rc;
3472
3473 /* We know that this device on this chipset has its own IOMMU.
3474 * If we find it under a different IOMMU, then the BIOS is lying
3475 * to us. Hope that the IOMMU for this device is actually
3476 * disabled, and it needs no translation...
3477 */
3478 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3479 if (rc) {
3480 /* "can't" happen */
3481 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3482 return;
3483 }
3484 vtbar &= 0xffff0000;
3485
3486	/* we know that this iommu should be at offset 0xa000 from vtbar */
3487 drhd = dmar_find_matched_drhd_unit(pdev);
3488 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3489 TAINT_FIRMWARE_WORKAROUND,
3490 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3491 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3492}
3493DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3494
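/*
 * Mark DRHD units that cover no devices as ignored.  Units that cover
 * only graphics devices are either bypassed (when dmar_map_gfx is clear)
 * or noted via intel_iommu_gfx_mapped.
 */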
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003495static void __init init_no_remapping_devices(void)
3496{
3497 struct dmar_drhd_unit *drhd;
David Woodhouse832bd852014-03-07 15:08:36 +00003498 struct device *dev;
Jiang Liub683b232014-02-19 14:07:32 +08003499 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003500
3501 for_each_drhd_unit(drhd) {
3502 if (!drhd->include_all) {
Jiang Liub683b232014-02-19 14:07:32 +08003503 for_each_active_dev_scope(drhd->devices,
3504 drhd->devices_cnt, i, dev)
3505 break;
David Woodhouse832bd852014-03-07 15:08:36 +00003506 /* ignore DMAR unit if no devices exist */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003507 if (i == drhd->devices_cnt)
3508 drhd->ignored = 1;
3509 }
3510 }
3511
Jiang Liu7c919772014-01-06 14:18:18 +08003512 for_each_active_drhd_unit(drhd) {
Jiang Liu7c919772014-01-06 14:18:18 +08003513 if (drhd->include_all)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003514 continue;
3515
Jiang Liub683b232014-02-19 14:07:32 +08003516 for_each_active_dev_scope(drhd->devices,
3517 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003518 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003519 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003520 if (i < drhd->devices_cnt)
3521 continue;
3522
David Woodhousec0771df2011-10-14 20:59:46 +01003523 /* This IOMMU has *only* gfx devices. Either bypass it or
3524 set the gfx_mapped flag, as appropriate */
3525 if (dmar_map_gfx) {
3526 intel_iommu_gfx_mapped = 1;
3527 } else {
3528 drhd->ignored = 1;
Jiang Liub683b232014-02-19 14:07:32 +08003529 for_each_active_dev_scope(drhd->devices,
3530 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003531 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003532 }
3533 }
3534}
3535
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003536#ifdef CONFIG_SUSPEND
3537static int init_iommu_hw(void)
3538{
3539 struct dmar_drhd_unit *drhd;
3540 struct intel_iommu *iommu = NULL;
3541
3542 for_each_active_iommu(iommu, drhd)
3543 if (iommu->qi)
3544 dmar_reenable_qi(iommu);
3545
Joseph Cihulab7792602011-05-03 00:08:37 -07003546 for_each_iommu(iommu, drhd) {
3547 if (drhd->ignored) {
3548 /*
3549 * we always have to disable PMRs or DMA may fail on
3550 * this device
3551 */
3552 if (force_on)
3553 iommu_disable_protect_mem_regions(iommu);
3554 continue;
3555 }
3556
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003557 iommu_flush_write_buffer(iommu);
3558
3559 iommu_set_root_entry(iommu);
3560
3561 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003562 DMA_CCMD_GLOBAL_INVL);
Jiang Liu2a41cce2014-07-11 14:19:33 +08003563 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3564 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07003565 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003566 }
3567
3568 return 0;
3569}
3570
3571static void iommu_flush_all(void)
3572{
3573 struct dmar_drhd_unit *drhd;
3574 struct intel_iommu *iommu;
3575
3576 for_each_active_iommu(iommu, drhd) {
3577 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003578 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003579 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003580 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003581 }
3582}
3583
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003584static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003585{
3586 struct dmar_drhd_unit *drhd;
3587 struct intel_iommu *iommu = NULL;
3588 unsigned long flag;
3589
3590 for_each_active_iommu(iommu, drhd) {
3591 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3592 GFP_ATOMIC);
3593 if (!iommu->iommu_state)
3594 goto nomem;
3595 }
3596
3597 iommu_flush_all();
3598
3599 for_each_active_iommu(iommu, drhd) {
3600 iommu_disable_translation(iommu);
3601
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003602 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003603
3604 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3605 readl(iommu->reg + DMAR_FECTL_REG);
3606 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3607 readl(iommu->reg + DMAR_FEDATA_REG);
3608 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3609 readl(iommu->reg + DMAR_FEADDR_REG);
3610 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3611 readl(iommu->reg + DMAR_FEUADDR_REG);
3612
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003613 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003614 }
3615 return 0;
3616
3617nomem:
3618 for_each_active_iommu(iommu, drhd)
3619 kfree(iommu->iommu_state);
3620
3621 return -ENOMEM;
3622}
3623
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003624static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003625{
3626 struct dmar_drhd_unit *drhd;
3627 struct intel_iommu *iommu = NULL;
3628 unsigned long flag;
3629
3630 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003631 if (force_on)
3632 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3633 else
3634 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003635 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003636 }
3637
3638 for_each_active_iommu(iommu, drhd) {
3639
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003640 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003641
3642 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3643 iommu->reg + DMAR_FECTL_REG);
3644 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3645 iommu->reg + DMAR_FEDATA_REG);
3646 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3647 iommu->reg + DMAR_FEADDR_REG);
3648 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3649 iommu->reg + DMAR_FEUADDR_REG);
3650
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003651 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003652 }
3653
3654 for_each_active_iommu(iommu, drhd)
3655 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003656}
3657
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003658static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003659 .resume = iommu_resume,
3660 .suspend = iommu_suspend,
3661};
3662
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003663static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003664{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003665 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003666}
3667
3668#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003669static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003670#endif /* CONFIG_PM */
3671
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003672
Jiang Liuc2a0b532014-11-09 22:47:56 +08003673int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003674{
3675 struct acpi_dmar_reserved_memory *rmrr;
3676 struct dmar_rmrr_unit *rmrru;
3677
3678 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3679 if (!rmrru)
3680 return -ENOMEM;
3681
3682 rmrru->hdr = header;
3683 rmrr = (struct acpi_dmar_reserved_memory *)header;
3684 rmrru->base_address = rmrr->base_address;
3685 rmrru->end_address = rmrr->end_address;
Jiang Liu2e455282014-02-19 14:07:36 +08003686 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3687 ((void *)rmrr) + rmrr->header.length,
3688 &rmrru->devices_cnt);
3689 if (rmrru->devices_cnt && rmrru->devices == NULL) {
3690 kfree(rmrru);
3691 return -ENOMEM;
3692 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003693
Jiang Liu2e455282014-02-19 14:07:36 +08003694 list_add(&rmrru->list, &dmar_rmrr_units);
3695
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003696 return 0;
3697}
3698
Jiang Liu6b197242014-11-09 22:47:58 +08003699static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
3700{
3701 struct dmar_atsr_unit *atsru;
3702 struct acpi_dmar_atsr *tmp;
3703
3704 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3705 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
3706 if (atsr->segment != tmp->segment)
3707 continue;
3708 if (atsr->header.length != tmp->header.length)
3709 continue;
3710 if (memcmp(atsr, tmp, atsr->header.length) == 0)
3711 return atsru;
3712 }
3713
3714 return NULL;
3715}
3716
3717int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003718{
3719 struct acpi_dmar_atsr *atsr;
3720 struct dmar_atsr_unit *atsru;
3721
Jiang Liu6b197242014-11-09 22:47:58 +08003722 if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
3723 return 0;
3724
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003725 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
Jiang Liu6b197242014-11-09 22:47:58 +08003726 atsru = dmar_find_atsr(atsr);
3727 if (atsru)
3728 return 0;
3729
3730 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003731 if (!atsru)
3732 return -ENOMEM;
3733
Jiang Liu6b197242014-11-09 22:47:58 +08003734 /*
3735 * If memory is allocated from slab by ACPI _DSM method, we need to
3736 * copy the memory content because the memory buffer will be freed
3737 * on return.
3738 */
3739 atsru->hdr = (void *)(atsru + 1);
3740 memcpy(atsru->hdr, hdr, hdr->length);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003741 atsru->include_all = atsr->flags & 0x1;
Jiang Liu2e455282014-02-19 14:07:36 +08003742 if (!atsru->include_all) {
3743 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
3744 (void *)atsr + atsr->header.length,
3745 &atsru->devices_cnt);
3746 if (atsru->devices_cnt && atsru->devices == NULL) {
3747 kfree(atsru);
3748 return -ENOMEM;
3749 }
3750 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003751
Jiang Liu0e242612014-02-19 14:07:34 +08003752 list_add_rcu(&atsru->list, &dmar_atsr_units);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003753
3754 return 0;
3755}
3756
Jiang Liu9bdc5312014-01-06 14:18:27 +08003757static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
3758{
3759 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
3760 kfree(atsru);
3761}
3762
Jiang Liu6b197242014-11-09 22:47:58 +08003763int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3764{
3765 struct acpi_dmar_atsr *atsr;
3766 struct dmar_atsr_unit *atsru;
3767
3768 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3769 atsru = dmar_find_atsr(atsr);
3770 if (atsru) {
3771 list_del_rcu(&atsru->list);
3772 synchronize_rcu();
3773 intel_iommu_free_atsr(atsru);
3774 }
3775
3776 return 0;
3777}
3778
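/*
 * Check whether an ATSR unit may be released: either it is unknown, or no
 * active devices remain in its scope; otherwise report -EBUSY.
 */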
3779int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3780{
3781 int i;
3782 struct device *dev;
3783 struct acpi_dmar_atsr *atsr;
3784 struct dmar_atsr_unit *atsru;
3785
3786 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3787 atsru = dmar_find_atsr(atsr);
3788 if (!atsru)
3789 return 0;
3790
3791 if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
3792 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
3793 i, dev)
3794 return -EBUSY;
3795
3796 return 0;
3797}
3798
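/*
 * Bring a hot-added DMAR unit into service: verify it supports the features
 * the running configuration relies on (pass-through, snooping, superpages),
 * allocate its domain bookkeeping and root entry, set up queued invalidation
 * and the fault interrupt, enable translation, and attach the static identity
 * domain if one exists.
 */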
Jiang Liuffebeb42014-11-09 22:48:02 +08003799static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
3800{
3801 int sp, ret = 0;
3802 struct intel_iommu *iommu = dmaru->iommu;
3803
3804 if (g_iommus[iommu->seq_id])
3805 return 0;
3806
3807 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
3808 pr_warn("IOMMU: %s doesn't support hardware pass through.\n",
3809 iommu->name);
3810 return -ENXIO;
3811 }
3812 if (!ecap_sc_support(iommu->ecap) &&
3813 domain_update_iommu_snooping(iommu)) {
3814 pr_warn("IOMMU: %s doesn't support snooping.\n",
3815 iommu->name);
3816 return -ENXIO;
3817 }
3818 sp = domain_update_iommu_superpage(iommu) - 1;
3819 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
3820 pr_warn("IOMMU: %s doesn't support large page.\n",
3821 iommu->name);
3822 return -ENXIO;
3823 }
3824
3825 /*
3826 * Disable translation if already enabled prior to OS handover.
3827 */
3828 if (iommu->gcmd & DMA_GCMD_TE)
3829 iommu_disable_translation(iommu);
3830
3831 g_iommus[iommu->seq_id] = iommu;
3832 ret = iommu_init_domains(iommu);
3833 if (ret == 0)
3834 ret = iommu_alloc_root_entry(iommu);
3835 if (ret)
3836 goto out;
3837
3838 if (dmaru->ignored) {
3839 /*
3840 * we always have to disable PMRs or DMA may fail on this device
3841 */
3842 if (force_on)
3843 iommu_disable_protect_mem_regions(iommu);
3844 return 0;
3845 }
3846
3847 intel_iommu_init_qi(iommu);
3848 iommu_flush_write_buffer(iommu);
3849 ret = dmar_set_interrupt(iommu);
3850 if (ret)
3851 goto disable_iommu;
3852
3853 iommu_set_root_entry(iommu);
3854 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3855 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3856 iommu_enable_translation(iommu);
3857
3858 if (si_domain) {
3859 ret = iommu_attach_domain(si_domain, iommu);
3860 if (ret < 0 || si_domain->id != ret)
3861 goto disable_iommu;
3862 domain_attach_iommu(si_domain, iommu);
3863 }
3864
3865 iommu_disable_protect_mem_regions(iommu);
3866 return 0;
3867
3868disable_iommu:
3869 disable_dmar_iommu(iommu);
3870out:
3871 free_dmar_iommu(iommu);
3872 return ret;
3873}
3874
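/*
 * DMAR unit hotplug entry point: add the unit on insert, otherwise tear it
 * down again.  Presumably reached from the DMAR hotplug handling in dmar.c;
 * it is a no-op while the Intel IOMMU is not enabled.
 */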
Jiang Liu6b197242014-11-09 22:47:58 +08003875int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
3876{
Jiang Liuffebeb42014-11-09 22:48:02 +08003877 int ret = 0;
3878 struct intel_iommu *iommu = dmaru->iommu;
3879
3880 if (!intel_iommu_enabled)
3881 return 0;
3882 if (iommu == NULL)
3883 return -EINVAL;
3884
3885 if (insert) {
3886 ret = intel_iommu_add(dmaru);
3887 } else {
3888 disable_dmar_iommu(iommu);
3889 free_dmar_iommu(iommu);
3890 }
3891
3892 return ret;
Jiang Liu6b197242014-11-09 22:47:58 +08003893}
3894
Jiang Liu9bdc5312014-01-06 14:18:27 +08003895static void intel_iommu_free_dmars(void)
3896{
3897 struct dmar_rmrr_unit *rmrru, *rmrr_n;
3898 struct dmar_atsr_unit *atsru, *atsr_n;
3899
3900 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
3901 list_del(&rmrru->list);
3902 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
3903 kfree(rmrru);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003904 }
3905
Jiang Liu9bdc5312014-01-06 14:18:27 +08003906 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
3907 list_del(&atsru->list);
3908 intel_iommu_free_atsr(atsru);
3909 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003910}
3911
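/*
 * Decide whether ATS (Address Translation Services) may be used for @dev:
 * walk up to the PCIe root port and check whether any ATSR unit on this
 * segment lists that root port, or covers all ports (include_all).  Returns
 * 1 if ATS is allowed, 0 otherwise.
 */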
3912int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3913{
Jiang Liub683b232014-02-19 14:07:32 +08003914 int i, ret = 1;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003915 struct pci_bus *bus;
David Woodhouse832bd852014-03-07 15:08:36 +00003916 struct pci_dev *bridge = NULL;
3917 struct device *tmp;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003918 struct acpi_dmar_atsr *atsr;
3919 struct dmar_atsr_unit *atsru;
3920
3921 dev = pci_physfn(dev);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003922 for (bus = dev->bus; bus; bus = bus->parent) {
Jiang Liub5f82dd2014-02-19 14:07:31 +08003923 bridge = bus->self;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003924 if (!bridge || !pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08003925 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003926 return 0;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003927 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003928 break;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003929 }
Jiang Liub5f82dd2014-02-19 14:07:31 +08003930 if (!bridge)
3931 return 0;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003932
Jiang Liu0e242612014-02-19 14:07:34 +08003933 rcu_read_lock();
Jiang Liub5f82dd2014-02-19 14:07:31 +08003934 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3935 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3936 if (atsr->segment != pci_domain_nr(dev->bus))
3937 continue;
3938
Jiang Liub683b232014-02-19 14:07:32 +08003939 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
David Woodhouse832bd852014-03-07 15:08:36 +00003940 if (tmp == &bridge->dev)
Jiang Liub683b232014-02-19 14:07:32 +08003941 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003942
3943 if (atsru->include_all)
Jiang Liub683b232014-02-19 14:07:32 +08003944 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003945 }
Jiang Liub683b232014-02-19 14:07:32 +08003946 ret = 0;
3947out:
Jiang Liu0e242612014-02-19 14:07:34 +08003948 rcu_read_unlock();
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003949
Jiang Liub683b232014-02-19 14:07:32 +08003950 return ret;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003951}
3952
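/*
 * Keep the cached RMRR and ATSR device-scope lists in sync as PCI devices
 * are added to or removed from the system.
 */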
Jiang Liu59ce0512014-02-19 14:07:35 +08003953int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
3954{
3955 int ret = 0;
3956 struct dmar_rmrr_unit *rmrru;
3957 struct dmar_atsr_unit *atsru;
3958 struct acpi_dmar_atsr *atsr;
3959 struct acpi_dmar_reserved_memory *rmrr;
3960
3961 if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
3962 return 0;
3963
3964 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
3965 rmrr = container_of(rmrru->hdr,
3966 struct acpi_dmar_reserved_memory, header);
3967 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3968 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
3969 ((void *)rmrr) + rmrr->header.length,
3970 rmrr->segment, rmrru->devices,
3971 rmrru->devices_cnt);
Jiang Liu27e24952014-06-20 15:08:06 +08003972			if (ret < 0)
Jiang Liu59ce0512014-02-19 14:07:35 +08003973 return ret;
3974 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
Jiang Liu27e24952014-06-20 15:08:06 +08003975 dmar_remove_dev_scope(info, rmrr->segment,
3976 rmrru->devices, rmrru->devices_cnt);
Jiang Liu59ce0512014-02-19 14:07:35 +08003977 }
3978 }
3979
3980 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3981 if (atsru->include_all)
3982 continue;
3983
3984 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3985 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3986 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
3987 (void *)atsr + atsr->header.length,
3988 atsr->segment, atsru->devices,
3989 atsru->devices_cnt);
3990 if (ret > 0)
3991 break;
3992			else if (ret < 0)
3993 return ret;
3994 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3995 if (dmar_remove_dev_scope(info, atsr->segment,
3996 atsru->devices, atsru->devices_cnt))
3997 break;
3998 }
3999 }
4000
4001 return 0;
4002}
4003
Fenghua Yu99dcade2009-11-11 07:23:06 -08004004/*
4005 * Here we only respond to the removal of a device (BUS_NOTIFY_REMOVED_DEVICE).
4006 *
4007 * A newly added device is not attached to its DMAR domain here yet; that
4008 * happens when the device is first mapped to an iova.
4009 */
4010static int device_notifier(struct notifier_block *nb,
4011 unsigned long action, void *data)
4012{
4013 struct device *dev = data;
Fenghua Yu99dcade2009-11-11 07:23:06 -08004014 struct dmar_domain *domain;
4015
David Woodhouse3d891942014-03-06 15:59:26 +00004016 if (iommu_dummy(dev))
David Woodhouse44cd6132009-12-02 10:18:30 +00004017 return 0;
4018
Joerg Roedel1196c2f2014-09-30 13:02:03 +02004019 if (action != BUS_NOTIFY_REMOVED_DEVICE)
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004020 return 0;
4021
David Woodhouse1525a292014-03-06 16:19:30 +00004022 domain = find_domain(dev);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004023 if (!domain)
4024 return 0;
4025
Jiang Liu3a5670e2014-02-19 14:07:33 +08004026 down_read(&dmar_global_lock);
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004027 domain_remove_one_dev_info(domain, dev);
Jiang Liuab8dfe22014-07-11 14:19:27 +08004028 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004029 domain_exit(domain);
Jiang Liu3a5670e2014-02-19 14:07:33 +08004030 up_read(&dmar_global_lock);
Alex Williamsona97590e2011-03-04 14:52:16 -07004031
Fenghua Yu99dcade2009-11-11 07:23:06 -08004032 return 0;
4033}
4034
4035static struct notifier_block device_nb = {
4036 .notifier_call = device_notifier,
4037};
4038
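/*
 * Memory hotplug support for the static identity (si) domain: newly onlined
 * ranges are added to the 1:1 mapping, while ranges going offline have their
 * IOVAs and page tables torn down and the IOTLBs flushed.
 */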
Jiang Liu75f05562014-02-19 14:07:37 +08004039static int intel_iommu_memory_notifier(struct notifier_block *nb,
4040 unsigned long val, void *v)
4041{
4042 struct memory_notify *mhp = v;
4043 unsigned long long start, end;
4044 unsigned long start_vpfn, last_vpfn;
4045
4046 switch (val) {
4047 case MEM_GOING_ONLINE:
4048 start = mhp->start_pfn << PAGE_SHIFT;
4049 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4050 if (iommu_domain_identity_map(si_domain, start, end)) {
4051 pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
4052 start, end);
4053 return NOTIFY_BAD;
4054 }
4055 break;
4056
4057 case MEM_OFFLINE:
4058 case MEM_CANCEL_ONLINE:
4059 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4060 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4061 while (start_vpfn <= last_vpfn) {
4062 struct iova *iova;
4063 struct dmar_drhd_unit *drhd;
4064 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00004065 struct page *freelist;
Jiang Liu75f05562014-02-19 14:07:37 +08004066
4067 iova = find_iova(&si_domain->iovad, start_vpfn);
4068 if (iova == NULL) {
4069				pr_debug("dmar: failed to get IOVA for PFN %lx\n",
4070 start_vpfn);
4071 break;
4072 }
4073
4074 iova = split_and_remove_iova(&si_domain->iovad, iova,
4075 start_vpfn, last_vpfn);
4076 if (iova == NULL) {
4077 pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
4078 start_vpfn, last_vpfn);
4079 return NOTIFY_BAD;
4080 }
4081
David Woodhouseea8ea462014-03-05 17:09:32 +00004082 freelist = domain_unmap(si_domain, iova->pfn_lo,
4083 iova->pfn_hi);
4084
Jiang Liu75f05562014-02-19 14:07:37 +08004085 rcu_read_lock();
4086 for_each_active_iommu(iommu, drhd)
4087 iommu_flush_iotlb_psi(iommu, si_domain->id,
Jiang Liua156ef92014-07-11 14:19:36 +08004088 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00004089 !freelist, 0);
Jiang Liu75f05562014-02-19 14:07:37 +08004090 rcu_read_unlock();
David Woodhouseea8ea462014-03-05 17:09:32 +00004091 dma_free_pagelist(freelist);
Jiang Liu75f05562014-02-19 14:07:37 +08004092
4093 start_vpfn = iova->pfn_hi + 1;
4094 free_iova_mem(iova);
4095 }
4096 break;
4097 }
4098
4099 return NOTIFY_OK;
4100}
4101
4102static struct notifier_block intel_iommu_memory_nb = {
4103 .notifier_call = intel_iommu_memory_notifier,
4104 .priority = 0
4105};
4106
Alex Williamsona5459cf2014-06-12 16:12:31 -06004107
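/*
 * sysfs attributes exposed for each hardware unit through the "intel-iommu"
 * attribute group registered below, e.g. (assuming the usual "dmar0" unit
 * name):
 *
 *	/sys/class/iommu/dmar0/intel-iommu/version
 *	/sys/class/iommu/dmar0/intel-iommu/address
 *	/sys/class/iommu/dmar0/intel-iommu/cap
 *	/sys/class/iommu/dmar0/intel-iommu/ecap
 */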
4108static ssize_t intel_iommu_show_version(struct device *dev,
4109 struct device_attribute *attr,
4110 char *buf)
4111{
4112 struct intel_iommu *iommu = dev_get_drvdata(dev);
4113 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4114 return sprintf(buf, "%d:%d\n",
4115 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4116}
4117static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4118
4119static ssize_t intel_iommu_show_address(struct device *dev,
4120 struct device_attribute *attr,
4121 char *buf)
4122{
4123 struct intel_iommu *iommu = dev_get_drvdata(dev);
4124 return sprintf(buf, "%llx\n", iommu->reg_phys);
4125}
4126static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4127
4128static ssize_t intel_iommu_show_cap(struct device *dev,
4129 struct device_attribute *attr,
4130 char *buf)
4131{
4132 struct intel_iommu *iommu = dev_get_drvdata(dev);
4133 return sprintf(buf, "%llx\n", iommu->cap);
4134}
4135static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4136
4137static ssize_t intel_iommu_show_ecap(struct device *dev,
4138 struct device_attribute *attr,
4139 char *buf)
4140{
4141 struct intel_iommu *iommu = dev_get_drvdata(dev);
4142 return sprintf(buf, "%llx\n", iommu->ecap);
4143}
4144static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4145
4146static struct attribute *intel_iommu_attrs[] = {
4147 &dev_attr_version.attr,
4148 &dev_attr_address.attr,
4149 &dev_attr_cap.attr,
4150 &dev_attr_ecap.attr,
4151 NULL,
4152};
4153
4154static struct attribute_group intel_iommu_group = {
4155 .name = "intel-iommu",
4156 .attrs = intel_iommu_attrs,
4157};
4158
4159const struct attribute_group *intel_iommu_groups[] = {
4160 &intel_iommu_group,
4161 NULL,
4162};
4163
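/*
 * Main initialization entry point: parse the DMAR table and device scopes,
 * build the DMA remapping structures (init_dmars()), install intel_dma_ops
 * as the DMA API backend, register the IOMMU API ops and the bus, memory and
 * PM notifiers.  On failure the setup is unwound and the system falls back to
 * non-IOMMU operation, unless a TXT/tboot launch forces a panic.
 */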
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004164int __init intel_iommu_init(void)
4165{
Jiang Liu9bdc5312014-01-06 14:18:27 +08004166 int ret = -ENODEV;
Takao Indoh3a93c842013-04-23 17:35:03 +09004167 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08004168 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004169
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004170 /* VT-d is required for a TXT/tboot launch, so enforce that */
4171 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004172
Jiang Liu3a5670e2014-02-19 14:07:33 +08004173 if (iommu_init_mempool()) {
4174 if (force_on)
4175 panic("tboot: Failed to initialize iommu memory\n");
4176 return -ENOMEM;
4177 }
4178
4179 down_write(&dmar_global_lock);
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004180 if (dmar_table_init()) {
4181 if (force_on)
4182 panic("tboot: Failed to initialize DMAR table\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004183 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004184 }
4185
Takao Indoh3a93c842013-04-23 17:35:03 +09004186 /*
4187 * Disable translation if already enabled prior to OS handover.
4188 */
Jiang Liu7c919772014-01-06 14:18:18 +08004189 for_each_active_iommu(iommu, drhd)
Takao Indoh3a93c842013-04-23 17:35:03 +09004190 if (iommu->gcmd & DMA_GCMD_TE)
4191 iommu_disable_translation(iommu);
Takao Indoh3a93c842013-04-23 17:35:03 +09004192
Suresh Siddhac2c72862011-08-23 17:05:19 -07004193 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004194 if (force_on)
4195 panic("tboot: Failed to initialize DMAR device scope\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004196 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004197 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07004198
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004199 if (no_iommu || dmar_disabled)
Jiang Liu9bdc5312014-01-06 14:18:27 +08004200 goto out_free_dmar;
Suresh Siddha2ae21012008-07-10 11:16:43 -07004201
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004202 if (list_empty(&dmar_rmrr_units))
4203 printk(KERN_INFO "DMAR: No RMRR found\n");
4204
4205 if (list_empty(&dmar_atsr_units))
4206 printk(KERN_INFO "DMAR: No ATSR found\n");
4207
Joseph Cihula51a63e62011-03-21 11:04:24 -07004208 if (dmar_init_reserved_ranges()) {
4209 if (force_on)
4210 panic("tboot: Failed to reserve iommu ranges\n");
Jiang Liu3a5670e2014-02-19 14:07:33 +08004211 goto out_free_reserved_range;
Joseph Cihula51a63e62011-03-21 11:04:24 -07004212 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004213
4214 init_no_remapping_devices();
4215
Joseph Cihulab7792602011-05-03 00:08:37 -07004216 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004217 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004218 if (force_on)
4219 panic("tboot: Failed to initialize DMARs\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004220 printk(KERN_ERR "IOMMU: dmar init failed\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004221 goto out_free_reserved_range;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004222 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08004223 up_write(&dmar_global_lock);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004224 printk(KERN_INFO
4225 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
4226
mark gross5e0d2a62008-03-04 15:22:08 -08004227 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004228#ifdef CONFIG_SWIOTLB
4229 swiotlb = 0;
4230#endif
David Woodhouse19943b02009-08-04 16:19:20 +01004231 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07004232
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004233 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004234
Alex Williamsona5459cf2014-06-12 16:12:31 -06004235 for_each_active_iommu(iommu, drhd)
4236 iommu->iommu_dev = iommu_device_create(NULL, iommu,
4237 intel_iommu_groups,
4238 iommu->name);
4239
Joerg Roedel4236d97d2011-09-06 17:56:07 +02004240 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004241 bus_register_notifier(&pci_bus_type, &device_nb);
Jiang Liu75f05562014-02-19 14:07:37 +08004242 if (si_domain && !hw_pass_through)
4243 register_memory_notifier(&intel_iommu_memory_nb);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004244
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02004245 intel_iommu_enabled = 1;
4246
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004247 return 0;
Jiang Liu9bdc5312014-01-06 14:18:27 +08004248
4249out_free_reserved_range:
4250 put_iova_domain(&reserved_iova_list);
Jiang Liu9bdc5312014-01-06 14:18:27 +08004251out_free_dmar:
4252 intel_iommu_free_dmars();
Jiang Liu3a5670e2014-02-19 14:07:33 +08004253 up_write(&dmar_global_lock);
4254 iommu_exit_mempool();
Jiang Liu9bdc5312014-01-06 14:18:27 +08004255 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004256}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07004257
Alex Williamson579305f2014-07-03 09:51:43 -06004258static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4259{
4260 struct intel_iommu *iommu = opaque;
4261
4262 iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4263 return 0;
4264}
4265
4266/*
4267 * NB - intel-iommu lacks any sort of reference counting for the users of
4268 * dependent devices. If multiple endpoints have intersecting dependent
4269 * devices, unbinding the driver from any one of them will possibly leave
4270 * the others unable to operate.
4271 */
Han, Weidong3199aa62009-02-26 17:31:12 +08004272static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004273 struct device *dev)
Han, Weidong3199aa62009-02-26 17:31:12 +08004274{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004275 if (!iommu || !dev || !dev_is_pci(dev))
Han, Weidong3199aa62009-02-26 17:31:12 +08004276 return;
4277
Alex Williamson579305f2014-07-03 09:51:43 -06004278 pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
Han, Weidong3199aa62009-02-26 17:31:12 +08004279}
4280
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004281static void domain_remove_one_dev_info(struct dmar_domain *domain,
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004282 struct device *dev)
Weidong Hanc7151a82008-12-08 22:51:37 +08004283{
Yijing Wangbca2b912013-10-31 17:26:04 +08004284 struct device_domain_info *info, *tmp;
Weidong Hanc7151a82008-12-08 22:51:37 +08004285 struct intel_iommu *iommu;
4286 unsigned long flags;
4287 int found = 0;
David Woodhouse156baca2014-03-09 14:00:57 -07004288 u8 bus, devfn;
Weidong Hanc7151a82008-12-08 22:51:37 +08004289
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004290 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08004291 if (!iommu)
4292 return;
4293
4294 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wangbca2b912013-10-31 17:26:04 +08004295 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004296 if (info->iommu == iommu && info->bus == bus &&
4297 info->devfn == devfn) {
David Woodhouse109b9b02012-05-25 17:43:02 +01004298 unlink_domain_info(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004299 spin_unlock_irqrestore(&device_domain_lock, flags);
4300
Yu Zhao93a23a72009-05-18 13:51:37 +08004301 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004302 iommu_detach_dev(iommu, info->bus, info->devfn);
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004303 iommu_detach_dependent_devices(iommu, dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08004304 free_devinfo_mem(info);
4305
4306 spin_lock_irqsave(&device_domain_lock, flags);
4307
4308 if (found)
4309 break;
4310 else
4311 continue;
4312 }
4313
4314		/* if there are no other devices under the same iommu
4315		 * owned by this domain, clear this iommu in iommu_bmp,
4316		 * update iommu count and coherency
4317 */
David Woodhouse8bbc4412014-03-09 13:52:37 -07004318 if (info->iommu == iommu)
Weidong Hanc7151a82008-12-08 22:51:37 +08004319 found = 1;
4320 }
4321
Roland Dreier3e7abe22011-07-20 06:22:21 -07004322 spin_unlock_irqrestore(&device_domain_lock, flags);
4323
Weidong Hanc7151a82008-12-08 22:51:37 +08004324 if (found == 0) {
Jiang Liufb170fb2014-07-11 14:19:28 +08004325 domain_detach_iommu(domain, iommu);
4326 if (!domain_type_is_vm_or_si(domain))
4327 iommu_detach_domain(domain, iommu);
Weidong Hanc7151a82008-12-08 22:51:37 +08004328 }
Weidong Hanc7151a82008-12-08 22:51:37 +08004329}
4330
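/*
 * Minimal domain setup for externally managed (IOMMU API) domains: reserve
 * the special IOVA ranges, derive the AGAW from the requested guest width,
 * and allocate the top-level page directory.  No hardware unit is bound
 * here; that happens when a device is attached.
 */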
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004331static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08004332{
4333 int adjust_width;
4334
4335 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004336 domain_reserve_special_ranges(domain);
4337
4338 /* calculate AGAW */
4339 domain->gaw = guest_width;
4340 adjust_width = guestwidth_to_adjustwidth(guest_width);
4341 domain->agaw = width_to_agaw(adjust_width);
4342
Weidong Han5e98c4b2008-12-08 23:03:27 +08004343 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08004344 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01004345 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004346 domain->max_addr = 0;
Weidong Han5e98c4b2008-12-08 23:03:27 +08004347
4348 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07004349 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004350 if (!domain->pgd)
4351 return -ENOMEM;
4352 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4353 return 0;
4354}
4355
Joerg Roedel5d450802008-12-03 14:52:32 +01004356static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004357{
Joerg Roedel5d450802008-12-03 14:52:32 +01004358 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03004359
Jiang Liuab8dfe22014-07-11 14:19:27 +08004360 dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
Joerg Roedel5d450802008-12-03 14:52:32 +01004361 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03004362 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01004363 "intel_iommu_domain_init: dmar_domain == NULL\n");
4364 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03004365 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004366 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03004367 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01004368 "intel_iommu_domain_init() failed\n");
Jiang Liu92d03cc2014-02-19 14:07:28 +08004369 domain_exit(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01004370 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03004371 }
Allen Kay8140a952011-10-14 12:32:17 -07004372 domain_update_iommu_cap(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01004373 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004374
Joerg Roedel8a0e7152012-01-26 19:40:54 +01004375 domain->geometry.aperture_start = 0;
4376 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4377 domain->geometry.force_aperture = true;
4378
Joerg Roedel5d450802008-12-03 14:52:32 +01004379 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004380}
Kay, Allen M38717942008-09-09 18:37:29 +03004381
Joerg Roedel5d450802008-12-03 14:52:32 +01004382static void intel_iommu_domain_destroy(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004383{
Joerg Roedel5d450802008-12-03 14:52:32 +01004384 struct dmar_domain *dmar_domain = domain->priv;
4385
4386 domain->priv = NULL;
Jiang Liu92d03cc2014-02-19 14:07:28 +08004387 domain_exit(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03004388}
Kay, Allen M38717942008-09-09 18:37:29 +03004389
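/*
 * IOMMU API attach path.  A rough sketch of how a caller reaches this via the
 * generic API (names from include/linux/iommu.h, not from this file):
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *	if (dom && !iommu_attach_device(dom, &pdev->dev))
 *		iommu_map(dom, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
 *
 * Devices covered by an RMRR are refused, any previous domain binding is torn
 * down, and the domain's page-table depth is trimmed to what this unit's AGAW
 * can walk before the device is added to the domain.
 */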
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004390static int intel_iommu_attach_device(struct iommu_domain *domain,
4391 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004392{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004393 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004394 struct intel_iommu *iommu;
4395 int addr_width;
David Woodhouse156baca2014-03-09 14:00:57 -07004396 u8 bus, devfn;
Kay, Allen M38717942008-09-09 18:37:29 +03004397
Alex Williamsonc875d2c2014-07-03 09:57:02 -06004398 if (device_is_rmrr_locked(dev)) {
4399 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
4400 return -EPERM;
4401 }
4402
David Woodhouse7207d8f2014-03-09 16:31:06 -07004403 /* normally dev is not mapped */
4404 if (unlikely(domain_context_mapped(dev))) {
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004405 struct dmar_domain *old_domain;
4406
David Woodhouse1525a292014-03-06 16:19:30 +00004407 old_domain = find_domain(dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004408 if (old_domain) {
Jiang Liuab8dfe22014-07-11 14:19:27 +08004409 if (domain_type_is_vm_or_si(dmar_domain))
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004410 domain_remove_one_dev_info(old_domain, dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004411 else
4412 domain_remove_dev_info(old_domain);
Joerg Roedel62c22162014-12-09 12:56:45 +01004413
4414 if (!domain_type_is_vm_or_si(old_domain) &&
4415 list_empty(&old_domain->devices))
4416 domain_exit(old_domain);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004417 }
4418 }
4419
David Woodhouse156baca2014-03-09 14:00:57 -07004420 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004421 if (!iommu)
4422 return -ENODEV;
4423
4424 /* check if this iommu agaw is sufficient for max mapped address */
4425 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01004426 if (addr_width > cap_mgaw(iommu->cap))
4427 addr_width = cap_mgaw(iommu->cap);
4428
4429 if (dmar_domain->max_addr > (1LL << addr_width)) {
4430 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004431 "sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01004432 __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004433 return -EFAULT;
4434 }
Tom Lyona99c47a2010-05-17 08:20:45 +01004435 dmar_domain->gaw = addr_width;
4436
4437 /*
4438 * Knock out extra levels of page tables if necessary
4439 */
4440 while (iommu->agaw < dmar_domain->agaw) {
4441 struct dma_pte *pte;
4442
4443 pte = dmar_domain->pgd;
4444 if (dma_pte_present(pte)) {
Sheng Yang25cbff12010-06-12 19:21:42 +08004445 dmar_domain->pgd = (struct dma_pte *)
4446 phys_to_virt(dma_pte_addr(pte));
Jan Kiszka7a661012010-11-02 08:05:51 +01004447 free_pgtable_page(pte);
Tom Lyona99c47a2010-05-17 08:20:45 +01004448 }
4449 dmar_domain->agaw--;
4450 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004451
David Woodhouse5913c9b2014-03-09 16:27:31 -07004452 return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004453}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004454
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004455static void intel_iommu_detach_device(struct iommu_domain *domain,
4456 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004457{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004458 struct dmar_domain *dmar_domain = domain->priv;
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004459
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004460 domain_remove_one_dev_info(dmar_domain, dev);
Kay, Allen M38717942008-09-09 18:37:29 +03004461}
Kay, Allen M38717942008-09-09 18:37:29 +03004462
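/*
 * Map a range for an IOMMU API domain: translate the requested permissions
 * into DMA_PTE_* bits (snooping only if the domain supports it), grow the
 * domain's max_addr if needed, and hand the range to domain_pfn_mapping() in
 * page-frame units.
 */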
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004463static int intel_iommu_map(struct iommu_domain *domain,
4464 unsigned long iova, phys_addr_t hpa,
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004465 size_t size, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03004466{
Joerg Roedeldde57a22008-12-03 15:04:09 +01004467 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004468 u64 max_addr;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004469 int prot = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004470 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004471
Joerg Roedeldde57a22008-12-03 15:04:09 +01004472 if (iommu_prot & IOMMU_READ)
4473 prot |= DMA_PTE_READ;
4474 if (iommu_prot & IOMMU_WRITE)
4475 prot |= DMA_PTE_WRITE;
Sheng Yang9cf066972009-03-18 15:33:07 +08004476 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4477 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004478
David Woodhouse163cc522009-06-28 00:51:17 +01004479 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004480 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004481 u64 end;
4482
4483 /* check if minimum agaw is sufficient for mapped address */
Tom Lyon8954da12010-05-17 08:19:52 +01004484 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004485 if (end < max_addr) {
Tom Lyon8954da12010-05-17 08:19:52 +01004486 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004487 "sufficient for the mapped address (%llx)\n",
Tom Lyon8954da12010-05-17 08:19:52 +01004488 __func__, dmar_domain->gaw, max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004489 return -EFAULT;
4490 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01004491 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004492 }
David Woodhousead051222009-06-28 14:22:28 +01004493 /* Round up size to next multiple of PAGE_SIZE, if it and
4494 the low bits of hpa would take us onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01004495 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01004496 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4497 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004498 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03004499}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004500
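/*
 * Unmap a range from an IOMMU API domain: collect the freed page-table pages
 * on a freelist, flush the IOTLB of every unit the domain is attached to, and
 * only then release the pages, so the hardware never walks freed memory.
 */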
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004501static size_t intel_iommu_unmap(struct iommu_domain *domain,
David Woodhouseea8ea462014-03-05 17:09:32 +00004502 unsigned long iova, size_t size)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004503{
Joerg Roedeldde57a22008-12-03 15:04:09 +01004504 struct dmar_domain *dmar_domain = domain->priv;
David Woodhouseea8ea462014-03-05 17:09:32 +00004505 struct page *freelist = NULL;
4506 struct intel_iommu *iommu;
4507 unsigned long start_pfn, last_pfn;
4508 unsigned int npages;
4509 int iommu_id, num, ndomains, level = 0;
Sheng Yang4b99d352009-07-08 11:52:52 +01004510
David Woodhouse5cf0a762014-03-19 16:07:49 +00004511 /* Cope with horrid API which requires us to unmap more than the
4512 size argument if it happens to be a large-page mapping. */
4513 if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
4514 BUG();
4515
4516 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
4517 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
4518
David Woodhouseea8ea462014-03-05 17:09:32 +00004519 start_pfn = iova >> VTD_PAGE_SHIFT;
4520 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
4521
4522 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
4523
4524 npages = last_pfn - start_pfn + 1;
4525
4526 for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
4527 iommu = g_iommus[iommu_id];
4528
4529 /*
4530 * find bit position of dmar_domain
4531 */
4532 ndomains = cap_ndoms(iommu->cap);
4533 for_each_set_bit(num, iommu->domain_ids, ndomains) {
4534 if (iommu->domains[num] == dmar_domain)
4535 iommu_flush_iotlb_psi(iommu, num, start_pfn,
4536 npages, !freelist, 0);
4537 }
4538
4539 }
4540
4541 dma_free_pagelist(freelist);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004542
David Woodhouse163cc522009-06-28 00:51:17 +01004543 if (dmar_domain->max_addr == iova + size)
4544 dmar_domain->max_addr = iova;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004545
David Woodhouse5cf0a762014-03-19 16:07:49 +00004546 return size;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004547}
Kay, Allen M38717942008-09-09 18:37:29 +03004548
Joerg Roedeld14d6572008-12-03 15:06:57 +01004549static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
Varun Sethibb5547ac2013-03-29 01:23:58 +05304550 dma_addr_t iova)
Kay, Allen M38717942008-09-09 18:37:29 +03004551{
Joerg Roedeld14d6572008-12-03 15:06:57 +01004552 struct dmar_domain *dmar_domain = domain->priv;
Kay, Allen M38717942008-09-09 18:37:29 +03004553 struct dma_pte *pte;
David Woodhouse5cf0a762014-03-19 16:07:49 +00004554 int level = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004555 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004556
David Woodhouse5cf0a762014-03-19 16:07:49 +00004557 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
Kay, Allen M38717942008-09-09 18:37:29 +03004558 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004559 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03004560
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004561 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03004562}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004563
Joerg Roedel5d587b82014-09-05 10:50:45 +02004564static bool intel_iommu_capable(enum iommu_cap cap)
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004565{
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004566 if (cap == IOMMU_CAP_CACHE_COHERENCY)
Joerg Roedel5d587b82014-09-05 10:50:45 +02004567 return domain_update_iommu_snooping(NULL) == 1;
Tom Lyon323f99c2010-07-02 16:56:14 -04004568 if (cap == IOMMU_CAP_INTR_REMAP)
Joerg Roedel5d587b82014-09-05 10:50:45 +02004569 return irq_remapping_enabled == 1;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004570
Joerg Roedel5d587b82014-09-05 10:50:45 +02004571 return false;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004572}
4573
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004574static int intel_iommu_add_device(struct device *dev)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004575{
Alex Williamsona5459cf2014-06-12 16:12:31 -06004576 struct intel_iommu *iommu;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004577 struct iommu_group *group;
David Woodhouse156baca2014-03-09 14:00:57 -07004578 u8 bus, devfn;
Alex Williamson70ae6f02011-10-21 15:56:11 -04004579
Alex Williamsona5459cf2014-06-12 16:12:31 -06004580 iommu = device_to_iommu(dev, &bus, &devfn);
4581 if (!iommu)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004582 return -ENODEV;
4583
Alex Williamsona5459cf2014-06-12 16:12:31 -06004584 iommu_device_link(iommu->iommu_dev, dev);
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004585
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004586 group = iommu_group_get_for_dev(dev);
Alex Williamson783f1572012-05-30 14:19:43 -06004587
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004588 if (IS_ERR(group))
4589 return PTR_ERR(group);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004590
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004591 iommu_group_put(group);
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004592 return 0;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004593}
4594
4595static void intel_iommu_remove_device(struct device *dev)
4596{
Alex Williamsona5459cf2014-06-12 16:12:31 -06004597 struct intel_iommu *iommu;
4598 u8 bus, devfn;
4599
4600 iommu = device_to_iommu(dev, &bus, &devfn);
4601 if (!iommu)
4602 return;
4603
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004604 iommu_group_remove_device(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004605
4606 iommu_device_unlink(iommu->iommu_dev, dev);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004607}
4608
Thierry Redingb22f6432014-06-27 09:03:12 +02004609static const struct iommu_ops intel_iommu_ops = {
Joerg Roedel5d587b82014-09-05 10:50:45 +02004610 .capable = intel_iommu_capable,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004611 .domain_init = intel_iommu_domain_init,
4612 .domain_destroy = intel_iommu_domain_destroy,
4613 .attach_dev = intel_iommu_attach_device,
4614 .detach_dev = intel_iommu_detach_device,
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004615 .map = intel_iommu_map,
4616 .unmap = intel_iommu_unmap,
Olav Haugan315786e2014-10-25 09:55:16 -07004617 .map_sg = default_iommu_map_sg,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004618 .iova_to_phys = intel_iommu_iova_to_phys,
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004619 .add_device = intel_iommu_add_device,
4620 .remove_device = intel_iommu_remove_device,
Ohad Ben-Cohen6d1c56a2011-11-10 11:32:30 +02004621 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004622};
David Woodhouse9af88142009-02-13 23:18:03 +00004623
Daniel Vetter94526182013-01-20 23:50:13 +01004624static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4625{
4626 /* G4x/GM45 integrated gfx dmar support is totally busted. */
4627 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4628 dmar_map_gfx = 0;
4629}
4630
4631DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4632DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4633DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4634DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4635DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4636DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4637DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4638
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004639static void quirk_iommu_rwbf(struct pci_dev *dev)
David Woodhouse9af88142009-02-13 23:18:03 +00004640{
4641 /*
4642 * Mobile 4 Series Chipset neglects to set RWBF capability,
Daniel Vetter210561f2013-01-21 19:48:59 +01004643 * but needs it. Same seems to hold for the desktop versions.
David Woodhouse9af88142009-02-13 23:18:03 +00004644 */
4645 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4646 rwbf_quirk = 1;
4647}
4648
4649DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
Daniel Vetter210561f2013-01-21 19:48:59 +01004650DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4651DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4652DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4653DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4654DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4655DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
David Woodhousee0fc7e02009-09-30 09:12:17 -07004656
Adam Jacksoneecfd572010-08-25 21:17:34 +01004657#define GGC 0x52
4658#define GGC_MEMORY_SIZE_MASK (0xf << 8)
4659#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4660#define GGC_MEMORY_SIZE_1M (0x1 << 8)
4661#define GGC_MEMORY_SIZE_2M (0x3 << 8)
4662#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4663#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4664#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4665#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
4666
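/*
 * Calpella/Ironlake integrated graphics: if the BIOS left no stolen memory
 * for a shadow GTT (GGC_MEMORY_VT_ENABLED clear in the GGC register defined
 * above), graphics DMA remapping cannot work and is disabled; otherwise
 * batched IOTLB flushing is disabled (intel_iommu_strict) because the
 * graphics device must be idle before a flush.
 */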
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004667static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
David Woodhouse9eecabc2010-09-21 22:28:23 +01004668{
4669 unsigned short ggc;
4670
Adam Jacksoneecfd572010-08-25 21:17:34 +01004671 if (pci_read_config_word(dev, GGC, &ggc))
David Woodhouse9eecabc2010-09-21 22:28:23 +01004672 return;
4673
Adam Jacksoneecfd572010-08-25 21:17:34 +01004674 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
David Woodhouse9eecabc2010-09-21 22:28:23 +01004675 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4676 dmar_map_gfx = 0;
David Woodhouse6fbcfb32011-09-25 19:11:14 -07004677 } else if (dmar_map_gfx) {
4678 /* we have to ensure the gfx device is idle before we flush */
4679 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
4680 intel_iommu_strict = 1;
4681 }
David Woodhouse9eecabc2010-09-21 22:28:23 +01004682}
4683DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4684DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4685DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4686DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4687
David Woodhousee0fc7e02009-09-30 09:12:17 -07004688/* On Tylersburg chipsets, some BIOSes have been known to enable the
4689 ISOCH DMAR unit for the Azalia sound device, but not give it any
4690 TLB entries, which causes it to deadlock. Check for that. We do
4691 this in a function called from init_dmars(), instead of in a PCI
4692 quirk, because we don't want to print the obnoxious "BIOS broken"
4693 message if VT-d is actually disabled.
4694*/
4695static void __init check_tylersburg_isoch(void)
4696{
4697 struct pci_dev *pdev;
4698 uint32_t vtisochctrl;
4699
4700 /* If there's no Azalia in the system anyway, forget it. */
4701 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4702 if (!pdev)
4703 return;
4704 pci_dev_put(pdev);
4705
4706 /* System Management Registers. Might be hidden, in which case
4707 we can't do the sanity check. But that's OK, because the
4708 known-broken BIOSes _don't_ actually hide it, so far. */
4709 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4710 if (!pdev)
4711 return;
4712
4713 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4714 pci_dev_put(pdev);
4715 return;
4716 }
4717
4718 pci_dev_put(pdev);
4719
4720 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4721 if (vtisochctrl & 1)
4722 return;
4723
4724 /* Drop all bits other than the number of TLB entries */
4725 vtisochctrl &= 0x1c;
4726
4727 /* If we have the recommended number of TLB entries (16), fine. */
4728 if (vtisochctrl == 0x10)
4729 return;
4730
4731 /* Zero TLB entries? You get to ride the short bus to school. */
4732 if (!vtisochctrl) {
4733 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4734 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4735 dmi_get_system_info(DMI_BIOS_VENDOR),
4736 dmi_get_system_info(DMI_BIOS_VERSION),
4737 dmi_get_system_info(DMI_PRODUCT_VERSION));
4738 iommu_identity_mapping |= IDENTMAP_AZALIA;
4739 return;
4740 }
4741
4742 printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
4743 vtisochctrl);
4744}