/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
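
/*
 * For example, with the default 48-bit guest address width:
 * __DOMAIN_MAX_PFN(48) = (1ULL << 36) - 1, the highest 4KiB page frame
 * such a domain can map, and DOMAIN_MAX_ADDR(48) is that PFN shifted
 * back up by VTD_PAGE_SHIFT; DOMAIN_MAX_PFN() additionally clamps the
 * value to (unsigned long)-1 so it stays usable on 32-bit builds.
 */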

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
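
/*
 * ~0xFFFUL leaves every bit from 12 upwards set, so we advertise 4KiB,
 * 8KiB, 16KiB and every larger power-of-two size, not just the 2MiB and
 * 1GiB sizes the hardware can use as superpages; requests for the other
 * sizes are presumably broken down into 4KiB (or hardware superpage)
 * PTEs by the mapping code later in this file.
 */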

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}
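
/*
 * Example of the agaw arithmetic: a 48-bit domain gives
 * width_to_agaw(48) = DIV_ROUND_UP(18, 9) = 2, agaw_to_width(2) = 48
 * and agaw_to_level(2) = 4, i.e. a 4-level page table; agaw 1 and 3
 * correspond to 39-bit/3-level and 57-bit/5-level tables respectively.
 */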

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}
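
/*
 * Each level covers another LEVEL_STRIDE (9) bits of the page frame
 * number, e.g. level 2 entries cover level_size(2) = 512 pages (2MiB of
 * IOVA) and pfn_level_offset(pfn, 2) is (pfn >> 9) & 0x1ff;
 * align_to_level(pfn, 2) rounds pfn up to the next 512-page boundary.
 */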

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d cannot be enabled successfully
 * (used when the kernel is launched with TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
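/*
 * With 4KiB pages and 16-byte entries this works out to 256 root
 * entries, one for each possible PCI bus number.
 */
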
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val &= ~VTD_PAGE_MASK;
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root)?phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: avail
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
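
/*
 * For illustration only (the real context-mapping code lives further
 * down this file and may differ in detail), a context entry would be
 * built with the helpers above roughly like:
 *
 *	context_set_domain_id(context, id);
 *	context_set_address_width(context, domain->agaw);
 *	context_set_address_root(context, virt_to_phys(domain->pgd));
 *	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
 *	context_set_fault_enable(context);
 *	context_set_present(context);
 */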

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

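/*
 * A leaf entry for an ordinary read/write 4KiB mapping therefore looks
 * something like (host_pfn being the hypothetical target page frame):
 *
 *	pte->val = ((u64)host_pfn << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
 *
 * with DMA_PTE_LARGE_PAGE (bit 7) also set when the entry describes a
 * 2MiB or 1GiB superpage.
 */
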
static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

/*
 * This domain is a static identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* domain represents a virtual machine; devices belonging to the domain
 * may sit behind more than one iommu, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct device *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units	*/
	struct acpi_dmar_header *hdr;	/* ACPI header		*/
	u64	base_address;		/* reserved base address */
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
	struct page *freelist[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev);
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static const struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
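
/*
 * Options are comma separated on the kernel command line, e.g.
 * "intel_iommu=on,strict" enables the IOMMU and disables batched IOTLB
 * flushing, while "intel_iommu=off" disables it entirely; "igfx_off",
 * "forcedac" and "sp_off" are handled the same way by the loop above.
 */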

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}

static inline int domain_type_is_vm(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}

static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
	return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
				DOMAIN_FLAG_STATIC_IDENTITY);
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
					unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}

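/*
 * E.g. for a 4-level (agaw 2, 48-bit) domain, addr_width is 48 - 12 = 36,
 * so any pfn at or above 1UL << 36 is reported as unsupported; when
 * addr_width is not smaller than BITS_PER_LONG no pfn can exceed it.
 */
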
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and fall
 * back to a smaller supported agaw for iommus that don't support the
 * default one.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain_type_is_vm_or_si(domain));
	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int i, found = 0;

	domain->iommu_coherency = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		found = 1;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	if (found)
		return;

	/* No hardware attached; use lowest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_coherent(iommu->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	rcu_read_unlock();
}

static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ret = 1;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			if (!ecap_sc_support(iommu->ecap)) {
				ret = 0;
				break;
			}
		}
	}
	rcu_read_unlock();

	return ret;
}

static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		return 0;
	}

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			mask &= cap_super_page_val(iommu->cap);
			if (!mask)
				break;
		}
	}
	rcu_read_unlock();

	return fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain->iommu_snooping = domain_update_iommu_snooping(NULL);
	domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}

static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	struct pci_dev *ptmp, *pdev = NULL;
	u16 segment = 0;
	int i;

	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		segment = pci_domain_nr(pdev->bus);
	} else if (ACPI_COMPANION(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				*bus = drhd->devices[i].bus;
				*devfn = drhd->devices[i].devfn;
				goto out;
			}

			if (!pdev || !dev_is_pci(tmp))
				continue;

			ptmp = to_pci_dev(tmp);
			if (ptmp->subordinate &&
			    ptmp->subordinate->number <= pdev->bus->number &&
			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
		got_pdev:
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;
			goto out;
		}
	}
	iommu = NULL;
 out:
	rcu_read_unlock();

	return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn], \
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (!domain_pfn_supported(domain, pfn))
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}

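/*
 * Example walk for a 4-level (agaw 2) domain: starting at domain->pgd,
 * each iteration picks the 9-bit pfn_level_offset() index for the
 * current level, allocates any missing intermediate table with
 * alloc_pgtable_page() and installs it with cmpxchg64() (so if another
 * CPU raced us, our page is freed and theirs is used), then descends
 * until *target_level is reached: 1 for ordinary 4KiB PTEs, 2 when the
 * caller wants a 2MiB superpage slot.
 */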

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (dma_pte_superpage(pte)) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte; a tlb flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}

static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level - 1);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	pte = page_address(pg);
	do {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
		pte++;
	} while (!first_pte_in_page(pte));

	return freelist;
}

static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}

/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
struct page *domain_unmap(struct dmar_domain *domain,
			  unsigned long start_pfn,
			  unsigned long last_pfn)
{
	struct page *freelist = NULL;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
				       domain->pgd, 0, start_pfn, last_pfn, NULL);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		struct page *pgd_page = virt_to_page(domain->pgd);
		pgd_page->freelist = freelist;
		freelist = pgd_page;

		domain->pgd = NULL;
	}

	return freelist;
}

void dma_free_pagelist(struct page *freelist)
{
	struct page *pg;

	while ((pg = freelist)) {
		freelist = pg->freelist;
		free_pgtable_page(page_address(pg));
	}
}

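/*
 * The intended calling pattern, per the comment above domain_unmap(), is
 * roughly:
 *
 *	freelist = domain_unmap(domain, start_pfn, last_pfn);
 *	// flush the IOTLB, e.g. via iommu_flush_iotlb_psi()
 *	dma_free_pagelist(freelist);
 *
 * so that no page-table page is reused while the hardware may still be
 * walking it.
 */
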
/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root) {
		pr_err("IOMMU: allocating root entry for %s failed\n",
			iommu->name);
		return -ENOMEM;
	}

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* IH bit is passed in as part of address */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

David Woodhouse64ae8922014-03-09 12:52:30 -07001275static struct device_domain_info *
1276iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1277 u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001278{
Yu Zhao93a23a72009-05-18 13:51:37 +08001279 int found = 0;
1280 unsigned long flags;
1281 struct device_domain_info *info;
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001282 struct pci_dev *pdev;
Yu Zhao93a23a72009-05-18 13:51:37 +08001283
1284 if (!ecap_dev_iotlb_support(iommu->ecap))
1285 return NULL;
1286
1287 if (!iommu->qi)
1288 return NULL;
1289
1290 spin_lock_irqsave(&device_domain_lock, flags);
1291 list_for_each_entry(info, &domain->devices, link)
Jiang Liuc3b497c2014-07-11 14:19:25 +08001292 if (info->iommu == iommu && info->bus == bus &&
1293 info->devfn == devfn) {
Yu Zhao93a23a72009-05-18 13:51:37 +08001294 found = 1;
1295 break;
1296 }
1297 spin_unlock_irqrestore(&device_domain_lock, flags);
1298
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001299 if (!found || !info->dev || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001300 return NULL;
1301
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001302 pdev = to_pci_dev(info->dev);
1303
1304 if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
Yu Zhao93a23a72009-05-18 13:51:37 +08001305 return NULL;
1306
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001307 if (!dmar_find_matched_atsr_unit(pdev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001308 return NULL;
1309
Yu Zhao93a23a72009-05-18 13:51:37 +08001310 return info;
1311}
1312
1313static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1314{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001315 if (!info || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001316 return;
1317
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001318 pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
Yu Zhao93a23a72009-05-18 13:51:37 +08001319}
1320
1321static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1322{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001323 if (!info->dev || !dev_is_pci(info->dev) ||
1324 !pci_ats_enabled(to_pci_dev(info->dev)))
Yu Zhao93a23a72009-05-18 13:51:37 +08001325 return;
1326
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001327 pci_disable_ats(to_pci_dev(info->dev));
Yu Zhao93a23a72009-05-18 13:51:37 +08001328}
1329
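/*
 * Issue a device-IOTLB (ATS) invalidation covering @addr/@mask for every
 * device in @domain that currently has ATS enabled.
 */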
1330static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1331 u64 addr, unsigned mask)
1332{
1333 u16 sid, qdep;
1334 unsigned long flags;
1335 struct device_domain_info *info;
1336
1337 spin_lock_irqsave(&device_domain_lock, flags);
1338 list_for_each_entry(info, &domain->devices, link) {
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001339 struct pci_dev *pdev;
1340 if (!info->dev || !dev_is_pci(info->dev))
1341 continue;
1342
1343 pdev = to_pci_dev(info->dev);
1344 if (!pci_ats_enabled(pdev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001345 continue;
1346
1347 sid = info->bus << 8 | info->devfn;
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001348 qdep = pci_ats_queue_depth(pdev);
Yu Zhao93a23a72009-05-18 13:51:37 +08001349 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1350 }
1351 spin_unlock_irqrestore(&device_domain_lock, flags);
1352}
1353
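/*
 * Flush the IOTLB for @pages starting at @pfn in domain @did. Use a
 * page-selective invalidation when the hardware supports it and the range
 * fits the maximum address mask; otherwise fall back to a domain-selective
 * flush. The device IOTLB is also flushed unless we are in caching mode
 * and only mapping previously non-present entries.
 */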
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001354static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
David Woodhouseea8ea462014-03-05 17:09:32 +00001355 unsigned long pfn, unsigned int pages, int ih, int map)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001356{
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001357 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
David Woodhouse03d6a242009-06-28 15:33:46 +01001358 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001359
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001360 BUG_ON(pages == 0);
1361
David Woodhouseea8ea462014-03-05 17:09:32 +00001362 if (ih)
1363 ih = 1 << 6;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001364 /*
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001365	 * Fall back to a domain-selective flush if there is no PSI support or
1366	 * the size is too big.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001367	 * PSI requires the page size to be 2 ^ x, and the base address to be
1368	 * naturally aligned to the size.
1369 */
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001370 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1371 iommu->flush.flush_iotlb(iommu, did, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001372 DMA_TLB_DSI_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001373 else
David Woodhouseea8ea462014-03-05 17:09:32 +00001374 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001375 DMA_TLB_PSI_FLUSH);
Yu Zhaobf92df32009-06-29 11:31:45 +08001376
1377 /*
Nadav Amit82653632010-04-01 13:24:40 +03001378	 * In caching mode, changes of pages from non-present to present require
1379	 * a flush. However, the device IOTLB doesn't need to be flushed in this case.
Yu Zhaobf92df32009-06-29 11:31:45 +08001380 */
Nadav Amit82653632010-04-01 13:24:40 +03001381 if (!cap_caching_mode(iommu->cap) || !map)
Yu Zhao93a23a72009-05-18 13:51:37 +08001382 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001383}
1384
mark grossf8bab732008-02-08 04:18:38 -08001385static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1386{
1387 u32 pmen;
1388 unsigned long flags;
1389
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001390 raw_spin_lock_irqsave(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001391 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1392 pmen &= ~DMA_PMEN_EPM;
1393 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1394
1395 /* wait for the protected region status bit to clear */
1396 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1397 readl, !(pmen & DMA_PMEN_PRS), pmen);
1398
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001399 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001400}
1401
Jiang Liu2a41cce2014-07-11 14:19:33 +08001402static void iommu_enable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001403{
1404 u32 sts;
1405 unsigned long flags;
1406
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001407 raw_spin_lock_irqsave(&iommu->register_lock, flags);
David Woodhousec416daa2009-05-10 20:30:58 +01001408 iommu->gcmd |= DMA_GCMD_TE;
1409 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001410
1411	/* Make sure the hardware completes it */
1412 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001413 readl, (sts & DMA_GSTS_TES), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001414
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001415 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001416}
1417
Jiang Liu2a41cce2014-07-11 14:19:33 +08001418static void iommu_disable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001419{
1420 u32 sts;
1421 unsigned long flag;
1422
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001423 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001424 iommu->gcmd &= ~DMA_GCMD_TE;
1425 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1426
1427	/* Make sure the hardware completes it */
1428 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001429 readl, (!(sts & DMA_GSTS_TES)), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001430
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001431 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001432}
1433
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001434
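/*
 * Allocate the per-IOMMU domain id bitmap and domain pointer array, sized
 * from cap_ndoms(). Domain id 0 is pre-allocated when caching mode is set,
 * since invalid translations are tagged with that id.
 */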
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001435static int iommu_init_domains(struct intel_iommu *iommu)
1436{
1437 unsigned long ndomains;
1438 unsigned long nlongs;
1439
1440 ndomains = cap_ndoms(iommu->cap);
Jiang Liu852bdb02014-01-06 14:18:11 +08001441 pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
1442 iommu->seq_id, ndomains);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001443 nlongs = BITS_TO_LONGS(ndomains);
1444
Donald Dutile94a91b52009-08-20 16:51:34 -04001445 spin_lock_init(&iommu->lock);
1446
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001447	/* TBD: there might be 64K domains,
1448	 * so consider a different allocation scheme for future chips
1449 */
1450 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1451 if (!iommu->domain_ids) {
Jiang Liu852bdb02014-01-06 14:18:11 +08001452 pr_err("IOMMU%d: allocating domain id array failed\n",
1453 iommu->seq_id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001454 return -ENOMEM;
1455 }
1456 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1457 GFP_KERNEL);
1458 if (!iommu->domains) {
Jiang Liu852bdb02014-01-06 14:18:11 +08001459 pr_err("IOMMU%d: allocating domain array failed\n",
1460 iommu->seq_id);
1461 kfree(iommu->domain_ids);
1462 iommu->domain_ids = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001463 return -ENOMEM;
1464 }
1465
1466 /*
1467	 * If caching mode is set, then invalid translations are tagged
1468	 * with domain id 0, so we need to pre-allocate it.
1469 */
1470 if (cap_caching_mode(iommu->cap))
1471 set_bit(0, iommu->domain_ids);
1472 return 0;
1473}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001474
Jiang Liuffebeb42014-11-09 22:48:02 +08001475static void disable_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001476{
1477 struct dmar_domain *domain;
Jiang Liu2a46ddf2014-07-11 14:19:30 +08001478 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001479
Donald Dutile94a91b52009-08-20 16:51:34 -04001480 if ((iommu->domains) && (iommu->domain_ids)) {
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001481 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
Jiang Liua4eaa862014-02-19 14:07:30 +08001482 /*
1483 * Domain id 0 is reserved for invalid translation
1484 * if hardware supports caching mode.
1485 */
1486 if (cap_caching_mode(iommu->cap) && i == 0)
1487 continue;
1488
Donald Dutile94a91b52009-08-20 16:51:34 -04001489 domain = iommu->domains[i];
1490 clear_bit(i, iommu->domain_ids);
Jiang Liu129ad282014-07-11 14:19:31 +08001491 if (domain_detach_iommu(domain, iommu) == 0 &&
1492 !domain_type_is_vm(domain))
Jiang Liu92d03cc2014-02-19 14:07:28 +08001493 domain_exit(domain);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001494 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001495 }
1496
1497 if (iommu->gcmd & DMA_GCMD_TE)
1498 iommu_disable_translation(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08001499}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001500
Jiang Liuffebeb42014-11-09 22:48:02 +08001501static void free_dmar_iommu(struct intel_iommu *iommu)
1502{
1503 if ((iommu->domains) && (iommu->domain_ids)) {
1504 kfree(iommu->domains);
1505 kfree(iommu->domain_ids);
1506 iommu->domains = NULL;
1507 iommu->domain_ids = NULL;
1508 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001509
Weidong Hand9630fe2008-12-08 11:06:32 +08001510 g_iommus[iommu->seq_id] = NULL;
1511
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001512 /* free context mapping */
1513 free_context_table(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001514}
1515
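/*
 * Allocate and minimally initialise a dmar_domain. Virtual machine domains
 * get a software-managed id from a global counter here; other domains get
 * their id when they are attached to an IOMMU.
 */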
Jiang Liuab8dfe22014-07-11 14:19:27 +08001516static struct dmar_domain *alloc_domain(int flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001517{
Jiang Liu92d03cc2014-02-19 14:07:28 +08001518	/* domain id for a virtual machine; it won't be set in a context entry */
1519 static atomic_t vm_domid = ATOMIC_INIT(0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001520 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001521
1522 domain = alloc_domain_mem();
1523 if (!domain)
1524 return NULL;
1525
Jiang Liuab8dfe22014-07-11 14:19:27 +08001526 memset(domain, 0, sizeof(*domain));
Suresh Siddha4c923d42009-10-02 11:01:24 -07001527 domain->nid = -1;
Jiang Liuab8dfe22014-07-11 14:19:27 +08001528 domain->flags = flags;
Jiang Liu92d03cc2014-02-19 14:07:28 +08001529 spin_lock_init(&domain->iommu_lock);
1530 INIT_LIST_HEAD(&domain->devices);
Jiang Liuab8dfe22014-07-11 14:19:27 +08001531 if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
Jiang Liu92d03cc2014-02-19 14:07:28 +08001532 domain->id = atomic_inc_return(&vm_domid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001533
1534 return domain;
1535}
1536
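/*
 * Claim a free domain id on @iommu and record @domain in the IOMMU's domain
 * array. Returns the id, or -ENOSPC if none is free. The caller must hold
 * iommu->lock.
 */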
Jiang Liufb170fb2014-07-11 14:19:28 +08001537static int __iommu_attach_domain(struct dmar_domain *domain,
1538 struct intel_iommu *iommu)
1539{
1540 int num;
1541 unsigned long ndomains;
1542
1543 ndomains = cap_ndoms(iommu->cap);
1544 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1545 if (num < ndomains) {
1546 set_bit(num, iommu->domain_ids);
1547 iommu->domains[num] = domain;
1548 } else {
1549 num = -ENOSPC;
1550 }
1551
1552 return num;
1553}
1554
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001555static int iommu_attach_domain(struct dmar_domain *domain,
1556 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001557{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001558 int num;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001559 unsigned long flags;
1560
Weidong Han8c11e792008-12-08 15:29:22 +08001561 spin_lock_irqsave(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001562 num = __iommu_attach_domain(domain, iommu);
Jiang Liu44bde612014-07-11 14:19:29 +08001563 spin_unlock_irqrestore(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001564 if (num < 0)
1565 pr_err("IOMMU: no free domain ids\n");
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001566
Jiang Liufb170fb2014-07-11 14:19:28 +08001567 return num;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001568}
1569
Jiang Liu44bde612014-07-11 14:19:29 +08001570static int iommu_attach_vm_domain(struct dmar_domain *domain,
1571 struct intel_iommu *iommu)
1572{
1573 int num;
1574 unsigned long ndomains;
1575
1576 ndomains = cap_ndoms(iommu->cap);
1577 for_each_set_bit(num, iommu->domain_ids, ndomains)
1578 if (iommu->domains[num] == domain)
1579 return num;
1580
1581 return __iommu_attach_domain(domain, iommu);
1582}
1583
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001584static void iommu_detach_domain(struct dmar_domain *domain,
1585 struct intel_iommu *iommu)
1586{
1587 unsigned long flags;
1588 int num, ndomains;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001589
1590 spin_lock_irqsave(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001591 if (domain_type_is_vm_or_si(domain)) {
1592 ndomains = cap_ndoms(iommu->cap);
1593 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1594 if (iommu->domains[num] == domain) {
1595 clear_bit(num, iommu->domain_ids);
1596 iommu->domains[num] = NULL;
1597 break;
1598 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001599 }
Jiang Liufb170fb2014-07-11 14:19:28 +08001600 } else {
1601 clear_bit(domain->id, iommu->domain_ids);
1602 iommu->domains[domain->id] = NULL;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001603 }
Weidong Han8c11e792008-12-08 15:29:22 +08001604 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001605}
1606
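/*
 * Note that @domain now uses @iommu: set the IOMMU's bit in the domain's
 * bitmap, bump the domain's IOMMU reference count, inherit the IOMMU's NUMA
 * node on the first attach and refresh the cached capability flags.
 */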
Jiang Liufb170fb2014-07-11 14:19:28 +08001607static void domain_attach_iommu(struct dmar_domain *domain,
1608 struct intel_iommu *iommu)
1609{
1610 unsigned long flags;
1611
1612 spin_lock_irqsave(&domain->iommu_lock, flags);
1613 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
1614 domain->iommu_count++;
1615 if (domain->iommu_count == 1)
1616 domain->nid = iommu->node;
1617 domain_update_iommu_cap(domain);
1618 }
1619 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1620}
1621
1622static int domain_detach_iommu(struct dmar_domain *domain,
1623 struct intel_iommu *iommu)
1624{
1625 unsigned long flags;
1626 int count = INT_MAX;
1627
1628 spin_lock_irqsave(&domain->iommu_lock, flags);
1629 if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
1630 count = --domain->iommu_count;
1631 domain_update_iommu_cap(domain);
1632 }
1633 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1634
1635 return count;
1636}
1637
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001638static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001639static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001640
Joseph Cihula51a63e62011-03-21 11:04:24 -07001641static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001642{
1643 struct pci_dev *pdev = NULL;
1644 struct iova *iova;
1645 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001646
David Millerf6611972008-02-06 01:36:23 -08001647 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001648
Mark Gross8a443df2008-03-04 14:59:31 -08001649 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1650 &reserved_rbtree_key);
1651
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001652 /* IOAPIC ranges shouldn't be accessed by DMA */
1653 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1654 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001655 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001656 printk(KERN_ERR "Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001657 return -ENODEV;
1658 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001659
1660 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1661 for_each_pci_dev(pdev) {
1662 struct resource *r;
1663
1664 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1665 r = &pdev->resource[i];
1666 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1667 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001668 iova = reserve_iova(&reserved_iova_list,
1669 IOVA_PFN(r->start),
1670 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001671 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001672 printk(KERN_ERR "Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001673 return -ENODEV;
1674 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001675 }
1676 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001677 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001678}
1679
1680static void domain_reserve_special_ranges(struct dmar_domain *domain)
1681{
1682 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1683}
1684
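/*
 * Round a guest address width up to the nearest width the page-table
 * structure can express: 12 bits of page offset plus a whole number of
 * 9-bit levels, capped at 64. For example, a gaw of 48 stays 48, while a
 * gaw of 40 is rounded up to 48.
 */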
1685static inline int guestwidth_to_adjustwidth(int gaw)
1686{
1687 int agaw;
1688 int r = (gaw - 12) % 9;
1689
1690 if (r == 0)
1691 agaw = gaw;
1692 else
1693 agaw = gaw + 9 - r;
1694 if (agaw > 64)
1695 agaw = 64;
1696 return agaw;
1697}
1698
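/*
 * Prepare a newly allocated domain for DMA use: set up its IOVA allocator
 * and reserved ranges, derive the adjusted address width from @guest_width
 * and the IOMMU capabilities, cache the coherency/snooping/superpage
 * support, and allocate the top-level page directory.
 */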
1699static int domain_init(struct dmar_domain *domain, int guest_width)
1700{
1701 struct intel_iommu *iommu;
1702 int adjust_width, agaw;
1703 unsigned long sagaw;
1704
David Millerf6611972008-02-06 01:36:23 -08001705 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001706 domain_reserve_special_ranges(domain);
1707
1708 /* calculate AGAW */
Weidong Han8c11e792008-12-08 15:29:22 +08001709 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001710 if (guest_width > cap_mgaw(iommu->cap))
1711 guest_width = cap_mgaw(iommu->cap);
1712 domain->gaw = guest_width;
1713 adjust_width = guestwidth_to_adjustwidth(guest_width);
1714 agaw = width_to_agaw(adjust_width);
1715 sagaw = cap_sagaw(iommu->cap);
1716 if (!test_bit(agaw, &sagaw)) {
1717 /* hardware doesn't support it, choose a bigger one */
1718 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1719 agaw = find_next_bit(&sagaw, 5, agaw);
1720 if (agaw >= 5)
1721 return -ENODEV;
1722 }
1723 domain->agaw = agaw;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001724
Weidong Han8e6040972008-12-08 15:49:06 +08001725 if (ecap_coherent(iommu->ecap))
1726 domain->iommu_coherency = 1;
1727 else
1728 domain->iommu_coherency = 0;
1729
Sheng Yang58c610b2009-03-18 15:33:05 +08001730 if (ecap_sc_support(iommu->ecap))
1731 domain->iommu_snooping = 1;
1732 else
1733 domain->iommu_snooping = 0;
1734
David Woodhouse214e39a2014-03-19 10:38:49 +00001735 if (intel_iommu_superpage)
1736 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1737 else
1738 domain->iommu_superpage = 0;
1739
Suresh Siddha4c923d42009-10-02 11:01:24 -07001740 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001741
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001742 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001743 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001744 if (!domain->pgd)
1745 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001746 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001747 return 0;
1748}
1749
1750static void domain_exit(struct dmar_domain *domain)
1751{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001752 struct dmar_drhd_unit *drhd;
1753 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00001754 struct page *freelist = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001755
1756	/* Domain 0 is reserved, so don't process it */
1757 if (!domain)
1758 return;
1759
Alex Williamson7b668352011-05-24 12:02:41 +01001760 /* Flush any lazy unmaps that may reference this domain */
1761 if (!intel_iommu_strict)
1762 flush_unmaps_timeout(0);
1763
Jiang Liu92d03cc2014-02-19 14:07:28 +08001764 /* remove associated devices */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001765 domain_remove_dev_info(domain);
Jiang Liu92d03cc2014-02-19 14:07:28 +08001766
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001767 /* destroy iovas */
1768 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001769
David Woodhouseea8ea462014-03-05 17:09:32 +00001770 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001771
Jiang Liu92d03cc2014-02-19 14:07:28 +08001772 /* clear attached or cached domains */
Jiang Liu0e242612014-02-19 14:07:34 +08001773 rcu_read_lock();
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001774 for_each_active_iommu(iommu, drhd)
Jiang Liufb170fb2014-07-11 14:19:28 +08001775 iommu_detach_domain(domain, iommu);
Jiang Liu0e242612014-02-19 14:07:34 +08001776 rcu_read_unlock();
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001777
David Woodhouseea8ea462014-03-05 17:09:32 +00001778 dma_free_pagelist(freelist);
1779
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001780 free_domain_mem(domain);
1781}
1782
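/*
 * Program the context entry for (@bus, @devfn) on @iommu to point at
 * @domain's page tables (or to pass-through mode), flush the context and
 * IOTLB caches as required by caching mode, and enable the device IOTLB
 * when the device supports it.
 */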
David Woodhouse64ae8922014-03-09 12:52:30 -07001783static int domain_context_mapping_one(struct dmar_domain *domain,
1784 struct intel_iommu *iommu,
1785 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001786{
1787 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001788 unsigned long flags;
Weidong Hanea6606b2008-12-08 23:08:15 +08001789 struct dma_pte *pgd;
Weidong Hanea6606b2008-12-08 23:08:15 +08001790 int id;
1791 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001792 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001793
1794 pr_debug("Set context mapping for %02x:%02x.%d\n",
1795 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001796
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001797 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001798 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1799 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001800
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001801 context = device_to_context_entry(iommu, bus, devfn);
1802 if (!context)
1803 return -ENOMEM;
1804 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001805 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001806 spin_unlock_irqrestore(&iommu->lock, flags);
1807 return 0;
1808 }
1809
Weidong Hanea6606b2008-12-08 23:08:15 +08001810 id = domain->id;
1811 pgd = domain->pgd;
1812
Jiang Liuab8dfe22014-07-11 14:19:27 +08001813 if (domain_type_is_vm_or_si(domain)) {
Jiang Liu44bde612014-07-11 14:19:29 +08001814 if (domain_type_is_vm(domain)) {
1815 id = iommu_attach_vm_domain(domain, iommu);
Jiang Liufb170fb2014-07-11 14:19:28 +08001816 if (id < 0) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001817 spin_unlock_irqrestore(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001818 pr_err("IOMMU: no free domain ids\n");
Weidong Hanea6606b2008-12-08 23:08:15 +08001819 return -EFAULT;
1820 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001821 }
1822
1823		/* Skip the top levels of the page tables for an
1824		 * iommu whose agaw is less than the domain's default.
Chris Wright1672af12009-12-02 12:06:34 -08001825		 * This is unnecessary for pass-through (PT) mode.
Weidong Hanea6606b2008-12-08 23:08:15 +08001826 */
Chris Wright1672af12009-12-02 12:06:34 -08001827 if (translation != CONTEXT_TT_PASS_THROUGH) {
1828 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1829 pgd = phys_to_virt(dma_pte_addr(pgd));
1830 if (!dma_pte_present(pgd)) {
1831 spin_unlock_irqrestore(&iommu->lock, flags);
1832 return -ENOMEM;
1833 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001834 }
1835 }
1836 }
1837
1838 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001839
Yu Zhao93a23a72009-05-18 13:51:37 +08001840 if (translation != CONTEXT_TT_PASS_THROUGH) {
David Woodhouse64ae8922014-03-09 12:52:30 -07001841 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
Yu Zhao93a23a72009-05-18 13:51:37 +08001842 translation = info ? CONTEXT_TT_DEV_IOTLB :
1843 CONTEXT_TT_MULTI_LEVEL;
1844 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001845 /*
1846	 * In pass-through mode, AW must be programmed to indicate the largest
1847	 * AGAW value supported by the hardware, and ASR is ignored by the hardware.
1848 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001849 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001850 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001851 else {
1852 context_set_address_root(context, virt_to_phys(pgd));
1853 context_set_address_width(context, iommu->agaw);
1854 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001855
1856 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001857 context_set_fault_enable(context);
1858 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001859 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001860
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001861 /*
1862	 * It's a non-present to present mapping. If the hardware doesn't cache
1863	 * non-present entries, we only need to flush the write buffer. If it
1864	 * _does_ cache non-present entries, then it does so in the special
1865	 * domain #0, which we have to flush:
1866 */
1867 if (cap_caching_mode(iommu->cap)) {
1868 iommu->flush.flush_context(iommu, 0,
1869 (((u16)bus) << 8) | devfn,
1870 DMA_CCMD_MASK_NOBIT,
1871 DMA_CCMD_DEVICE_INVL);
Jiang Liu18fd7792014-07-11 14:19:26 +08001872 iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001873 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001874 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001875 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001876 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001877 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001878
Jiang Liufb170fb2014-07-11 14:19:28 +08001879 domain_attach_iommu(domain, iommu);
1880
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001881 return 0;
1882}
1883
Alex Williamson579305f2014-07-03 09:51:43 -06001884struct domain_context_mapping_data {
1885 struct dmar_domain *domain;
1886 struct intel_iommu *iommu;
1887 int translation;
1888};
1889
1890static int domain_context_mapping_cb(struct pci_dev *pdev,
1891 u16 alias, void *opaque)
1892{
1893 struct domain_context_mapping_data *data = opaque;
1894
1895 return domain_context_mapping_one(data->domain, data->iommu,
1896 PCI_BUS_NUM(alias), alias & 0xff,
1897 data->translation);
1898}
1899
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001900static int
David Woodhousee1f167f2014-03-09 15:24:46 -07001901domain_context_mapping(struct dmar_domain *domain, struct device *dev,
1902 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001903{
David Woodhouse64ae8922014-03-09 12:52:30 -07001904 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07001905 u8 bus, devfn;
Alex Williamson579305f2014-07-03 09:51:43 -06001906 struct domain_context_mapping_data data;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001907
David Woodhousee1f167f2014-03-09 15:24:46 -07001908 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse64ae8922014-03-09 12:52:30 -07001909 if (!iommu)
1910 return -ENODEV;
1911
Alex Williamson579305f2014-07-03 09:51:43 -06001912 if (!dev_is_pci(dev))
1913 return domain_context_mapping_one(domain, iommu, bus, devfn,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001914 translation);
Alex Williamson579305f2014-07-03 09:51:43 -06001915
1916 data.domain = domain;
1917 data.iommu = iommu;
1918 data.translation = translation;
1919
1920 return pci_for_each_dma_alias(to_pci_dev(dev),
1921 &domain_context_mapping_cb, &data);
1922}
1923
1924static int domain_context_mapped_cb(struct pci_dev *pdev,
1925 u16 alias, void *opaque)
1926{
1927 struct intel_iommu *iommu = opaque;
1928
1929 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001930}
1931
David Woodhousee1f167f2014-03-09 15:24:46 -07001932static int domain_context_mapped(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001933{
Weidong Han5331fe62008-12-08 23:00:00 +08001934 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07001935 u8 bus, devfn;
Weidong Han5331fe62008-12-08 23:00:00 +08001936
David Woodhousee1f167f2014-03-09 15:24:46 -07001937 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001938 if (!iommu)
1939 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001940
Alex Williamson579305f2014-07-03 09:51:43 -06001941 if (!dev_is_pci(dev))
1942 return device_context_mapped(iommu, bus, devfn);
David Woodhousee1f167f2014-03-09 15:24:46 -07001943
Alex Williamson579305f2014-07-03 09:51:43 -06001944 return !pci_for_each_dma_alias(to_pci_dev(dev),
1945 domain_context_mapped_cb, iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001946}
1947
Fenghua Yuf5329592009-08-04 15:09:37 -07001948/* Returns the number of VT-d pages, but aligned to the MM page size */
1949static inline unsigned long aligned_nrpages(unsigned long host_addr,
1950 size_t size)
1951{
1952 host_addr &= ~PAGE_MASK;
1953 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1954}
1955
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001956/* Return largest possible superpage level for a given mapping */
1957static inline int hardware_largepage_caps(struct dmar_domain *domain,
1958 unsigned long iov_pfn,
1959 unsigned long phy_pfn,
1960 unsigned long pages)
1961{
1962 int support, level = 1;
1963 unsigned long pfnmerge;
1964
1965 support = domain->iommu_superpage;
1966
1967 /* To use a large page, the virtual *and* physical addresses
1968 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1969 of them will mean we have to use smaller pages. So just
1970 merge them and check both at once. */
1971 pfnmerge = iov_pfn | phy_pfn;
1972
1973 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1974 pages >>= VTD_STRIDE_SHIFT;
1975 if (!pages)
1976 break;
1977 pfnmerge >>= VTD_STRIDE_SHIFT;
1978 level++;
1979 support--;
1980 }
1981 return level;
1982}
1983
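/*
 * Install PTEs mapping @nr_pages of IOVA space starting at @iov_pfn, taking
 * the physical pages either from @sg or from the contiguous range starting
 * at @phys_pfn. Superpages are used where alignment and remaining length
 * allow, and the CPU cache is flushed for each page-table page filled.
 */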
David Woodhouse9051aa02009-06-29 12:30:54 +01001984static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1985 struct scatterlist *sg, unsigned long phys_pfn,
1986 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01001987{
1988 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01001989 phys_addr_t uninitialized_var(pteval);
Jiang Liucc4f14a2014-11-26 09:42:10 +08001990 unsigned long sg_res = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001991 unsigned int largepage_lvl = 0;
1992 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01001993
Jiang Liu162d1b12014-07-11 14:19:35 +08001994 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
David Woodhousee1605492009-06-29 11:17:38 +01001995
1996 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1997 return -EINVAL;
1998
1999 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2000
Jiang Liucc4f14a2014-11-26 09:42:10 +08002001 if (!sg) {
2002 sg_res = nr_pages;
David Woodhouse9051aa02009-06-29 12:30:54 +01002003 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2004 }
2005
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002006 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01002007 uint64_t tmp;
2008
David Woodhousee1605492009-06-29 11:17:38 +01002009 if (!sg_res) {
Fenghua Yuf5329592009-08-04 15:09:37 -07002010 sg_res = aligned_nrpages(sg->offset, sg->length);
David Woodhousee1605492009-06-29 11:17:38 +01002011 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2012 sg->dma_length = sg->length;
2013 pteval = page_to_phys(sg_page(sg)) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002014 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01002015 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002016
David Woodhousee1605492009-06-29 11:17:38 +01002017 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002018 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2019
David Woodhouse5cf0a762014-03-19 16:07:49 +00002020 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01002021 if (!pte)
2022 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002023			/* It is a large page */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002024 if (largepage_lvl > 1) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002025 pteval |= DMA_PTE_LARGE_PAGE;
Jiang Liud41a4ad2014-07-11 14:19:34 +08002026 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2027 /*
2028 * Ensure that old small page tables are
2029 * removed to make room for superpage,
2030 * if they exist.
2031 */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002032 dma_pte_free_pagetable(domain, iov_pfn,
Jiang Liud41a4ad2014-07-11 14:19:34 +08002033 iov_pfn + lvl_pages - 1);
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002034 } else {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002035 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002036 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002037
David Woodhousee1605492009-06-29 11:17:38 +01002038 }
2039		/* We don't need the lock here; nobody else
2040		 * touches this iova range
2041 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01002042 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01002043 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01002044 static int dumps = 5;
David Woodhousec85994e2009-07-01 19:21:24 +01002045 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2046 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01002047 if (dumps) {
2048 dumps--;
2049 debug_dma_dump_mappings(NULL);
2050 }
2051 WARN_ON(1);
2052 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002053
2054 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2055
2056 BUG_ON(nr_pages < lvl_pages);
2057 BUG_ON(sg_res < lvl_pages);
2058
2059 nr_pages -= lvl_pages;
2060 iov_pfn += lvl_pages;
2061 phys_pfn += lvl_pages;
2062 pteval += lvl_pages * VTD_PAGE_SIZE;
2063 sg_res -= lvl_pages;
2064
2065 /* If the next PTE would be the first in a new page, then we
2066 need to flush the cache on the entries we've just written.
2067 And then we'll need to recalculate 'pte', so clear it and
2068 let it get set again in the if (!pte) block above.
2069
2070 If we're done (!nr_pages) we need to flush the cache too.
2071
2072 Also if we've been setting superpages, we may need to
2073 recalculate 'pte' and switch back to smaller pages for the
2074 end of the mapping, if the trailing size is not enough to
2075 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01002076 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002077 if (!nr_pages || first_pte_in_page(pte) ||
2078 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01002079 domain_flush_cache(domain, first_pte,
2080 (void *)pte - (void *)first_pte);
2081 pte = NULL;
2082 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002083
2084 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01002085 sg = sg_next(sg);
2086 }
2087 return 0;
2088}
2089
David Woodhouse9051aa02009-06-29 12:30:54 +01002090static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2091 struct scatterlist *sg, unsigned long nr_pages,
2092 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002093{
David Woodhouse9051aa02009-06-29 12:30:54 +01002094 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2095}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002096
David Woodhouse9051aa02009-06-29 12:30:54 +01002097static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2098 unsigned long phys_pfn, unsigned long nr_pages,
2099 int prot)
2100{
2101 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002102}
2103
Weidong Hanc7151a82008-12-08 22:51:37 +08002104static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002105{
Weidong Hanc7151a82008-12-08 22:51:37 +08002106 if (!iommu)
2107 return;
Weidong Han8c11e792008-12-08 15:29:22 +08002108
2109 clear_context_table(iommu, bus, devfn);
2110 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002111 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002112 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002113}
2114
David Woodhouse109b9b02012-05-25 17:43:02 +01002115static inline void unlink_domain_info(struct device_domain_info *info)
2116{
2117 assert_spin_locked(&device_domain_lock);
2118 list_del(&info->link);
2119 list_del(&info->global);
2120 if (info->dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002121 info->dev->archdata.iommu = NULL;
David Woodhouse109b9b02012-05-25 17:43:02 +01002122}
2123
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002124static void domain_remove_dev_info(struct dmar_domain *domain)
2125{
Yijing Wang3a74ca02014-05-20 20:37:47 +08002126 struct device_domain_info *info, *tmp;
Jiang Liufb170fb2014-07-11 14:19:28 +08002127 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002128
2129 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wang3a74ca02014-05-20 20:37:47 +08002130 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
David Woodhouse109b9b02012-05-25 17:43:02 +01002131 unlink_domain_info(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002132 spin_unlock_irqrestore(&device_domain_lock, flags);
2133
Yu Zhao93a23a72009-05-18 13:51:37 +08002134 iommu_disable_dev_iotlb(info);
David Woodhouse7c7faa12014-03-09 13:33:06 -07002135 iommu_detach_dev(info->iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002136
Jiang Liuab8dfe22014-07-11 14:19:27 +08002137 if (domain_type_is_vm(domain)) {
David Woodhouse7c7faa12014-03-09 13:33:06 -07002138 iommu_detach_dependent_devices(info->iommu, info->dev);
Jiang Liufb170fb2014-07-11 14:19:28 +08002139 domain_detach_iommu(domain, info->iommu);
Jiang Liu92d03cc2014-02-19 14:07:28 +08002140 }
2141
2142 free_devinfo_mem(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002143 spin_lock_irqsave(&device_domain_lock, flags);
2144 }
2145 spin_unlock_irqrestore(&device_domain_lock, flags);
2146}
2147
2148/*
2149 * find_domain
David Woodhouse1525a292014-03-06 16:19:30 +00002150 * Note: we use struct device->archdata.iommu to store the info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002151 */
David Woodhouse1525a292014-03-06 16:19:30 +00002152static struct dmar_domain *find_domain(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002153{
2154 struct device_domain_info *info;
2155
2156 /* No lock here, assumes no domain exit in normal case */
David Woodhouse1525a292014-03-06 16:19:30 +00002157 info = dev->archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002158 if (info)
2159 return info->domain;
2160 return NULL;
2161}
2162
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002163static inline struct device_domain_info *
Jiang Liu745f2582014-02-19 14:07:26 +08002164dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2165{
2166 struct device_domain_info *info;
2167
2168 list_for_each_entry(info, &device_domain_list, global)
David Woodhouse41e80dca2014-03-09 13:55:54 -07002169 if (info->iommu->segment == segment && info->bus == bus &&
Jiang Liu745f2582014-02-19 14:07:26 +08002170 info->devfn == devfn)
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002171 return info;
Jiang Liu745f2582014-02-19 14:07:26 +08002172
2173 return NULL;
2174}
2175
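/*
 * Allocate a device_domain_info for (@bus, @devfn) and link it to @domain.
 * If the device (or its DMA alias) was attached to another domain in the
 * meantime, return that existing domain instead; the caller must then free
 * the domain it passed in.
 */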
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002176static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
David Woodhouse41e80dca2014-03-09 13:55:54 -07002177 int bus, int devfn,
David Woodhouseb718cd32014-03-09 13:11:33 -07002178 struct device *dev,
2179 struct dmar_domain *domain)
Jiang Liu745f2582014-02-19 14:07:26 +08002180{
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002181 struct dmar_domain *found = NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002182 struct device_domain_info *info;
2183 unsigned long flags;
2184
2185 info = alloc_devinfo_mem();
2186 if (!info)
David Woodhouseb718cd32014-03-09 13:11:33 -07002187 return NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002188
Jiang Liu745f2582014-02-19 14:07:26 +08002189 info->bus = bus;
2190 info->devfn = devfn;
2191 info->dev = dev;
2192 info->domain = domain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002193 info->iommu = iommu;
Jiang Liu745f2582014-02-19 14:07:26 +08002194
2195 spin_lock_irqsave(&device_domain_lock, flags);
2196 if (dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002197 found = find_domain(dev);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002198 else {
2199 struct device_domain_info *info2;
David Woodhouse41e80dca2014-03-09 13:55:54 -07002200 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002201 if (info2)
2202 found = info2->domain;
2203 }
Jiang Liu745f2582014-02-19 14:07:26 +08002204 if (found) {
2205 spin_unlock_irqrestore(&device_domain_lock, flags);
2206 free_devinfo_mem(info);
David Woodhouseb718cd32014-03-09 13:11:33 -07002207 /* Caller must free the original domain */
2208 return found;
Jiang Liu745f2582014-02-19 14:07:26 +08002209 }
2210
David Woodhouseb718cd32014-03-09 13:11:33 -07002211 list_add(&info->link, &domain->devices);
2212 list_add(&info->global, &device_domain_list);
2213 if (dev)
2214 dev->archdata.iommu = info;
2215 spin_unlock_irqrestore(&device_domain_lock, flags);
2216
2217 return domain;
Jiang Liu745f2582014-02-19 14:07:26 +08002218}
2219
Alex Williamson579305f2014-07-03 09:51:43 -06002220static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2221{
2222 *(u16 *)opaque = alias;
2223 return 0;
2224}
2225
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002226/* Find the domain for @dev; the returned domain is fully initialized */
David Woodhouse146922e2014-03-09 15:44:17 -07002227static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002228{
Alex Williamson579305f2014-07-03 09:51:43 -06002229 struct dmar_domain *domain, *tmp;
2230 struct intel_iommu *iommu;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002231 struct device_domain_info *info;
Alex Williamson579305f2014-07-03 09:51:43 -06002232 u16 dma_alias;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002233 unsigned long flags;
Yijing Wangaa4d0662014-05-26 20:14:06 +08002234 u8 bus, devfn;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002235
David Woodhouse146922e2014-03-09 15:44:17 -07002236 domain = find_domain(dev);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002237 if (domain)
2238 return domain;
2239
David Woodhouse146922e2014-03-09 15:44:17 -07002240 iommu = device_to_iommu(dev, &bus, &devfn);
2241 if (!iommu)
Alex Williamson579305f2014-07-03 09:51:43 -06002242 return NULL;
2243
2244 if (dev_is_pci(dev)) {
2245 struct pci_dev *pdev = to_pci_dev(dev);
2246
2247 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2248
2249 spin_lock_irqsave(&device_domain_lock, flags);
2250 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2251 PCI_BUS_NUM(dma_alias),
2252 dma_alias & 0xff);
2253 if (info) {
2254 iommu = info->iommu;
2255 domain = info->domain;
2256 }
2257 spin_unlock_irqrestore(&device_domain_lock, flags);
2258
2259		/* The DMA alias already has a domain; use it */
2260 if (info)
2261 goto found_domain;
2262 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002263
David Woodhouse146922e2014-03-09 15:44:17 -07002264 /* Allocate and initialize new domain for the device */
Jiang Liuab8dfe22014-07-11 14:19:27 +08002265 domain = alloc_domain(0);
Jiang Liu745f2582014-02-19 14:07:26 +08002266 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002267 return NULL;
Jiang Liu44bde612014-07-11 14:19:29 +08002268 domain->id = iommu_attach_domain(domain, iommu);
2269 if (domain->id < 0) {
Alex Williamson2fe9723d2011-03-04 14:52:30 -07002270 free_domain_mem(domain);
Alex Williamson579305f2014-07-03 09:51:43 -06002271 return NULL;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002272 }
Jiang Liufb170fb2014-07-11 14:19:28 +08002273 domain_attach_iommu(domain, iommu);
Alex Williamson579305f2014-07-03 09:51:43 -06002274 if (domain_init(domain, gaw)) {
2275 domain_exit(domain);
2276 return NULL;
2277 }
2278
2279 /* register PCI DMA alias device */
2280 if (dev_is_pci(dev)) {
2281 tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2282 dma_alias & 0xff, NULL, domain);
2283
2284 if (!tmp || tmp != domain) {
2285 domain_exit(domain);
2286 domain = tmp;
2287 }
2288
David Woodhouseb718cd32014-03-09 13:11:33 -07002289 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002290 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002291 }
2292
2293found_domain:
Alex Williamson579305f2014-07-03 09:51:43 -06002294 tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2295
2296 if (!tmp || tmp != domain) {
2297 domain_exit(domain);
2298 domain = tmp;
2299 }
David Woodhouseb718cd32014-03-09 13:11:33 -07002300
2301 return domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002302}
2303
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002304static int iommu_identity_mapping;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002305#define IDENTMAP_ALL 1
2306#define IDENTMAP_GFX 2
2307#define IDENTMAP_AZALIA 4
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002308
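/*
 * Identity-map the physical range [@start, @end] into @domain: reserve the
 * matching IOVA range, clear any existing PTEs and map the pages
 * read/write at a 1:1 translation.
 */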
David Woodhouseb2132032009-06-26 18:50:28 +01002309static int iommu_domain_identity_map(struct dmar_domain *domain,
2310 unsigned long long start,
2311 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002312{
David Woodhousec5395d52009-06-28 16:35:56 +01002313 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2314 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002315
David Woodhousec5395d52009-06-28 16:35:56 +01002316 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2317 dma_to_mm_pfn(last_vpfn))) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002318 printk(KERN_ERR "IOMMU: reserve iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002319 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002320 }
2321
David Woodhousec5395d52009-06-28 16:35:56 +01002322 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2323 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002324 /*
2325	 * The RMRR range might overlap with a physical memory range,
2326	 * so clear it first
2327 */
David Woodhousec5395d52009-06-28 16:35:56 +01002328 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002329
David Woodhousec5395d52009-06-28 16:35:56 +01002330 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2331 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002332 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002333}
2334
David Woodhouse0b9d9752014-03-09 15:48:15 -07002335static int iommu_prepare_identity_map(struct device *dev,
David Woodhouseb2132032009-06-26 18:50:28 +01002336 unsigned long long start,
2337 unsigned long long end)
2338{
2339 struct dmar_domain *domain;
2340 int ret;
2341
David Woodhouse0b9d9752014-03-09 15:48:15 -07002342 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01002343 if (!domain)
2344 return -ENOMEM;
2345
David Woodhouse19943b02009-08-04 16:19:20 +01002346	/* For _hardware_ passthrough, don't bother. But for software
2347	   passthrough, we do it anyway -- it may indicate a memory
2348	   range which is reserved in E820 and so didn't get set
2349	   up in si_domain to start with */
2350 if (domain == si_domain && hw_pass_through) {
2351 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
David Woodhouse0b9d9752014-03-09 15:48:15 -07002352 dev_name(dev), start, end);
David Woodhouse19943b02009-08-04 16:19:20 +01002353 return 0;
2354 }
2355
2356 printk(KERN_INFO
2357 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
David Woodhouse0b9d9752014-03-09 15:48:15 -07002358 dev_name(dev), start, end);
David Woodhouse2ff729f2009-08-26 14:25:41 +01002359
David Woodhouse5595b522009-12-02 09:21:55 +00002360 if (end < start) {
2361 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2362 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2363 dmi_get_system_info(DMI_BIOS_VENDOR),
2364 dmi_get_system_info(DMI_BIOS_VERSION),
2365 dmi_get_system_info(DMI_PRODUCT_VERSION));
2366 ret = -EIO;
2367 goto error;
2368 }
2369
David Woodhouse2ff729f2009-08-26 14:25:41 +01002370 if (end >> agaw_to_width(domain->agaw)) {
2371 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2372 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2373 agaw_to_width(domain->agaw),
2374 dmi_get_system_info(DMI_BIOS_VENDOR),
2375 dmi_get_system_info(DMI_BIOS_VERSION),
2376 dmi_get_system_info(DMI_PRODUCT_VERSION));
2377 ret = -EIO;
2378 goto error;
2379 }
David Woodhouse19943b02009-08-04 16:19:20 +01002380
David Woodhouseb2132032009-06-26 18:50:28 +01002381 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002382 if (ret)
2383 goto error;
2384
2385 /* context entry init */
David Woodhouse0b9d9752014-03-09 15:48:15 -07002386 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01002387 if (ret)
2388 goto error;
2389
2390 return 0;
2391
2392 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002393 domain_exit(domain);
2394 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002395}
2396
2397static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
David Woodhouse0b9d9752014-03-09 15:48:15 -07002398 struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002399{
David Woodhouse0b9d9752014-03-09 15:48:15 -07002400 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002401 return 0;
David Woodhouse0b9d9752014-03-09 15:48:15 -07002402 return iommu_prepare_identity_map(dev, rmrr->base_address,
2403 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002404}
2405
Suresh Siddhad3f13812011-08-23 17:05:25 -07002406#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002407static inline void iommu_prepare_isa(void)
2408{
2409 struct pci_dev *pdev;
2410 int ret;
2411
2412 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2413 if (!pdev)
2414 return;
2415
David Woodhousec7ab48d2009-06-26 19:10:36 +01002416 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse0b9d9752014-03-09 15:48:15 -07002417 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002418
2419 if (ret)
David Woodhousec7ab48d2009-06-26 19:10:36 +01002420 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2421 "floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002422
Yijing Wang9b27e822014-05-20 20:37:52 +08002423 pci_dev_put(pdev);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002424}
2425#else
2426static inline void iommu_prepare_isa(void)
2427{
2428 return;
2429}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002430#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002431
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002432static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002433
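/*
 * Set up the static identity (si) domain used for identity-mapped devices:
 * attach it to every active IOMMU under one consistent domain id and,
 * unless hardware pass-through is in use, identity-map all usable system
 * memory reported by memblock.
 */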
Matt Kraai071e1372009-08-23 22:30:22 -07002434static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002435{
2436 struct dmar_drhd_unit *drhd;
2437 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002438 int nid, ret = 0;
Jiang Liu44bde612014-07-11 14:19:29 +08002439 bool first = true;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002440
Jiang Liuab8dfe22014-07-11 14:19:27 +08002441 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002442 if (!si_domain)
2443 return -EFAULT;
2444
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002445 for_each_active_iommu(iommu, drhd) {
2446 ret = iommu_attach_domain(si_domain, iommu);
Jiang Liufb170fb2014-07-11 14:19:28 +08002447 if (ret < 0) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002448 domain_exit(si_domain);
2449 return -EFAULT;
Jiang Liu44bde612014-07-11 14:19:29 +08002450 } else if (first) {
2451 si_domain->id = ret;
2452 first = false;
2453 } else if (si_domain->id != ret) {
2454 domain_exit(si_domain);
2455 return -EFAULT;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002456 }
Jiang Liufb170fb2014-07-11 14:19:28 +08002457 domain_attach_iommu(si_domain, iommu);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002458 }
2459
2460 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2461 domain_exit(si_domain);
2462 return -EFAULT;
2463 }
2464
Jiang Liu9544c002014-01-06 14:18:13 +08002465 pr_debug("IOMMU: identity mapping domain is domain %d\n",
2466 si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002467
David Woodhouse19943b02009-08-04 16:19:20 +01002468 if (hw)
2469 return 0;
2470
David Woodhousec7ab48d2009-06-26 19:10:36 +01002471 for_each_online_node(nid) {
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002472 unsigned long start_pfn, end_pfn;
2473 int i;
2474
2475 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2476 ret = iommu_domain_identity_map(si_domain,
2477 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2478 if (ret)
2479 return ret;
2480 }
David Woodhousec7ab48d2009-06-26 19:10:36 +01002481 }
2482
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002483 return 0;
2484}
2485
David Woodhouse9b226622014-03-09 14:03:28 -07002486static int identity_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002487{
2488 struct device_domain_info *info;
2489
2490 if (likely(!iommu_identity_mapping))
2491 return 0;
2492
David Woodhouse9b226622014-03-09 14:03:28 -07002493 info = dev->archdata.iommu;
Mike Traviscb452a42011-05-28 13:15:03 -05002494 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2495 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002496
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002497 return 0;
2498}
2499
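/*
 * Attach @dev to @domain: record the device in the domain's device list and
 * program its context entry with the requested translation type, undoing
 * the attachment if context mapping fails.
 */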
2500static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5913c9b2014-03-09 16:27:31 -07002501 struct device *dev, int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002502{
David Woodhouse0ac72662014-03-09 13:19:22 -07002503 struct dmar_domain *ndomain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002504 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002505 u8 bus, devfn;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002506 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002507
David Woodhouse5913c9b2014-03-09 16:27:31 -07002508 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002509 if (!iommu)
2510 return -ENODEV;
2511
David Woodhouse5913c9b2014-03-09 16:27:31 -07002512 ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
David Woodhouse0ac72662014-03-09 13:19:22 -07002513 if (ndomain != domain)
2514 return -EBUSY;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002515
David Woodhouse5913c9b2014-03-09 16:27:31 -07002516 ret = domain_context_mapping(domain, dev, translation);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002517 if (ret) {
David Woodhouse5913c9b2014-03-09 16:27:31 -07002518 domain_remove_one_dev_info(domain, dev);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002519 return ret;
2520 }
2521
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002522 return 0;
2523}
2524
David Woodhouse0b9d9752014-03-09 15:48:15 -07002525static bool device_has_rmrr(struct device *dev)
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002526{
2527 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002528 struct device *tmp;
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002529 int i;
2530
Jiang Liu0e242612014-02-19 14:07:34 +08002531 rcu_read_lock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002532 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002533 /*
2534 * Return TRUE if this RMRR contains the device that
2535 * is passed in.
2536 */
2537 for_each_active_dev_scope(rmrr->devices,
2538 rmrr->devices_cnt, i, tmp)
David Woodhouse0b9d9752014-03-09 15:48:15 -07002539 if (tmp == dev) {
Jiang Liu0e242612014-02-19 14:07:34 +08002540 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002541 return true;
Jiang Liub683b232014-02-19 14:07:32 +08002542 }
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002543 }
Jiang Liu0e242612014-02-19 14:07:34 +08002544 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002545 return false;
2546}
2547
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002548/*
2549 * There are a couple of cases where we need to restrict the functionality of
2550 * devices associated with RMRRs. The first is when evaluating a device for
2551 * identity mapping because problems exist when devices are moved in and out
2552 * of domains and their respective RMRR information is lost. This means that
2553 * a device with associated RMRRs will never be in a "passthrough" domain.
2554 * The second is use of the device through the IOMMU API. This interface
2555 * expects to have full control of the IOVA space for the device. We cannot
2556 * satisfy both the requirement that RMRR access is maintained and have an
2557 * unencumbered IOVA space. We also have no ability to quiesce the device's
2558 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2559 * We therefore prevent devices associated with an RMRR from participating in
2560 * the IOMMU API, which eliminates them from device assignment.
2561 *
2562 * In both cases we assume that PCI USB devices with RMRRs have them largely
2563 * for historical reasons and that the RMRR space is not actively used post
2564 * boot. This exclusion may change if vendors begin to abuse it.
David Woodhouse18436af2015-03-25 15:05:47 +00002565 *
2566 * The same exception is made for graphics devices, with the requirement that
2567 * any use of the RMRR regions will be torn down before assigning the device
2568 * to a guest.
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002569 */
2570static bool device_is_rmrr_locked(struct device *dev)
2571{
2572 if (!device_has_rmrr(dev))
2573 return false;
2574
2575 if (dev_is_pci(dev)) {
2576 struct pci_dev *pdev = to_pci_dev(dev);
2577
David Woodhouse18436af2015-03-25 15:05:47 +00002578 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002579 return false;
2580 }
2581
2582 return true;
2583}
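/*
 * Usage sketch: iommu_should_identity_map() below uses this check to keep
 * RMRR-locked devices out of the static identity domain.  The IOMMU API
 * attach path elsewhere in this file is expected to reject such devices in
 * much the same way, along the lines of (illustrative only, not the exact
 * code):
 *
 *	if (device_is_rmrr_locked(dev)) {
 *		dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement\n");
 *		return -EPERM;
 *	}
 */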
2584
David Woodhouse3bdb2592014-03-09 16:03:08 -07002585static int iommu_should_identity_map(struct device *dev, int startup)
David Woodhouse6941af22009-07-04 18:24:27 +01002586{
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002587
David Woodhouse3bdb2592014-03-09 16:03:08 -07002588 if (dev_is_pci(dev)) {
2589 struct pci_dev *pdev = to_pci_dev(dev);
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002590
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002591 if (device_is_rmrr_locked(dev))
David Woodhouse3bdb2592014-03-09 16:03:08 -07002592 return 0;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002593
David Woodhouse3bdb2592014-03-09 16:03:08 -07002594 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2595 return 1;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002596
David Woodhouse3bdb2592014-03-09 16:03:08 -07002597 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2598 return 1;
2599
2600 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2601 return 0;
2602
2603 /*
2604 * We want to start off with all devices in the 1:1 domain, and
2605 * take them out later if we find they can't access all of memory.
2606 *
2607 * However, we can't do this for PCI devices behind bridges,
2608 * because all PCI devices behind the same bridge will end up
2609 * with the same source-id on their transactions.
2610 *
2611 * Practically speaking, we can't change things around for these
2612 * devices at run-time, because we can't be sure there'll be no
2613 * DMA transactions in flight for any of their siblings.
2614 *
2615 * So PCI devices (unless they're on the root bus) as well as
2616 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2617 * the 1:1 domain, just in _case_ one of their siblings turns out
2618 * not to be able to map all of memory.
2619 */
2620 if (!pci_is_pcie(pdev)) {
2621 if (!pci_is_root_bus(pdev->bus))
2622 return 0;
2623 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2624 return 0;
2625 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2626 return 0;
2627 } else {
2628 if (device_has_rmrr(dev))
2629 return 0;
2630 }
David Woodhouse6941af22009-07-04 18:24:27 +01002631
David Woodhouse3dfc8132009-07-04 19:11:08 +01002632 /*
David Woodhouse3dfc8132009-07-04 19:11:08 +01002633 * At boot time, we don't yet know if devices will be 64-bit capable.
David Woodhouse3bdb2592014-03-09 16:03:08 -07002634 * Assume that they will — if they turn out not to be, then we can
David Woodhouse3dfc8132009-07-04 19:11:08 +01002635 * take them out of the 1:1 domain later.
2636 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002637 if (!startup) {
2638 /*
2639 * If the device's dma_mask is less than the system's memory
2640 * size then this is not a candidate for identity mapping.
2641 */
David Woodhouse3bdb2592014-03-09 16:03:08 -07002642 u64 dma_mask = *dev->dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002643
David Woodhouse3bdb2592014-03-09 16:03:08 -07002644 if (dev->coherent_dma_mask &&
2645 dev->coherent_dma_mask < dma_mask)
2646 dma_mask = dev->coherent_dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002647
David Woodhouse3bdb2592014-03-09 16:03:08 -07002648 return dma_mask >= dma_get_required_mask(dev);
Chris Wright8fcc5372011-05-28 13:15:02 -05002649 }
David Woodhouse6941af22009-07-04 18:24:27 +01002650
2651 return 1;
2652}
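/*
 * Worked example of the !startup check above (illustrative numbers): on a
 * host with 8GiB of RAM, dma_get_required_mask() is DMA_BIT_MASK(33).  A
 * device whose dma_mask is DMA_BIT_MASK(32) therefore fails the
 * "dma_mask >= required mask" test and is not (or no longer) a candidate
 * for the 1:1 domain, while a 64-bit capable device passes and stays
 * identity mapped.
 */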
2653
David Woodhousecf04eee2014-03-21 16:49:04 +00002654static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2655{
2656 int ret;
2657
2658 if (!iommu_should_identity_map(dev, 1))
2659 return 0;
2660
2661 ret = domain_add_dev_info(si_domain, dev,
2662 hw ? CONTEXT_TT_PASS_THROUGH :
2663 CONTEXT_TT_MULTI_LEVEL);
2664 if (!ret)
2665 pr_info("IOMMU: %s identity mapping for device %s\n",
2666 hw ? "hardware" : "software", dev_name(dev));
2667 else if (ret == -ENODEV)
2668 /* device not associated with an iommu */
2669 ret = 0;
2670
2671 return ret;
2672}
2673
2674
Matt Kraai071e1372009-08-23 22:30:22 -07002675static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002676{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002677 struct pci_dev *pdev = NULL;
David Woodhousecf04eee2014-03-21 16:49:04 +00002678 struct dmar_drhd_unit *drhd;
2679 struct intel_iommu *iommu;
2680 struct device *dev;
2681 int i;
2682 int ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002683
David Woodhouse19943b02009-08-04 16:19:20 +01002684 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002685 if (ret)
2686 return -EFAULT;
2687
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002688 for_each_pci_dev(pdev) {
David Woodhousecf04eee2014-03-21 16:49:04 +00002689 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2690 if (ret)
2691 return ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002692 }
2693
David Woodhousecf04eee2014-03-21 16:49:04 +00002694 for_each_active_iommu(iommu, drhd)
2695 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2696 struct acpi_device_physical_node *pn;
2697 struct acpi_device *adev;
2698
2699 if (dev->bus != &acpi_bus_type)
2700 continue;
2701
2702 adev = to_acpi_device(dev);
2703 mutex_lock(&adev->physical_node_lock);
2704 list_for_each_entry(pn, &adev->physical_node_list, node) {
2705 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2706 if (ret)
2707 break;
2708 }
2709 mutex_unlock(&adev->physical_node_lock);
2710 if (ret)
2711 return ret;
2712 }
2713
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002714 return 0;
2715}
2716
Jiang Liuffebeb42014-11-09 22:48:02 +08002717static void intel_iommu_init_qi(struct intel_iommu *iommu)
2718{
2719 /*
2720 * Start from a sane iommu hardware state.
2721 * If the queued invalidation was already initialized by us
2722 * (for example, while enabling interrupt-remapping), then
2723 * things are already rolling from a sane state.
2724 */
2725 if (!iommu->qi) {
2726 /*
2727 * Clear any previous faults.
2728 */
2729 dmar_fault(-1, iommu);
2730 /*
2731 * Disable queued invalidation if supported and already enabled
2732 * before OS handover.
2733 */
2734 dmar_disable_qi(iommu);
2735 }
2736
2737 if (dmar_enable_qi(iommu)) {
2738 /*
2739 * Queued Invalidate not enabled, use Register Based Invalidate
2740 */
2741 iommu->flush.flush_context = __iommu_flush_context;
2742 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2743 pr_info("IOMMU: %s using Register based invalidation\n",
2744 iommu->name);
2745 } else {
2746 iommu->flush.flush_context = qi_flush_context;
2747 iommu->flush.flush_iotlb = qi_flush_iotlb;
2748 pr_info("IOMMU: %s using Queued invalidation\n", iommu->name);
2749 }
2750}
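/*
 * After this has run, callers invalidate through the function pointers and
 * need not care which mechanism was selected, e.g. (as done in init_dmars()
 * below):
 *
 *	iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
 *	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
 */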
2751
Joseph Cihulab7792602011-05-03 00:08:37 -07002752static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002753{
2754 struct dmar_drhd_unit *drhd;
2755 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002756 struct device *dev;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002757 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002758 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002759
2760 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002761 * for each drhd
2762 * allocate root
2763 * initialize and program root entry to not present
2764 * endfor
2765 */
2766 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002767 /*
2768 * lock not needed as this is only incremented in the single-
2769 * threaded kernel __init code path; all other accesses are
2770 * read only
2771 */
Jiang Liu78d8e702014-11-09 22:47:57 +08002772 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
Mike Travis1b198bb2012-03-05 15:05:16 -08002773 g_num_of_iommus++;
2774 continue;
2775 }
2776 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
Jiang Liu78d8e702014-11-09 22:47:57 +08002777 DMAR_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08002778 }
2779
Jiang Liuffebeb42014-11-09 22:48:02 +08002780 /* Preallocate enough resources for IOMMU hot-addition */
2781 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
2782 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
2783
Weidong Hand9630fe2008-12-08 11:06:32 +08002784 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2785 GFP_KERNEL);
2786 if (!g_iommus) {
2787 printk(KERN_ERR "Allocating global iommu array failed\n");
2788 ret = -ENOMEM;
2789 goto error;
2790 }
2791
mark gross80b20dd2008-04-18 13:53:58 -07002792 deferred_flush = kzalloc(g_num_of_iommus *
2793 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2794 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002795 ret = -ENOMEM;
Jiang Liu989d51f2014-02-19 14:07:21 +08002796 goto free_g_iommus;
mark gross5e0d2a62008-03-04 15:22:08 -08002797 }
2798
Jiang Liu7c919772014-01-06 14:18:18 +08002799 for_each_active_iommu(iommu, drhd) {
Weidong Hand9630fe2008-12-08 11:06:32 +08002800 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002801
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002802 ret = iommu_init_domains(iommu);
2803 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002804 goto free_iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002805
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002806 /*
2807 * TBD:
2808 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002809 * among all IOMMU's. Need to Split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002810 */
2811 ret = iommu_alloc_root_entry(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08002812 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002813 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002814 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002815 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002816 }
2817
Jiang Liuffebeb42014-11-09 22:48:02 +08002818 for_each_active_iommu(iommu, drhd)
2819 intel_iommu_init_qi(iommu);
Youquan Songa77b67d2008-10-16 16:31:56 -07002820
David Woodhouse19943b02009-08-04 16:19:20 +01002821 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07002822 iommu_identity_mapping |= IDENTMAP_ALL;
2823
Suresh Siddhad3f13812011-08-23 17:05:25 -07002824#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07002825 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002826#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07002827
2828 check_tylersburg_isoch();
2829
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002830 /*
2831 * If pass through is not set or not enabled, set up context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002832 * identity mappings for rmrr, gfx, and isa, possibly falling back to static
2833 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002834 */
David Woodhouse19943b02009-08-04 16:19:20 +01002835 if (iommu_identity_mapping) {
2836 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2837 if (ret) {
2838 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08002839 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002840 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002841 }
David Woodhouse19943b02009-08-04 16:19:20 +01002842 /*
2843 * For each rmrr
2844 * for each dev attached to rmrr
2845 * do
2846 * locate drhd for dev, alloc domain for dev
2847 * allocate free domain
2848 * allocate page table entries for rmrr
2849 * if context not allocated for bus
2850 * allocate and init context
2851 * set present in root table for this bus
2852 * init context with domain, translation etc
2853 * endfor
2854 * endfor
2855 */
2856 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2857 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002858 /* some BIOSes list non-existent devices in the DMAR table. */
2859 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
David Woodhouse832bd852014-03-07 15:08:36 +00002860 i, dev) {
David Woodhouse0b9d9752014-03-09 15:48:15 -07002861 ret = iommu_prepare_rmrr_dev(rmrr, dev);
David Woodhouse19943b02009-08-04 16:19:20 +01002862 if (ret)
2863 printk(KERN_ERR
2864 "IOMMU: mapping reserved region failed\n");
2865 }
2866 }
2867
2868 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002869
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002870 /*
2871 * for each drhd
2872 * enable fault log
2873 * global invalidate context cache
2874 * global invalidate iotlb
2875 * enable translation
2876 */
Jiang Liu7c919772014-01-06 14:18:18 +08002877 for_each_iommu(iommu, drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07002878 if (drhd->ignored) {
2879 /*
2880 * we always have to disable PMRs or DMA may fail on
2881 * this device
2882 */
2883 if (force_on)
Jiang Liu7c919772014-01-06 14:18:18 +08002884 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002885 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07002886 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002887
2888 iommu_flush_write_buffer(iommu);
2889
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002890 ret = dmar_set_interrupt(iommu);
2891 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002892 goto free_iommu;
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002893
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002894 iommu_set_root_entry(iommu);
2895
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002896 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002897 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Jiang Liu2a41cce2014-07-11 14:19:33 +08002898 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07002899 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002900 }
2901
2902 return 0;
Jiang Liu989d51f2014-02-19 14:07:21 +08002903
2904free_iommu:
Jiang Liuffebeb42014-11-09 22:48:02 +08002905 for_each_active_iommu(iommu, drhd) {
2906 disable_dmar_iommu(iommu);
Jiang Liua868e6b2014-01-06 14:18:20 +08002907 free_dmar_iommu(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08002908 }
Jiang Liu9bdc5312014-01-06 14:18:27 +08002909 kfree(deferred_flush);
Jiang Liu989d51f2014-02-19 14:07:21 +08002910free_g_iommus:
Weidong Hand9630fe2008-12-08 11:06:32 +08002911 kfree(g_iommus);
Jiang Liu989d51f2014-02-19 14:07:21 +08002912error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002913 return ret;
2914}
2915
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002916/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002917static struct iova *intel_alloc_iova(struct device *dev,
2918 struct dmar_domain *domain,
2919 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002920{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002921 struct iova *iova = NULL;
2922
David Woodhouse875764d2009-06-28 21:20:51 +01002923 /* Restrict dma_mask to the width that the iommu can handle */
2924 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2925
2926 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002927 /*
2928 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002929 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002930 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002931 */
David Woodhouse875764d2009-06-28 21:20:51 +01002932 iova = alloc_iova(&domain->iovad, nrpages,
2933 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2934 if (iova)
2935 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002936 }
David Woodhouse875764d2009-06-28 21:20:51 +01002937 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2938 if (unlikely(!iova)) {
2939 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
David Woodhouse207e3592014-03-09 16:12:32 -07002940 nrpages, dev_name(dev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002941 return NULL;
2942 }
2943
2944 return iova;
2945}
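/*
 * Typical call pattern (a sketch of how __intel_map_single() below uses
 * this; sizes are in MM pages, as the comment above notes):
 *
 *	size = aligned_nrpages(paddr, size);
 *	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
 *	if (!iova)
 *		goto error;
 */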
2946
David Woodhoused4b709f2014-03-09 16:07:40 -07002947static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002948{
2949 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002950 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002951
David Woodhoused4b709f2014-03-09 16:07:40 -07002952 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002953 if (!domain) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002954 printk(KERN_ERR "Allocating domain for %s failed",
2955 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002956 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002957 }
2958
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002959 /* make sure context mapping is ok */
David Woodhoused4b709f2014-03-09 16:07:40 -07002960 if (unlikely(!domain_context_mapped(dev))) {
2961 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002962 if (ret) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002963 printk(KERN_ERR "Domain context map for %s failed",
2964 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002965 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002966 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002967 }
2968
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002969 return domain;
2970}
2971
David Woodhoused4b709f2014-03-09 16:07:40 -07002972static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
David Woodhouse147202a2009-07-07 19:43:20 +01002973{
2974 struct device_domain_info *info;
2975
2976 /* No lock here, assumes no domain exit in normal case */
David Woodhoused4b709f2014-03-09 16:07:40 -07002977 info = dev->archdata.iommu;
David Woodhouse147202a2009-07-07 19:43:20 +01002978 if (likely(info))
2979 return info->domain;
2980
2981 return __get_valid_domain_for_dev(dev);
2982}
2983
David Woodhouse3d891942014-03-06 15:59:26 +00002984static int iommu_dummy(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002985{
David Woodhouse3d891942014-03-06 15:59:26 +00002986 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002987}
2988
David Woodhouseecb509e2014-03-09 16:29:55 -07002989/* Check if the dev needs to go through the non-identity map and unmap process. */
David Woodhouse73676832009-07-04 14:08:36 +01002990static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002991{
2992 int found;
2993
David Woodhouse3d891942014-03-06 15:59:26 +00002994 if (iommu_dummy(dev))
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002995 return 1;
2996
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002997 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002998 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002999
David Woodhouse9b226622014-03-09 14:03:28 -07003000 found = identity_mapping(dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003001 if (found) {
David Woodhouseecb509e2014-03-09 16:29:55 -07003002 if (iommu_should_identity_map(dev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003003 return 1;
3004 else {
3005 /*
3006 * A 32 bit DMA-only device is removed from si_domain and falls back
3007 * to non-identity mapping.
3008 */
David Woodhousebf9c9ed2014-03-09 16:19:13 -07003009 domain_remove_one_dev_info(si_domain, dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003010 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
David Woodhouseecb509e2014-03-09 16:29:55 -07003011 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003012 return 0;
3013 }
3014 } else {
3015 /*
3016 * In case a 64 bit DMA device has been detached from a VM, the device
3017 * is put back into si_domain for identity mapping.
3018 */
David Woodhouseecb509e2014-03-09 16:29:55 -07003019 if (iommu_should_identity_map(dev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003020 int ret;
David Woodhouse5913c9b2014-03-09 16:27:31 -07003021 ret = domain_add_dev_info(si_domain, dev,
David Woodhouse5fe60f42009-08-09 10:53:41 +01003022 hw_pass_through ?
3023 CONTEXT_TT_PASS_THROUGH :
3024 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003025 if (!ret) {
3026 printk(KERN_INFO "64bit %s uses identity mapping\n",
David Woodhouseecb509e2014-03-09 16:29:55 -07003027 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003028 return 1;
3029 }
3030 }
3031 }
3032
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003033 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003034}
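/*
 * Net effect of the above (for illustration): a device placed in si_domain
 * at boot that turns out to be limited to 32-bit DMA on a >4GiB machine is
 * dropped from si_domain on its first mapping request ("32bit ... uses
 * non-identity mapping"), while a 64-bit capable device previously detached
 * from a VM domain is put back into si_domain ("64bit ... uses identity
 * mapping").
 */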
3035
David Woodhouse5040a912014-03-09 16:14:00 -07003036static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003037 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003038{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003039 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003040 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003041 struct iova *iova;
3042 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003043 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08003044 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07003045 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003046
3047 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003048
David Woodhouse5040a912014-03-09 16:14:00 -07003049 if (iommu_no_mapping(dev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003050 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003051
David Woodhouse5040a912014-03-09 16:14:00 -07003052 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003053 if (!domain)
3054 return 0;
3055
Weidong Han8c11e792008-12-08 15:29:22 +08003056 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01003057 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003058
David Woodhouse5040a912014-03-09 16:14:00 -07003059 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003060 if (!iova)
3061 goto error;
3062
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003063 /*
3064 * Check if DMAR supports zero-length reads on write-only
3065 * mappings.
3066 */
3067 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003068 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003069 prot |= DMA_PTE_READ;
3070 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3071 prot |= DMA_PTE_WRITE;
3072 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003073 * paddr..(paddr + size) might span a partial page, so we map the whole
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003074 * page. Note: if two parts of one page are mapped separately, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003075 * might end up with two guest addresses mapping to the same host paddr,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003076 * but this is not a big problem
3077 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01003078 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07003079 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003080 if (ret)
3081 goto error;
3082
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003083 /* it's a non-present to present mapping. Only flush if caching mode */
3084 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003085 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003086 else
Weidong Han8c11e792008-12-08 15:29:22 +08003087 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003088
David Woodhouse03d6a242009-06-28 15:33:46 +01003089 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3090 start_paddr += paddr & ~PAGE_MASK;
3091 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003092
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003093error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003094 if (iova)
3095 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00003096 printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
David Woodhouse5040a912014-03-09 16:14:00 -07003097 dev_name(dev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003098 return 0;
3099}
3100
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003101static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3102 unsigned long offset, size_t size,
3103 enum dma_data_direction dir,
3104 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003105{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003106 return __intel_map_single(dev, page_to_phys(page) + offset, size,
David Woodhouse46333e32014-03-10 20:01:21 -07003107 dir, *dev->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003108}
3109
mark gross5e0d2a62008-03-04 15:22:08 -08003110static void flush_unmaps(void)
3111{
mark gross80b20dd2008-04-18 13:53:58 -07003112 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08003113
mark gross5e0d2a62008-03-04 15:22:08 -08003114 timer_on = 0;
3115
3116 /* just flush them all */
3117 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08003118 struct intel_iommu *iommu = g_iommus[i];
3119 if (!iommu)
3120 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003121
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003122 if (!deferred_flush[i].next)
3123 continue;
3124
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003125 /* In caching mode, global flushes make emulation expensive */
3126 if (!cap_caching_mode(iommu->cap))
3127 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08003128 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003129 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08003130 unsigned long mask;
3131 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003132 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08003133
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003134 /* On real hardware multiple invalidations are expensive */
3135 if (cap_caching_mode(iommu->cap))
3136 iommu_flush_iotlb_psi(iommu, domain->id,
Jiang Liua156ef92014-07-11 14:19:36 +08003137 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00003138 !deferred_flush[i].freelist[j], 0);
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003139 else {
Jiang Liua156ef92014-07-11 14:19:36 +08003140 mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003141 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3142 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3143 }
Yu Zhao93a23a72009-05-18 13:51:37 +08003144 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003145 if (deferred_flush[i].freelist[j])
3146 dma_free_pagelist(deferred_flush[i].freelist[j]);
mark gross80b20dd2008-04-18 13:53:58 -07003147 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003148 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003149 }
3150
mark gross5e0d2a62008-03-04 15:22:08 -08003151 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003152}
3153
3154static void flush_unmaps_timeout(unsigned long data)
3155{
mark gross80b20dd2008-04-18 13:53:58 -07003156 unsigned long flags;
3157
3158 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003159 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07003160 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003161}
3162
David Woodhouseea8ea462014-03-05 17:09:32 +00003163static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
mark gross5e0d2a62008-03-04 15:22:08 -08003164{
3165 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07003166 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08003167 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08003168
3169 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07003170 if (list_size == HIGH_WATER_MARK)
3171 flush_unmaps();
3172
Weidong Han8c11e792008-12-08 15:29:22 +08003173 iommu = domain_get_iommu(dom);
3174 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003175
mark gross80b20dd2008-04-18 13:53:58 -07003176 next = deferred_flush[iommu_id].next;
3177 deferred_flush[iommu_id].domain[next] = dom;
3178 deferred_flush[iommu_id].iova[next] = iova;
David Woodhouseea8ea462014-03-05 17:09:32 +00003179 deferred_flush[iommu_id].freelist[next] = freelist;
mark gross80b20dd2008-04-18 13:53:58 -07003180 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08003181
3182 if (!timer_on) {
3183 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3184 timer_on = 1;
3185 }
3186 list_size++;
3187 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3188}
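/*
 * Net behaviour of the deferred path (for illustration): unmaps are queued
 * per-IOMMU and flushed either when HIGH_WATER_MARK entries have piled up
 * or when the 10ms unmap_timer fires, whichever comes first, trading a
 * short window of stale IOTLB entries for far fewer invalidations.
 */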
3189
Jiang Liud41a4ad2014-07-11 14:19:34 +08003190static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003191{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003192 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003193 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003194 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003195 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003196 struct page *freelist;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003197
David Woodhouse73676832009-07-04 14:08:36 +01003198 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003199 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003200
David Woodhouse1525a292014-03-06 16:19:30 +00003201 domain = find_domain(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003202 BUG_ON(!domain);
3203
Weidong Han8c11e792008-12-08 15:29:22 +08003204 iommu = domain_get_iommu(domain);
3205
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003206 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01003207 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3208 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003209 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003210
David Woodhoused794dc92009-06-28 00:27:49 +01003211 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3212 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003213
David Woodhoused794dc92009-06-28 00:27:49 +01003214 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003215 dev_name(dev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003216
David Woodhouseea8ea462014-03-05 17:09:32 +00003217 freelist = domain_unmap(domain, start_pfn, last_pfn);
David Woodhoused794dc92009-06-28 00:27:49 +01003218
mark gross5e0d2a62008-03-04 15:22:08 -08003219 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01003220 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
David Woodhouseea8ea462014-03-05 17:09:32 +00003221 last_pfn - start_pfn + 1, !freelist, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08003222 /* free iova */
3223 __free_iova(&domain->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003224 dma_free_pagelist(freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003225 } else {
David Woodhouseea8ea462014-03-05 17:09:32 +00003226 add_unmap(domain, iova, freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003227 /*
3228 * queue up the release of the unmap to save the roughly 1/6th of a
3229 * cpu otherwise used up by the iotlb flush operation...
3230 */
mark gross5e0d2a62008-03-04 15:22:08 -08003231 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003232}
3233
Jiang Liud41a4ad2014-07-11 14:19:34 +08003234static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3235 size_t size, enum dma_data_direction dir,
3236 struct dma_attrs *attrs)
3237{
3238 intel_unmap(dev, dev_addr);
3239}
3240
David Woodhouse5040a912014-03-09 16:14:00 -07003241static void *intel_alloc_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003242 dma_addr_t *dma_handle, gfp_t flags,
3243 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003244{
Akinobu Mita36746432014-06-04 16:06:51 -07003245 struct page *page = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003246 int order;
3247
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003248 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003249 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003250
David Woodhouse5040a912014-03-09 16:14:00 -07003251 if (!iommu_no_mapping(dev))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003252 flags &= ~(GFP_DMA | GFP_DMA32);
David Woodhouse5040a912014-03-09 16:14:00 -07003253 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3254 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003255 flags |= GFP_DMA;
3256 else
3257 flags |= GFP_DMA32;
3258 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003259
Akinobu Mita36746432014-06-04 16:06:51 -07003260 if (flags & __GFP_WAIT) {
3261 unsigned int count = size >> PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003262
Akinobu Mita36746432014-06-04 16:06:51 -07003263 page = dma_alloc_from_contiguous(dev, count, order);
3264 if (page && iommu_no_mapping(dev) &&
3265 page_to_phys(page) + size > dev->coherent_dma_mask) {
3266 dma_release_from_contiguous(dev, page, count);
3267 page = NULL;
3268 }
3269 }
3270
3271 if (!page)
3272 page = alloc_pages(flags, order);
3273 if (!page)
3274 return NULL;
3275 memset(page_address(page), 0, size);
3276
3277 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003278 DMA_BIDIRECTIONAL,
David Woodhouse5040a912014-03-09 16:14:00 -07003279 dev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003280 if (*dma_handle)
Akinobu Mita36746432014-06-04 16:06:51 -07003281 return page_address(page);
3282 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3283 __free_pages(page, order);
3284
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003285 return NULL;
3286}
3287
David Woodhouse5040a912014-03-09 16:14:00 -07003288static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003289 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003290{
3291 int order;
Akinobu Mita36746432014-06-04 16:06:51 -07003292 struct page *page = virt_to_page(vaddr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003293
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003294 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003295 order = get_order(size);
3296
Jiang Liud41a4ad2014-07-11 14:19:34 +08003297 intel_unmap(dev, dma_handle);
Akinobu Mita36746432014-06-04 16:06:51 -07003298 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3299 __free_pages(page, order);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003300}
3301
David Woodhouse5040a912014-03-09 16:14:00 -07003302static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003303 int nelems, enum dma_data_direction dir,
3304 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003305{
Jiang Liud41a4ad2014-07-11 14:19:34 +08003306 intel_unmap(dev, sglist[0].dma_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003307}
3308
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003309static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003310 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003311{
3312 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003313 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003314
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003315 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003316 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003317 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003318 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003319 }
3320 return nelems;
3321}
3322
David Woodhouse5040a912014-03-09 16:14:00 -07003323static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003324 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003325{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003326 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003327 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003328 size_t size = 0;
3329 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003330 struct iova *iova = NULL;
3331 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003332 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003333 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003334 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003335
3336 BUG_ON(dir == DMA_NONE);
David Woodhouse5040a912014-03-09 16:14:00 -07003337 if (iommu_no_mapping(dev))
3338 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003339
David Woodhouse5040a912014-03-09 16:14:00 -07003340 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003341 if (!domain)
3342 return 0;
3343
Weidong Han8c11e792008-12-08 15:29:22 +08003344 iommu = domain_get_iommu(domain);
3345
David Woodhouseb536d242009-06-28 14:49:31 +01003346 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003347 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003348
David Woodhouse5040a912014-03-09 16:14:00 -07003349 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3350 *dev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003351 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003352 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003353 return 0;
3354 }
3355
3356 /*
3357 * Check if DMAR supports zero-length reads on write-only
3358 * mappings.
3359 */
3360 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003361 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003362 prot |= DMA_PTE_READ;
3363 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3364 prot |= DMA_PTE_WRITE;
3365
David Woodhouseb536d242009-06-28 14:49:31 +01003366 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003367
Fenghua Yuf5329592009-08-04 15:09:37 -07003368 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003369 if (unlikely(ret)) {
David Woodhousee1605492009-06-29 11:17:38 +01003370 dma_pte_free_pagetable(domain, start_vpfn,
3371 start_vpfn + size - 1);
David Woodhousee1605492009-06-29 11:17:38 +01003372 __free_iova(&domain->iovad, iova);
3373 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003374 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003375
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003376 /* it's a non-present to present mapping. Only flush if caching mode */
3377 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003378 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003379 else
Weidong Han8c11e792008-12-08 15:29:22 +08003380 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003381
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003382 return nelems;
3383}
3384
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003385static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3386{
3387 return !dma_addr;
3388}
3389
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003390struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003391 .alloc = intel_alloc_coherent,
3392 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003393 .map_sg = intel_map_sg,
3394 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003395 .map_page = intel_map_page,
3396 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003397 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003398};
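/*
 * Drivers never call these ops directly; they go through the generic DMA
 * API.  A rough sketch of how an ordinary PCI driver ends up here (the
 * buffer and length names are made up):
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, dma))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE);
 *
 * dma_map_single() lands in intel_map_page()/__intel_map_single() above,
 * dma_unmap_single() in intel_unmap_page(), and dma_mapping_error() in
 * intel_mapping_error().
 */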
3399
3400static inline int iommu_domain_cache_init(void)
3401{
3402 int ret = 0;
3403
3404 iommu_domain_cache = kmem_cache_create("iommu_domain",
3405 sizeof(struct dmar_domain),
3406 0,
3407 SLAB_HWCACHE_ALIGN,
3408
3409 NULL);
3410 if (!iommu_domain_cache) {
3411 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3412 ret = -ENOMEM;
3413 }
3414
3415 return ret;
3416}
3417
3418static inline int iommu_devinfo_cache_init(void)
3419{
3420 int ret = 0;
3421
3422 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3423 sizeof(struct device_domain_info),
3424 0,
3425 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003426 NULL);
3427 if (!iommu_devinfo_cache) {
3428 printk(KERN_ERR "Couldn't create devinfo cache\n");
3429 ret = -ENOMEM;
3430 }
3431
3432 return ret;
3433}
3434
3435static inline int iommu_iova_cache_init(void)
3436{
3437 int ret = 0;
3438
3439 iommu_iova_cache = kmem_cache_create("iommu_iova",
3440 sizeof(struct iova),
3441 0,
3442 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003443 NULL);
3444 if (!iommu_iova_cache) {
3445 printk(KERN_ERR "Couldn't create iova cache\n");
3446 ret = -ENOMEM;
3447 }
3448
3449 return ret;
3450}
3451
3452static int __init iommu_init_mempool(void)
3453{
3454 int ret;
3455 ret = iommu_iova_cache_init();
3456 if (ret)
3457 return ret;
3458
3459 ret = iommu_domain_cache_init();
3460 if (ret)
3461 goto domain_error;
3462
3463 ret = iommu_devinfo_cache_init();
3464 if (!ret)
3465 return ret;
3466
3467 kmem_cache_destroy(iommu_domain_cache);
3468domain_error:
3469 kmem_cache_destroy(iommu_iova_cache);
3470
3471 return -ENOMEM;
3472}
3473
3474static void __init iommu_exit_mempool(void)
3475{
3476 kmem_cache_destroy(iommu_devinfo_cache);
3477 kmem_cache_destroy(iommu_domain_cache);
3478 kmem_cache_destroy(iommu_iova_cache);
3479
3480}
3481
Dan Williams556ab452010-07-23 15:47:56 -07003482static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3483{
3484 struct dmar_drhd_unit *drhd;
3485 u32 vtbar;
3486 int rc;
3487
3488 /* We know that this device on this chipset has its own IOMMU.
3489 * If we find it under a different IOMMU, then the BIOS is lying
3490 * to us. Hope that the IOMMU for this device is actually
3491 * disabled, and it needs no translation...
3492 */
3493 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3494 if (rc) {
3495 /* "can't" happen */
3496 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3497 return;
3498 }
3499 vtbar &= 0xffff0000;
3500
3501 /* we know that this iommu should be at offset 0xa000 from vtbar */
3502 drhd = dmar_find_matched_drhd_unit(pdev);
3503 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3504 TAINT_FIRMWARE_WORKAROUND,
3505 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3506 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3507}
3508DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3509
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003510static void __init init_no_remapping_devices(void)
3511{
3512 struct dmar_drhd_unit *drhd;
David Woodhouse832bd852014-03-07 15:08:36 +00003513 struct device *dev;
Jiang Liub683b232014-02-19 14:07:32 +08003514 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003515
3516 for_each_drhd_unit(drhd) {
3517 if (!drhd->include_all) {
Jiang Liub683b232014-02-19 14:07:32 +08003518 for_each_active_dev_scope(drhd->devices,
3519 drhd->devices_cnt, i, dev)
3520 break;
David Woodhouse832bd852014-03-07 15:08:36 +00003521 /* ignore DMAR unit if no devices exist */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003522 if (i == drhd->devices_cnt)
3523 drhd->ignored = 1;
3524 }
3525 }
3526
Jiang Liu7c919772014-01-06 14:18:18 +08003527 for_each_active_drhd_unit(drhd) {
Jiang Liu7c919772014-01-06 14:18:18 +08003528 if (drhd->include_all)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003529 continue;
3530
Jiang Liub683b232014-02-19 14:07:32 +08003531 for_each_active_dev_scope(drhd->devices,
3532 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003533 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003534 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003535 if (i < drhd->devices_cnt)
3536 continue;
3537
David Woodhousec0771df2011-10-14 20:59:46 +01003538 /* This IOMMU has *only* gfx devices. Either bypass it or
3539 set the gfx_mapped flag, as appropriate */
3540 if (dmar_map_gfx) {
3541 intel_iommu_gfx_mapped = 1;
3542 } else {
3543 drhd->ignored = 1;
Jiang Liub683b232014-02-19 14:07:32 +08003544 for_each_active_dev_scope(drhd->devices,
3545 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003546 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003547 }
3548 }
3549}
3550
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003551#ifdef CONFIG_SUSPEND
3552static int init_iommu_hw(void)
3553{
3554 struct dmar_drhd_unit *drhd;
3555 struct intel_iommu *iommu = NULL;
3556
3557 for_each_active_iommu(iommu, drhd)
3558 if (iommu->qi)
3559 dmar_reenable_qi(iommu);
3560
Joseph Cihulab7792602011-05-03 00:08:37 -07003561 for_each_iommu(iommu, drhd) {
3562 if (drhd->ignored) {
3563 /*
3564 * we always have to disable PMRs or DMA may fail on
3565 * this device
3566 */
3567 if (force_on)
3568 iommu_disable_protect_mem_regions(iommu);
3569 continue;
3570 }
3571
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003572 iommu_flush_write_buffer(iommu);
3573
3574 iommu_set_root_entry(iommu);
3575
3576 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003577 DMA_CCMD_GLOBAL_INVL);
Jiang Liu2a41cce2014-07-11 14:19:33 +08003578 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3579 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07003580 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003581 }
3582
3583 return 0;
3584}
3585
3586static void iommu_flush_all(void)
3587{
3588 struct dmar_drhd_unit *drhd;
3589 struct intel_iommu *iommu;
3590
3591 for_each_active_iommu(iommu, drhd) {
3592 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003593 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003594 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003595 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003596 }
3597}
3598
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003599static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003600{
3601 struct dmar_drhd_unit *drhd;
3602 struct intel_iommu *iommu = NULL;
3603 unsigned long flag;
3604
3605 for_each_active_iommu(iommu, drhd) {
3606 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3607 GFP_ATOMIC);
3608 if (!iommu->iommu_state)
3609 goto nomem;
3610 }
3611
3612 iommu_flush_all();
3613
3614 for_each_active_iommu(iommu, drhd) {
3615 iommu_disable_translation(iommu);
3616
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003617 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003618
3619 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3620 readl(iommu->reg + DMAR_FECTL_REG);
3621 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3622 readl(iommu->reg + DMAR_FEDATA_REG);
3623 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3624 readl(iommu->reg + DMAR_FEADDR_REG);
3625 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3626 readl(iommu->reg + DMAR_FEUADDR_REG);
3627
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003628 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003629 }
3630 return 0;
3631
3632nomem:
3633 for_each_active_iommu(iommu, drhd)
3634 kfree(iommu->iommu_state);
3635
3636 return -ENOMEM;
3637}
3638
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003639static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003640{
3641 struct dmar_drhd_unit *drhd;
3642 struct intel_iommu *iommu = NULL;
3643 unsigned long flag;
3644
3645 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003646 if (force_on)
3647 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3648 else
3649 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003650 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003651 }
3652
3653 for_each_active_iommu(iommu, drhd) {
3654
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003655 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003656
3657 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3658 iommu->reg + DMAR_FECTL_REG);
3659 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3660 iommu->reg + DMAR_FEDATA_REG);
3661 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3662 iommu->reg + DMAR_FEADDR_REG);
3663 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3664 iommu->reg + DMAR_FEUADDR_REG);
3665
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003666 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003667 }
3668
3669 for_each_active_iommu(iommu, drhd)
3670 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003671}
3672
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003673static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003674 .resume = iommu_resume,
3675 .suspend = iommu_suspend,
3676};
3677
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003678static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003679{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003680 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003681}
3682
3683#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003684static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003685#endif /* CONFIG_PM */
3686
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003687
Jiang Liuc2a0b532014-11-09 22:47:56 +08003688int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003689{
3690 struct acpi_dmar_reserved_memory *rmrr;
3691 struct dmar_rmrr_unit *rmrru;
3692
3693 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3694 if (!rmrru)
3695 return -ENOMEM;
3696
3697 rmrru->hdr = header;
3698 rmrr = (struct acpi_dmar_reserved_memory *)header;
3699 rmrru->base_address = rmrr->base_address;
3700 rmrru->end_address = rmrr->end_address;
Jiang Liu2e455282014-02-19 14:07:36 +08003701 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3702 ((void *)rmrr) + rmrr->header.length,
3703 &rmrru->devices_cnt);
3704 if (rmrru->devices_cnt && rmrru->devices == NULL) {
3705 kfree(rmrru);
3706 return -ENOMEM;
3707 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003708
Jiang Liu2e455282014-02-19 14:07:36 +08003709 list_add(&rmrru->list, &dmar_rmrr_units);
3710
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003711 return 0;
3712}
3713
Jiang Liu6b197242014-11-09 22:47:58 +08003714static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
3715{
3716 struct dmar_atsr_unit *atsru;
3717 struct acpi_dmar_atsr *tmp;
3718
3719 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3720 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
3721 if (atsr->segment != tmp->segment)
3722 continue;
3723 if (atsr->header.length != tmp->header.length)
3724 continue;
3725 if (memcmp(atsr, tmp, atsr->header.length) == 0)
3726 return atsru;
3727 }
3728
3729 return NULL;
3730}
3731
3732int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003733{
3734 struct acpi_dmar_atsr *atsr;
3735 struct dmar_atsr_unit *atsru;
3736
Jiang Liu6b197242014-11-09 22:47:58 +08003737 if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
3738 return 0;
3739
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003740 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
Jiang Liu6b197242014-11-09 22:47:58 +08003741 atsru = dmar_find_atsr(atsr);
3742 if (atsru)
3743 return 0;
3744
3745 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003746 if (!atsru)
3747 return -ENOMEM;
3748
Jiang Liu6b197242014-11-09 22:47:58 +08003749 /*
3750	 * If the memory was allocated from the slab by the ACPI _DSM method, we
3751	 * need to copy its contents because the buffer will be freed on
3752	 * return.
3753 */
3754 atsru->hdr = (void *)(atsru + 1);
3755 memcpy(atsru->hdr, hdr, hdr->length);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003756 atsru->include_all = atsr->flags & 0x1;
Jiang Liu2e455282014-02-19 14:07:36 +08003757 if (!atsru->include_all) {
3758 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
3759 (void *)atsr + atsr->header.length,
3760 &atsru->devices_cnt);
3761 if (atsru->devices_cnt && atsru->devices == NULL) {
3762 kfree(atsru);
3763 return -ENOMEM;
3764 }
3765 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003766
Jiang Liu0e242612014-02-19 14:07:34 +08003767 list_add_rcu(&atsru->list, &dmar_atsr_units);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003768
3769 return 0;
3770}
3771
Jiang Liu9bdc5312014-01-06 14:18:27 +08003772static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
3773{
3774 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
3775 kfree(atsru);
3776}
3777
Jiang Liu6b197242014-11-09 22:47:58 +08003778int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3779{
3780 struct acpi_dmar_atsr *atsr;
3781 struct dmar_atsr_unit *atsru;
3782
3783 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3784 atsru = dmar_find_atsr(atsr);
3785 if (atsru) {
3786 list_del_rcu(&atsru->list);
3787 synchronize_rcu();
3788 intel_iommu_free_atsr(atsru);
3789 }
3790
3791 return 0;
3792}
3793
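/*
 * Check whether an ATSR unit could be released: return -EBUSY if any device
 * in its (non-include_all) scope is still present, 0 otherwise.
 */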
3794int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3795{
3796 int i;
3797 struct device *dev;
3798 struct acpi_dmar_atsr *atsr;
3799 struct dmar_atsr_unit *atsru;
3800
3801 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3802 atsru = dmar_find_atsr(atsr);
3803 if (!atsru)
3804 return 0;
3805
3806 if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
3807 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
3808 i, dev)
3809 return -EBUSY;
3810
3811 return 0;
3812}
3813
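/*
 * Bring a hot-added DMAR unit into service: check that it supports the
 * features the running system already relies on (pass-through, snooping,
 * super pages), set up its domain bookkeeping, root entry, invalidation
 * queue and fault interrupt, enable translation, and attach the static
 * identity domain if one exists.
 */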
Jiang Liuffebeb42014-11-09 22:48:02 +08003814static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
3815{
3816 int sp, ret = 0;
3817 struct intel_iommu *iommu = dmaru->iommu;
3818
3819 if (g_iommus[iommu->seq_id])
3820 return 0;
3821
3822 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
3823 pr_warn("IOMMU: %s doesn't support hardware pass through.\n",
3824 iommu->name);
3825 return -ENXIO;
3826 }
3827 if (!ecap_sc_support(iommu->ecap) &&
3828 domain_update_iommu_snooping(iommu)) {
3829 pr_warn("IOMMU: %s doesn't support snooping.\n",
3830 iommu->name);
3831 return -ENXIO;
3832 }
3833 sp = domain_update_iommu_superpage(iommu) - 1;
3834 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
3835 pr_warn("IOMMU: %s doesn't support large page.\n",
3836 iommu->name);
3837 return -ENXIO;
3838 }
3839
3840 /*
3841 * Disable translation if already enabled prior to OS handover.
3842 */
3843 if (iommu->gcmd & DMA_GCMD_TE)
3844 iommu_disable_translation(iommu);
3845
3846 g_iommus[iommu->seq_id] = iommu;
3847 ret = iommu_init_domains(iommu);
3848 if (ret == 0)
3849 ret = iommu_alloc_root_entry(iommu);
3850 if (ret)
3851 goto out;
3852
3853 if (dmaru->ignored) {
3854 /*
3855		 * We always have to disable PMRs, or DMA may fail on this device
3856 */
3857 if (force_on)
3858 iommu_disable_protect_mem_regions(iommu);
3859 return 0;
3860 }
3861
3862 intel_iommu_init_qi(iommu);
3863 iommu_flush_write_buffer(iommu);
3864 ret = dmar_set_interrupt(iommu);
3865 if (ret)
3866 goto disable_iommu;
3867
3868 iommu_set_root_entry(iommu);
3869 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3870 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3871 iommu_enable_translation(iommu);
3872
3873 if (si_domain) {
3874 ret = iommu_attach_domain(si_domain, iommu);
3875 if (ret < 0 || si_domain->id != ret)
3876 goto disable_iommu;
3877 domain_attach_iommu(si_domain, iommu);
3878 }
3879
3880 iommu_disable_protect_mem_regions(iommu);
3881 return 0;
3882
3883disable_iommu:
3884 disable_dmar_iommu(iommu);
3885out:
3886 free_dmar_iommu(iommu);
3887 return ret;
3888}
3889
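/*
 * DMAR unit hotplug entry point: add the unit on insertion, otherwise
 * disable and free it. Nothing to do while the IOMMU is not enabled.
 */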
Jiang Liu6b197242014-11-09 22:47:58 +08003890int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
3891{
Jiang Liuffebeb42014-11-09 22:48:02 +08003892 int ret = 0;
3893 struct intel_iommu *iommu = dmaru->iommu;
3894
3895 if (!intel_iommu_enabled)
3896 return 0;
3897 if (iommu == NULL)
3898 return -EINVAL;
3899
3900 if (insert) {
3901 ret = intel_iommu_add(dmaru);
3902 } else {
3903 disable_dmar_iommu(iommu);
3904 free_dmar_iommu(iommu);
3905 }
3906
3907 return ret;
Jiang Liu6b197242014-11-09 22:47:58 +08003908}
3909
Jiang Liu9bdc5312014-01-06 14:18:27 +08003910static void intel_iommu_free_dmars(void)
3911{
3912 struct dmar_rmrr_unit *rmrru, *rmrr_n;
3913 struct dmar_atsr_unit *atsru, *atsr_n;
3914
3915 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
3916 list_del(&rmrru->list);
3917 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
3918 kfree(rmrru);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003919 }
3920
Jiang Liu9bdc5312014-01-06 14:18:27 +08003921 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
3922 list_del(&atsru->list);
3923 intel_iommu_free_atsr(atsru);
3924 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003925}
3926
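/*
 * Decide whether ATS may be used for @dev: walk up to the PCIe root port
 * above the device and return 1 if an ATSR unit on the same segment lists
 * that root port (or is marked include_all), 0 otherwise.
 */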
3927int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3928{
Jiang Liub683b232014-02-19 14:07:32 +08003929 int i, ret = 1;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003930 struct pci_bus *bus;
David Woodhouse832bd852014-03-07 15:08:36 +00003931 struct pci_dev *bridge = NULL;
3932 struct device *tmp;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003933 struct acpi_dmar_atsr *atsr;
3934 struct dmar_atsr_unit *atsru;
3935
3936 dev = pci_physfn(dev);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003937 for (bus = dev->bus; bus; bus = bus->parent) {
Jiang Liub5f82dd2014-02-19 14:07:31 +08003938 bridge = bus->self;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003939 if (!bridge || !pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08003940 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003941 return 0;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003942 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003943 break;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003944 }
Jiang Liub5f82dd2014-02-19 14:07:31 +08003945 if (!bridge)
3946 return 0;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003947
Jiang Liu0e242612014-02-19 14:07:34 +08003948 rcu_read_lock();
Jiang Liub5f82dd2014-02-19 14:07:31 +08003949 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3950 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3951 if (atsr->segment != pci_domain_nr(dev->bus))
3952 continue;
3953
Jiang Liub683b232014-02-19 14:07:32 +08003954 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
David Woodhouse832bd852014-03-07 15:08:36 +00003955 if (tmp == &bridge->dev)
Jiang Liub683b232014-02-19 14:07:32 +08003956 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003957
3958 if (atsru->include_all)
Jiang Liub683b232014-02-19 14:07:32 +08003959 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003960 }
Jiang Liub683b232014-02-19 14:07:32 +08003961 ret = 0;
3962out:
Jiang Liu0e242612014-02-19 14:07:34 +08003963 rcu_read_unlock();
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003964
Jiang Liub683b232014-02-19 14:07:32 +08003965 return ret;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003966}
3967
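/*
 * PCI device hotplug notification: keep the cached RMRR and ATSR device
 * scope lists in sync by inserting the new device into any scope that
 * covers it, or removing it again when the device goes away.
 */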
Jiang Liu59ce0512014-02-19 14:07:35 +08003968int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
3969{
3970 int ret = 0;
3971 struct dmar_rmrr_unit *rmrru;
3972 struct dmar_atsr_unit *atsru;
3973 struct acpi_dmar_atsr *atsr;
3974 struct acpi_dmar_reserved_memory *rmrr;
3975
3976 if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
3977 return 0;
3978
3979 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
3980 rmrr = container_of(rmrru->hdr,
3981 struct acpi_dmar_reserved_memory, header);
3982 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3983 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
3984 ((void *)rmrr) + rmrr->header.length,
3985 rmrr->segment, rmrru->devices,
3986 rmrru->devices_cnt);
Jiang Liu27e24952014-06-20 15:08:06 +08003987			if (ret < 0)
Jiang Liu59ce0512014-02-19 14:07:35 +08003988 return ret;
3989 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
Jiang Liu27e24952014-06-20 15:08:06 +08003990 dmar_remove_dev_scope(info, rmrr->segment,
3991 rmrru->devices, rmrru->devices_cnt);
Jiang Liu59ce0512014-02-19 14:07:35 +08003992 }
3993 }
3994
3995 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3996 if (atsru->include_all)
3997 continue;
3998
3999 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4000 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4001 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4002 (void *)atsr + atsr->header.length,
4003 atsr->segment, atsru->devices,
4004 atsru->devices_cnt);
4005 if (ret > 0)
4006 break;
4007			else if (ret < 0)
4008 return ret;
4009 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
4010 if (dmar_remove_dev_scope(info, atsr->segment,
4011 atsru->devices, atsru->devices_cnt))
4012 break;
4013 }
4014 }
4015
4016 return 0;
4017}
4018
Fenghua Yu99dcade2009-11-11 07:23:06 -08004019/*
4020 * Here we only respond to a device being unbound from its driver.
4021 *
4022 * A newly added device is not attached to its DMAR domain here yet;
4023 * that happens when the device is first mapped to an iova.
4024 */
4025static int device_notifier(struct notifier_block *nb,
4026 unsigned long action, void *data)
4027{
4028 struct device *dev = data;
Fenghua Yu99dcade2009-11-11 07:23:06 -08004029 struct dmar_domain *domain;
4030
David Woodhouse3d891942014-03-06 15:59:26 +00004031 if (iommu_dummy(dev))
David Woodhouse44cd6132009-12-02 10:18:30 +00004032 return 0;
4033
Joerg Roedel1196c2f2014-09-30 13:02:03 +02004034 if (action != BUS_NOTIFY_REMOVED_DEVICE)
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004035 return 0;
4036
David Woodhouse1525a292014-03-06 16:19:30 +00004037 domain = find_domain(dev);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004038 if (!domain)
4039 return 0;
4040
Jiang Liu3a5670e2014-02-19 14:07:33 +08004041 down_read(&dmar_global_lock);
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004042 domain_remove_one_dev_info(domain, dev);
Jiang Liuab8dfe22014-07-11 14:19:27 +08004043 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004044 domain_exit(domain);
Jiang Liu3a5670e2014-02-19 14:07:33 +08004045 up_read(&dmar_global_lock);
Alex Williamsona97590e2011-03-04 14:52:16 -07004046
Fenghua Yu99dcade2009-11-11 07:23:06 -08004047 return 0;
4048}
4049
4050static struct notifier_block device_nb = {
4051 .notifier_call = device_notifier,
4052};
4053
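/*
 * Memory hotplug notifier for the static identity (si) domain: identity-map
 * ranges that are about to come online; unmap, flush and release the IOVAs
 * of ranges that go offline again.
 */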
Jiang Liu75f05562014-02-19 14:07:37 +08004054static int intel_iommu_memory_notifier(struct notifier_block *nb,
4055 unsigned long val, void *v)
4056{
4057 struct memory_notify *mhp = v;
4058 unsigned long long start, end;
4059 unsigned long start_vpfn, last_vpfn;
4060
4061 switch (val) {
4062 case MEM_GOING_ONLINE:
4063 start = mhp->start_pfn << PAGE_SHIFT;
4064 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4065 if (iommu_domain_identity_map(si_domain, start, end)) {
4066 pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
4067 start, end);
4068 return NOTIFY_BAD;
4069 }
4070 break;
4071
4072 case MEM_OFFLINE:
4073 case MEM_CANCEL_ONLINE:
4074 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4075 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4076 while (start_vpfn <= last_vpfn) {
4077 struct iova *iova;
4078 struct dmar_drhd_unit *drhd;
4079 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00004080 struct page *freelist;
Jiang Liu75f05562014-02-19 14:07:37 +08004081
4082 iova = find_iova(&si_domain->iovad, start_vpfn);
4083 if (iova == NULL) {
4084				pr_debug("dmar: failed to get IOVA for PFN %lx\n",
4085 start_vpfn);
4086 break;
4087 }
4088
4089 iova = split_and_remove_iova(&si_domain->iovad, iova,
4090 start_vpfn, last_vpfn);
4091 if (iova == NULL) {
4092 pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
4093 start_vpfn, last_vpfn);
4094 return NOTIFY_BAD;
4095 }
4096
David Woodhouseea8ea462014-03-05 17:09:32 +00004097 freelist = domain_unmap(si_domain, iova->pfn_lo,
4098 iova->pfn_hi);
4099
Jiang Liu75f05562014-02-19 14:07:37 +08004100 rcu_read_lock();
4101 for_each_active_iommu(iommu, drhd)
4102 iommu_flush_iotlb_psi(iommu, si_domain->id,
Jiang Liua156ef92014-07-11 14:19:36 +08004103 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00004104 !freelist, 0);
Jiang Liu75f05562014-02-19 14:07:37 +08004105 rcu_read_unlock();
David Woodhouseea8ea462014-03-05 17:09:32 +00004106 dma_free_pagelist(freelist);
Jiang Liu75f05562014-02-19 14:07:37 +08004107
4108 start_vpfn = iova->pfn_hi + 1;
4109 free_iova_mem(iova);
4110 }
4111 break;
4112 }
4113
4114 return NOTIFY_OK;
4115}
4116
4117static struct notifier_block intel_iommu_memory_nb = {
4118 .notifier_call = intel_iommu_memory_notifier,
4119 .priority = 0
4120};
4121
Alex Williamsona5459cf2014-06-12 16:12:31 -06004122
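/*
 * Per-IOMMU sysfs attributes. With the iommu device registered from
 * intel_iommu_init() these typically show up as, for example (the exact
 * path depends on the name given to the unit):
 *
 *   /sys/class/iommu/dmar0/intel-iommu/version
 *   /sys/class/iommu/dmar0/intel-iommu/address
 *   /sys/class/iommu/dmar0/intel-iommu/cap
 *   /sys/class/iommu/dmar0/intel-iommu/ecap
 */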
4123static ssize_t intel_iommu_show_version(struct device *dev,
4124 struct device_attribute *attr,
4125 char *buf)
4126{
4127 struct intel_iommu *iommu = dev_get_drvdata(dev);
4128 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4129 return sprintf(buf, "%d:%d\n",
4130 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4131}
4132static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4133
4134static ssize_t intel_iommu_show_address(struct device *dev,
4135 struct device_attribute *attr,
4136 char *buf)
4137{
4138 struct intel_iommu *iommu = dev_get_drvdata(dev);
4139 return sprintf(buf, "%llx\n", iommu->reg_phys);
4140}
4141static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4142
4143static ssize_t intel_iommu_show_cap(struct device *dev,
4144 struct device_attribute *attr,
4145 char *buf)
4146{
4147 struct intel_iommu *iommu = dev_get_drvdata(dev);
4148 return sprintf(buf, "%llx\n", iommu->cap);
4149}
4150static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4151
4152static ssize_t intel_iommu_show_ecap(struct device *dev,
4153 struct device_attribute *attr,
4154 char *buf)
4155{
4156 struct intel_iommu *iommu = dev_get_drvdata(dev);
4157 return sprintf(buf, "%llx\n", iommu->ecap);
4158}
4159static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4160
4161static struct attribute *intel_iommu_attrs[] = {
4162 &dev_attr_version.attr,
4163 &dev_attr_address.attr,
4164 &dev_attr_cap.attr,
4165 &dev_attr_ecap.attr,
4166 NULL,
4167};
4168
4169static struct attribute_group intel_iommu_group = {
4170 .name = "intel-iommu",
4171 .attrs = intel_iommu_attrs,
4172};
4173
4174const struct attribute_group *intel_iommu_groups[] = {
4175 &intel_iommu_group,
4176 NULL,
4177};
4178
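/*
 * Main VT-d initialisation: parse the DMAR table and device scopes, bring
 * up the remapping hardware via init_dmars(), switch the DMA API over to
 * intel_dma_ops, and register the PM hooks, per-IOMMU sysfs devices,
 * IOMMU API ops and the bus/memory-hotplug notifiers.
 */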
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004179int __init intel_iommu_init(void)
4180{
Jiang Liu9bdc5312014-01-06 14:18:27 +08004181 int ret = -ENODEV;
Takao Indoh3a93c842013-04-23 17:35:03 +09004182 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08004183 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004184
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004185 /* VT-d is required for a TXT/tboot launch, so enforce that */
4186 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004187
Jiang Liu3a5670e2014-02-19 14:07:33 +08004188 if (iommu_init_mempool()) {
4189 if (force_on)
4190 panic("tboot: Failed to initialize iommu memory\n");
4191 return -ENOMEM;
4192 }
4193
4194 down_write(&dmar_global_lock);
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004195 if (dmar_table_init()) {
4196 if (force_on)
4197 panic("tboot: Failed to initialize DMAR table\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004198 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004199 }
4200
Takao Indoh3a93c842013-04-23 17:35:03 +09004201 /*
4202 * Disable translation if already enabled prior to OS handover.
4203 */
Jiang Liu7c919772014-01-06 14:18:18 +08004204 for_each_active_iommu(iommu, drhd)
Takao Indoh3a93c842013-04-23 17:35:03 +09004205 if (iommu->gcmd & DMA_GCMD_TE)
4206 iommu_disable_translation(iommu);
Takao Indoh3a93c842013-04-23 17:35:03 +09004207
Suresh Siddhac2c72862011-08-23 17:05:19 -07004208 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004209 if (force_on)
4210 panic("tboot: Failed to initialize DMAR device scope\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004211 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004212 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07004213
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004214 if (no_iommu || dmar_disabled)
Jiang Liu9bdc5312014-01-06 14:18:27 +08004215 goto out_free_dmar;
Suresh Siddha2ae21012008-07-10 11:16:43 -07004216
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004217 if (list_empty(&dmar_rmrr_units))
4218 printk(KERN_INFO "DMAR: No RMRR found\n");
4219
4220 if (list_empty(&dmar_atsr_units))
4221 printk(KERN_INFO "DMAR: No ATSR found\n");
4222
Joseph Cihula51a63e62011-03-21 11:04:24 -07004223 if (dmar_init_reserved_ranges()) {
4224 if (force_on)
4225 panic("tboot: Failed to reserve iommu ranges\n");
Jiang Liu3a5670e2014-02-19 14:07:33 +08004226 goto out_free_reserved_range;
Joseph Cihula51a63e62011-03-21 11:04:24 -07004227 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004228
4229 init_no_remapping_devices();
4230
Joseph Cihulab7792602011-05-03 00:08:37 -07004231 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004232 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004233 if (force_on)
4234 panic("tboot: Failed to initialize DMARs\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004235 printk(KERN_ERR "IOMMU: dmar init failed\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004236 goto out_free_reserved_range;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004237 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08004238 up_write(&dmar_global_lock);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004239 printk(KERN_INFO
4240 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
4241
mark gross5e0d2a62008-03-04 15:22:08 -08004242 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004243#ifdef CONFIG_SWIOTLB
4244 swiotlb = 0;
4245#endif
David Woodhouse19943b02009-08-04 16:19:20 +01004246 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07004247
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004248 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004249
Alex Williamsona5459cf2014-06-12 16:12:31 -06004250 for_each_active_iommu(iommu, drhd)
4251 iommu->iommu_dev = iommu_device_create(NULL, iommu,
4252 intel_iommu_groups,
4253 iommu->name);
4254
Joerg Roedel4236d97d2011-09-06 17:56:07 +02004255 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004256 bus_register_notifier(&pci_bus_type, &device_nb);
Jiang Liu75f05562014-02-19 14:07:37 +08004257 if (si_domain && !hw_pass_through)
4258 register_memory_notifier(&intel_iommu_memory_nb);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004259
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02004260 intel_iommu_enabled = 1;
4261
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004262 return 0;
Jiang Liu9bdc5312014-01-06 14:18:27 +08004263
4264out_free_reserved_range:
4265 put_iova_domain(&reserved_iova_list);
Jiang Liu9bdc5312014-01-06 14:18:27 +08004266out_free_dmar:
4267 intel_iommu_free_dmars();
Jiang Liu3a5670e2014-02-19 14:07:33 +08004268 up_write(&dmar_global_lock);
4269 iommu_exit_mempool();
Jiang Liu9bdc5312014-01-06 14:18:27 +08004270 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004271}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07004272
Alex Williamson579305f2014-07-03 09:51:43 -06004273static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4274{
4275 struct intel_iommu *iommu = opaque;
4276
4277 iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4278 return 0;
4279}
4280
4281/*
4282 * NB - intel-iommu lacks any sort of reference counting for the users of
4283 * dependent devices. If multiple endpoints have intersecting dependent
4284 * devices, unbinding the driver from any one of them will possibly leave
4285 * the others unable to operate.
4286 */
Han, Weidong3199aa62009-02-26 17:31:12 +08004287static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004288 struct device *dev)
Han, Weidong3199aa62009-02-26 17:31:12 +08004289{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004290 if (!iommu || !dev || !dev_is_pci(dev))
Han, Weidong3199aa62009-02-26 17:31:12 +08004291 return;
4292
Alex Williamson579305f2014-07-03 09:51:43 -06004293 pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
Han, Weidong3199aa62009-02-26 17:31:12 +08004294}
4295
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004296static void domain_remove_one_dev_info(struct dmar_domain *domain,
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004297 struct device *dev)
Weidong Hanc7151a82008-12-08 22:51:37 +08004298{
Yijing Wangbca2b912013-10-31 17:26:04 +08004299 struct device_domain_info *info, *tmp;
Weidong Hanc7151a82008-12-08 22:51:37 +08004300 struct intel_iommu *iommu;
4301 unsigned long flags;
4302 int found = 0;
David Woodhouse156baca2014-03-09 14:00:57 -07004303 u8 bus, devfn;
Weidong Hanc7151a82008-12-08 22:51:37 +08004304
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004305 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08004306 if (!iommu)
4307 return;
4308
4309 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wangbca2b912013-10-31 17:26:04 +08004310 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004311 if (info->iommu == iommu && info->bus == bus &&
4312 info->devfn == devfn) {
David Woodhouse109b9b02012-05-25 17:43:02 +01004313 unlink_domain_info(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004314 spin_unlock_irqrestore(&device_domain_lock, flags);
4315
Yu Zhao93a23a72009-05-18 13:51:37 +08004316 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004317 iommu_detach_dev(iommu, info->bus, info->devfn);
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004318 iommu_detach_dependent_devices(iommu, dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08004319 free_devinfo_mem(info);
4320
4321 spin_lock_irqsave(&device_domain_lock, flags);
4322
4323 if (found)
4324 break;
4325 else
4326 continue;
4327 }
4328
4329		/* if there are no other devices under the same iommu
4330		 * owned by this domain, clear this iommu in iommu_bmp and
4331		 * update the iommu count and coherency
4332 */
David Woodhouse8bbc4412014-03-09 13:52:37 -07004333 if (info->iommu == iommu)
Weidong Hanc7151a82008-12-08 22:51:37 +08004334 found = 1;
4335 }
4336
Roland Dreier3e7abe22011-07-20 06:22:21 -07004337 spin_unlock_irqrestore(&device_domain_lock, flags);
4338
Weidong Hanc7151a82008-12-08 22:51:37 +08004339 if (found == 0) {
Jiang Liufb170fb2014-07-11 14:19:28 +08004340 domain_detach_iommu(domain, iommu);
4341 if (!domain_type_is_vm_or_si(domain))
4342 iommu_detach_domain(domain, iommu);
Weidong Hanc7151a82008-12-08 22:51:37 +08004343 }
Weidong Hanc7151a82008-12-08 22:51:37 +08004344}
4345
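/*
 * Initialise a domain created through the IOMMU API: reserve the special
 * IOVA ranges, derive the adjusted guest address width (agaw) from the
 * requested guest width, and allocate the top-level page table.
 */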
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004346static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08004347{
4348 int adjust_width;
4349
4350 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004351 domain_reserve_special_ranges(domain);
4352
4353 /* calculate AGAW */
4354 domain->gaw = guest_width;
4355 adjust_width = guestwidth_to_adjustwidth(guest_width);
4356 domain->agaw = width_to_agaw(adjust_width);
4357
Weidong Han5e98c4b2008-12-08 23:03:27 +08004358 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08004359 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01004360 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004361 domain->max_addr = 0;
Weidong Han5e98c4b2008-12-08 23:03:27 +08004362
4363 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07004364 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004365 if (!domain->pgd)
4366 return -ENOMEM;
4367 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4368 return 0;
4369}
4370
Joerg Roedel5d450802008-12-03 14:52:32 +01004371static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004372{
Joerg Roedel5d450802008-12-03 14:52:32 +01004373 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03004374
Jiang Liuab8dfe22014-07-11 14:19:27 +08004375 dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
Joerg Roedel5d450802008-12-03 14:52:32 +01004376 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03004377 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01004378 "intel_iommu_domain_init: dmar_domain == NULL\n");
4379 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03004380 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004381 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03004382 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01004383 "intel_iommu_domain_init() failed\n");
Jiang Liu92d03cc2014-02-19 14:07:28 +08004384 domain_exit(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01004385 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03004386 }
Allen Kay8140a952011-10-14 12:32:17 -07004387 domain_update_iommu_cap(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01004388 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004389
Joerg Roedel8a0e7152012-01-26 19:40:54 +01004390 domain->geometry.aperture_start = 0;
4391 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4392 domain->geometry.force_aperture = true;
4393
Joerg Roedel5d450802008-12-03 14:52:32 +01004394 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004395}
Kay, Allen M38717942008-09-09 18:37:29 +03004396
Joerg Roedel5d450802008-12-03 14:52:32 +01004397static void intel_iommu_domain_destroy(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004398{
Joerg Roedel5d450802008-12-03 14:52:32 +01004399 struct dmar_domain *dmar_domain = domain->priv;
4400
4401 domain->priv = NULL;
Jiang Liu92d03cc2014-02-19 14:07:28 +08004402 domain_exit(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03004403}
Kay, Allen M38717942008-09-09 18:37:29 +03004404
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004405static int intel_iommu_attach_device(struct iommu_domain *domain,
4406 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004407{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004408 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004409 struct intel_iommu *iommu;
4410 int addr_width;
David Woodhouse156baca2014-03-09 14:00:57 -07004411 u8 bus, devfn;
Kay, Allen M38717942008-09-09 18:37:29 +03004412
Alex Williamsonc875d2c2014-07-03 09:57:02 -06004413 if (device_is_rmrr_locked(dev)) {
4414 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
4415 return -EPERM;
4416 }
4417
David Woodhouse7207d8f2014-03-09 16:31:06 -07004418 /* normally dev is not mapped */
4419 if (unlikely(domain_context_mapped(dev))) {
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004420 struct dmar_domain *old_domain;
4421
David Woodhouse1525a292014-03-06 16:19:30 +00004422 old_domain = find_domain(dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004423 if (old_domain) {
Jiang Liuab8dfe22014-07-11 14:19:27 +08004424 if (domain_type_is_vm_or_si(dmar_domain))
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004425 domain_remove_one_dev_info(old_domain, dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004426 else
4427 domain_remove_dev_info(old_domain);
Joerg Roedel62c22162014-12-09 12:56:45 +01004428
4429 if (!domain_type_is_vm_or_si(old_domain) &&
4430 list_empty(&old_domain->devices))
4431 domain_exit(old_domain);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004432 }
4433 }
4434
David Woodhouse156baca2014-03-09 14:00:57 -07004435 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004436 if (!iommu)
4437 return -ENODEV;
4438
4439 /* check if this iommu agaw is sufficient for max mapped address */
4440 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01004441 if (addr_width > cap_mgaw(iommu->cap))
4442 addr_width = cap_mgaw(iommu->cap);
4443
4444 if (dmar_domain->max_addr > (1LL << addr_width)) {
4445 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004446 "sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01004447 __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004448 return -EFAULT;
4449 }
Tom Lyona99c47a2010-05-17 08:20:45 +01004450 dmar_domain->gaw = addr_width;
4451
4452 /*
4453 * Knock out extra levels of page tables if necessary
4454 */
4455 while (iommu->agaw < dmar_domain->agaw) {
4456 struct dma_pte *pte;
4457
4458 pte = dmar_domain->pgd;
4459 if (dma_pte_present(pte)) {
Sheng Yang25cbff12010-06-12 19:21:42 +08004460 dmar_domain->pgd = (struct dma_pte *)
4461 phys_to_virt(dma_pte_addr(pte));
Jan Kiszka7a661012010-11-02 08:05:51 +01004462 free_pgtable_page(pte);
Tom Lyona99c47a2010-05-17 08:20:45 +01004463 }
4464 dmar_domain->agaw--;
4465 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004466
David Woodhouse5913c9b2014-03-09 16:27:31 -07004467 return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004468}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004469
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004470static void intel_iommu_detach_device(struct iommu_domain *domain,
4471 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004472{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004473 struct dmar_domain *dmar_domain = domain->priv;
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004474
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004475 domain_remove_one_dev_info(dmar_domain, dev);
Kay, Allen M38717942008-09-09 18:37:29 +03004476}
Kay, Allen M38717942008-09-09 18:37:29 +03004477
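/*
 * IOMMU API map callback: translate IOMMU_READ/WRITE/CACHE into DMA PTE
 * bits, check that the mapping fits below the domain's address width, and
 * install the page-table entries for the (page-aligned) range.
 */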
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004478static int intel_iommu_map(struct iommu_domain *domain,
4479 unsigned long iova, phys_addr_t hpa,
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004480 size_t size, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03004481{
Joerg Roedeldde57a22008-12-03 15:04:09 +01004482 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004483 u64 max_addr;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004484 int prot = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004485 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004486
Joerg Roedeldde57a22008-12-03 15:04:09 +01004487 if (iommu_prot & IOMMU_READ)
4488 prot |= DMA_PTE_READ;
4489 if (iommu_prot & IOMMU_WRITE)
4490 prot |= DMA_PTE_WRITE;
Sheng Yang9cf06692009-03-18 15:33:07 +08004491 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4492 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004493
David Woodhouse163cc522009-06-28 00:51:17 +01004494 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004495 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004496 u64 end;
4497
4498 /* check if minimum agaw is sufficient for mapped address */
Tom Lyon8954da12010-05-17 08:19:52 +01004499 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004500 if (end < max_addr) {
Tom Lyon8954da12010-05-17 08:19:52 +01004501 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004502 "sufficient for the mapped address (%llx)\n",
Tom Lyon8954da12010-05-17 08:19:52 +01004503 __func__, dmar_domain->gaw, max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004504 return -EFAULT;
4505 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01004506 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004507 }
David Woodhousead051222009-06-28 14:22:28 +01004508	/* Round size up to the next multiple of PAGE_SIZE if it, together with
4509	   the low bits of hpa, would take us onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01004510 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01004511 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4512 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004513 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03004514}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004515
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004516static size_t intel_iommu_unmap(struct iommu_domain *domain,
David Woodhouseea8ea462014-03-05 17:09:32 +00004517 unsigned long iova, size_t size)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004518{
Joerg Roedeldde57a22008-12-03 15:04:09 +01004519 struct dmar_domain *dmar_domain = domain->priv;
David Woodhouseea8ea462014-03-05 17:09:32 +00004520 struct page *freelist = NULL;
4521 struct intel_iommu *iommu;
4522 unsigned long start_pfn, last_pfn;
4523 unsigned int npages;
4524 int iommu_id, num, ndomains, level = 0;
Sheng Yang4b99d352009-07-08 11:52:52 +01004525
David Woodhouse5cf0a762014-03-19 16:07:49 +00004526 /* Cope with horrid API which requires us to unmap more than the
4527 size argument if it happens to be a large-page mapping. */
4528 if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
4529 BUG();
4530
4531 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
4532 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
4533
David Woodhouseea8ea462014-03-05 17:09:32 +00004534 start_pfn = iova >> VTD_PAGE_SHIFT;
4535 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
4536
4537 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
4538
4539 npages = last_pfn - start_pfn + 1;
4540
4541 for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
4542 iommu = g_iommus[iommu_id];
4543
4544 /*
4545 * find bit position of dmar_domain
4546 */
4547 ndomains = cap_ndoms(iommu->cap);
4548 for_each_set_bit(num, iommu->domain_ids, ndomains) {
4549 if (iommu->domains[num] == dmar_domain)
4550 iommu_flush_iotlb_psi(iommu, num, start_pfn,
4551 npages, !freelist, 0);
4552 }
4553
4554 }
4555
4556 dma_free_pagelist(freelist);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004557
David Woodhouse163cc522009-06-28 00:51:17 +01004558 if (dmar_domain->max_addr == iova + size)
4559 dmar_domain->max_addr = iova;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004560
David Woodhouse5cf0a762014-03-19 16:07:49 +00004561 return size;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004562}
Kay, Allen M38717942008-09-09 18:37:29 +03004563
Joerg Roedeld14d6572008-12-03 15:06:57 +01004564static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
Varun Sethibb5547a2013-03-29 01:23:58 +05304565 dma_addr_t iova)
Kay, Allen M38717942008-09-09 18:37:29 +03004566{
Joerg Roedeld14d6572008-12-03 15:06:57 +01004567 struct dmar_domain *dmar_domain = domain->priv;
Kay, Allen M38717942008-09-09 18:37:29 +03004568 struct dma_pte *pte;
David Woodhouse5cf0a762014-03-19 16:07:49 +00004569 int level = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004570 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004571
David Woodhouse5cf0a762014-03-19 16:07:49 +00004572 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
Kay, Allen M38717942008-09-09 18:37:29 +03004573 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004574 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03004575
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004576 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03004577}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004578
Joerg Roedel5d587b82014-09-05 10:50:45 +02004579static bool intel_iommu_capable(enum iommu_cap cap)
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004580{
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004581 if (cap == IOMMU_CAP_CACHE_COHERENCY)
Joerg Roedel5d587b82014-09-05 10:50:45 +02004582 return domain_update_iommu_snooping(NULL) == 1;
Tom Lyon323f99c2010-07-02 16:56:14 -04004583 if (cap == IOMMU_CAP_INTR_REMAP)
Joerg Roedel5d587b82014-09-05 10:50:45 +02004584 return irq_remapping_enabled == 1;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004585
Joerg Roedel5d587b82014-09-05 10:50:45 +02004586 return false;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004587}
4588
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004589static int intel_iommu_add_device(struct device *dev)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004590{
Alex Williamsona5459cf2014-06-12 16:12:31 -06004591 struct intel_iommu *iommu;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004592 struct iommu_group *group;
David Woodhouse156baca2014-03-09 14:00:57 -07004593 u8 bus, devfn;
Alex Williamson70ae6f02011-10-21 15:56:11 -04004594
Alex Williamsona5459cf2014-06-12 16:12:31 -06004595 iommu = device_to_iommu(dev, &bus, &devfn);
4596 if (!iommu)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004597 return -ENODEV;
4598
Alex Williamsona5459cf2014-06-12 16:12:31 -06004599 iommu_device_link(iommu->iommu_dev, dev);
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004600
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004601 group = iommu_group_get_for_dev(dev);
Alex Williamson783f1572012-05-30 14:19:43 -06004602
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004603 if (IS_ERR(group))
4604 return PTR_ERR(group);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004605
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004606 iommu_group_put(group);
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004607 return 0;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004608}
4609
4610static void intel_iommu_remove_device(struct device *dev)
4611{
Alex Williamsona5459cf2014-06-12 16:12:31 -06004612 struct intel_iommu *iommu;
4613 u8 bus, devfn;
4614
4615 iommu = device_to_iommu(dev, &bus, &devfn);
4616 if (!iommu)
4617 return;
4618
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004619 iommu_group_remove_device(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004620
4621 iommu_device_unlink(iommu->iommu_dev, dev);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004622}
4623
Thierry Redingb22f6432014-06-27 09:03:12 +02004624static const struct iommu_ops intel_iommu_ops = {
Joerg Roedel5d587b82014-09-05 10:50:45 +02004625 .capable = intel_iommu_capable,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004626 .domain_init = intel_iommu_domain_init,
4627 .domain_destroy = intel_iommu_domain_destroy,
4628 .attach_dev = intel_iommu_attach_device,
4629 .detach_dev = intel_iommu_detach_device,
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004630 .map = intel_iommu_map,
4631 .unmap = intel_iommu_unmap,
Olav Haugan315786e2014-10-25 09:55:16 -07004632 .map_sg = default_iommu_map_sg,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004633 .iova_to_phys = intel_iommu_iova_to_phys,
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004634 .add_device = intel_iommu_add_device,
4635 .remove_device = intel_iommu_remove_device,
Ohad Ben-Cohen6d1c56a2011-11-10 11:32:30 +02004636 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004637};
David Woodhouse9af88142009-02-13 23:18:03 +00004638
Daniel Vetter94526182013-01-20 23:50:13 +01004639static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4640{
4641 /* G4x/GM45 integrated gfx dmar support is totally busted. */
4642 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4643 dmar_map_gfx = 0;
4644}
4645
4646DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4647DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4648DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4649DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4650DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4651DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4652DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4653
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004654static void quirk_iommu_rwbf(struct pci_dev *dev)
David Woodhouse9af88142009-02-13 23:18:03 +00004655{
4656 /*
4657 * Mobile 4 Series Chipset neglects to set RWBF capability,
Daniel Vetter210561f2013-01-21 19:48:59 +01004658 * but needs it. Same seems to hold for the desktop versions.
David Woodhouse9af88142009-02-13 23:18:03 +00004659 */
4660 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4661 rwbf_quirk = 1;
4662}
4663
4664DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
Daniel Vetter210561f2013-01-21 19:48:59 +01004665DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4666DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4667DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4668DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4669DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4670DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
David Woodhousee0fc7e02009-09-30 09:12:17 -07004671
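/*
 * GGC is a graphics control register in the config space of the host
 * bridge devices matched below; judging by the field names, the VT-related
 * size encodings describe the stolen memory the BIOS set aside for a
 * shadow GTT (see the chipset datasheet for the authoritative layout).
 */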
Adam Jacksoneecfd572010-08-25 21:17:34 +01004672#define GGC 0x52
4673#define GGC_MEMORY_SIZE_MASK (0xf << 8)
4674#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4675#define GGC_MEMORY_SIZE_1M (0x1 << 8)
4676#define GGC_MEMORY_SIZE_2M (0x3 << 8)
4677#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4678#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4679#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4680#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
4681
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004682static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
David Woodhouse9eecabc2010-09-21 22:28:23 +01004683{
4684 unsigned short ggc;
4685
Adam Jacksoneecfd572010-08-25 21:17:34 +01004686 if (pci_read_config_word(dev, GGC, &ggc))
David Woodhouse9eecabc2010-09-21 22:28:23 +01004687 return;
4688
Adam Jacksoneecfd572010-08-25 21:17:34 +01004689 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
David Woodhouse9eecabc2010-09-21 22:28:23 +01004690 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4691 dmar_map_gfx = 0;
David Woodhouse6fbcfb32011-09-25 19:11:14 -07004692 } else if (dmar_map_gfx) {
4693 /* we have to ensure the gfx device is idle before we flush */
4694 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
4695 intel_iommu_strict = 1;
4696 }
David Woodhouse9eecabc2010-09-21 22:28:23 +01004697}
4698DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4699DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4700DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4701DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4702
David Woodhousee0fc7e02009-09-30 09:12:17 -07004703/* On Tylersburg chipsets, some BIOSes have been known to enable the
4704 ISOCH DMAR unit for the Azalia sound device, but not give it any
4705 TLB entries, which causes it to deadlock. Check for that. We do
4706 this in a function called from init_dmars(), instead of in a PCI
4707 quirk, because we don't want to print the obnoxious "BIOS broken"
4708 message if VT-d is actually disabled.
4709*/
4710static void __init check_tylersburg_isoch(void)
4711{
4712 struct pci_dev *pdev;
4713 uint32_t vtisochctrl;
4714
4715 /* If there's no Azalia in the system anyway, forget it. */
4716 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4717 if (!pdev)
4718 return;
4719 pci_dev_put(pdev);
4720
4721 /* System Management Registers. Might be hidden, in which case
4722 we can't do the sanity check. But that's OK, because the
4723 known-broken BIOSes _don't_ actually hide it, so far. */
4724 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4725 if (!pdev)
4726 return;
4727
4728 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4729 pci_dev_put(pdev);
4730 return;
4731 }
4732
4733 pci_dev_put(pdev);
4734
4735 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4736 if (vtisochctrl & 1)
4737 return;
4738
4739 /* Drop all bits other than the number of TLB entries */
4740 vtisochctrl &= 0x1c;
4741
4742 /* If we have the recommended number of TLB entries (16), fine. */
4743 if (vtisochctrl == 0x10)
4744 return;
4745
4746 /* Zero TLB entries? You get to ride the short bus to school. */
4747 if (!vtisochctrl) {
4748 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4749 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4750 dmi_get_system_info(DMI_BIOS_VENDOR),
4751 dmi_get_system_info(DMI_BIOS_VERSION),
4752 dmi_get_system_info(DMI_PRODUCT_VERSION));
4753 iommu_identity_mapping |= IDENTMAP_AZALIA;
4754 return;
4755 }
4756
4757 printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
4758 vtisochctrl);
4759}