blob: eaf825ac7d28fb0c3feb95c932e459a47be315c0 [file] [log] [blame]
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001/*
David Woodhouseea8ea462014-03-05 17:09:32 +00002 * Copyright © 2006-2014 Intel Corporation.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
David Woodhouseea8ea462014-03-05 17:09:32 +000013 * Authors: David Woodhouse <dwmw2@infradead.org>,
14 * Ashok Raj <ashok.raj@intel.com>,
15 * Shaohua Li <shaohua.li@intel.com>,
16 * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17 * Fenghua Yu <fenghua.yu@intel.com>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070018 */
19
20#include <linux/init.h>
21#include <linux/bitmap.h>
mark gross5e0d2a62008-03-04 15:22:08 -080022#include <linux/debugfs.h>
Paul Gortmaker54485c32011-10-29 10:26:25 -040023#include <linux/export.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070024#include <linux/slab.h>
25#include <linux/irq.h>
26#include <linux/interrupt.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070027#include <linux/spinlock.h>
28#include <linux/pci.h>
29#include <linux/dmar.h>
30#include <linux/dma-mapping.h>
31#include <linux/mempool.h>
Jiang Liu75f05562014-02-19 14:07:37 +080032#include <linux/memory.h>
mark gross5e0d2a62008-03-04 15:22:08 -080033#include <linux/timer.h>
Kay, Allen M38717942008-09-09 18:37:29 +030034#include <linux/iova.h>
Joerg Roedel5d450802008-12-03 14:52:32 +010035#include <linux/iommu.h>
Kay, Allen M38717942008-09-09 18:37:29 +030036#include <linux/intel-iommu.h>
Rafael J. Wysocki134fac32011-03-23 22:16:14 +010037#include <linux/syscore_ops.h>
Shane Wang69575d32009-09-01 18:25:07 -070038#include <linux/tboot.h>
Stephen Rothwelladb2fe02009-08-31 15:24:23 +100039#include <linux/dmi.h>
Joerg Roedel5cdede22011-04-04 15:55:18 +020040#include <linux/pci-ats.h>
Tejun Heo0ee332c2011-12-08 10:22:09 -080041#include <linux/memblock.h>
Akinobu Mita36746432014-06-04 16:06:51 -070042#include <linux/dma-contiguous.h>
Suresh Siddha8a8f4222012-03-30 11:47:08 -070043#include <asm/irq_remapping.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070044#include <asm/cacheflush.h>
FUJITA Tomonori46a7fa22008-07-11 10:23:42 +090045#include <asm/iommu.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070046
Joerg Roedel078e1ee2012-09-26 12:44:43 +020047#include "irq_remapping.h"
48
Fenghua Yu5b6985c2008-10-16 18:02:32 -070049#define ROOT_SIZE VTD_PAGE_SIZE
50#define CONTEXT_SIZE VTD_PAGE_SIZE
51
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070052#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
53#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
David Woodhousee0fc7e02009-09-30 09:12:17 -070054#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070055
56#define IOAPIC_RANGE_START (0xfee00000)
57#define IOAPIC_RANGE_END (0xfeefffff)
58#define IOVA_START_ADDR (0x1000)
59
60#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
61
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -070062#define MAX_AGAW_WIDTH 64
Jiang Liu5c645b32014-01-06 14:18:12 +080063#define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -070064
David Woodhouse2ebe3152009-09-19 07:34:04 -070065#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
66#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
67
68/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
69 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
70#define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
71 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
72#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070073
Mark McLoughlinf27be032008-11-20 15:49:43 +000074#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
Yang Hongyang284901a2009-04-06 19:01:15 -070075#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
Yang Hongyang6a355282009-04-06 19:01:13 -070076#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
mark gross5e0d2a62008-03-04 15:22:08 -080077
Andrew Mortondf08cdc2010-09-22 13:05:11 -070078/* page table handling */
79#define LEVEL_STRIDE (9)
80#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
81
Ohad Ben-Cohen6d1c56a2011-11-10 11:32:30 +020082/*
83 * This bitmap is used to advertise the page sizes our hardware support
84 * to the IOMMU core, which will then use this information to split
85 * physically contiguous memory regions it is mapping into page sizes
86 * that we support.
87 *
88 * Traditionally the IOMMU core just handed us the mappings directly,
89 * after making sure the size is an order of a 4KiB page and that the
90 * mapping has natural alignment.
91 *
92 * To retain this behavior, we currently advertise that we support
93 * all page sizes that are an order of 4KiB.
94 *
95 * If at some point we'd like to utilize the IOMMU core's new behavior,
96 * we could change this to advertise the real page sizes we support.
97 */
98#define INTEL_IOMMU_PGSIZES (~0xFFFUL)
99
/* Number of page-table levels implied by an AGAW value (AGAW 0 == 2 levels). */
static inline int agaw_to_level(int agaw)
{
	int levels = agaw;

	levels += 2;
	return levels;
}
104
105static inline int agaw_to_width(int agaw)
106{
Jiang Liu5c645b32014-01-06 14:18:12 +0800107 return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
Andrew Mortondf08cdc2010-09-22 13:05:11 -0700108}
109
110static inline int width_to_agaw(int width)
111{
Jiang Liu5c645b32014-01-06 14:18:12 +0800112 return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
Andrew Mortondf08cdc2010-09-22 13:05:11 -0700113}
114
115static inline unsigned int level_to_offset_bits(int level)
116{
117 return (level - 1) * LEVEL_STRIDE;
118}
119
120static inline int pfn_level_offset(unsigned long pfn, int level)
121{
122 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
123}
124
/* Mask selecting the PFN bits at or above page-table level @level. */
static inline unsigned long level_mask(int level)
{
	unsigned int bits = level_to_offset_bits(level);

	return ~0UL << bits;
}
129
/* Number of base pages mapped by one entry at page-table level @level. */
static inline unsigned long level_size(int level)
{
	unsigned int bits = level_to_offset_bits(level);

	return 1UL << bits;
}
134
/* Round @pfn up to the next boundary of page-table level @level. */
static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	unsigned long size = level_size(level);

	return (pfn + size - 1) & level_mask(level);
}
David Woodhousefd18de52009-05-10 23:57:41 +0100139
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100140static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
141{
Jiang Liu5c645b32014-01-06 14:18:12 +0800142 return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100143}
144
David Woodhousedd4e8312009-06-27 16:21:20 +0100145/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
146 are never going to work. */
147static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
148{
149 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
150}
151
152static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
153{
154 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
155}
/* VT-d page frame number of the first VT-d page within @pg. */
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	unsigned long mm_pfn = page_to_pfn(pg);

	return mm_to_dma_pfn(mm_pfn);
}
/* VT-d page frame number backing the kernel virtual address @p. */
static inline unsigned long virt_to_dma_pfn(void *p)
{
	struct page *pg = virt_to_page(p);

	return page_to_dma_pfn(pg);
}
164
Weidong Hand9630fe2008-12-08 11:06:32 +0800165/* global iommu list, set NULL for ignored DMAR units */
166static struct intel_iommu **g_iommus;
167
David Woodhousee0fc7e02009-09-30 09:12:17 -0700168static void __init check_tylersburg_isoch(void);
David Woodhouse9af88142009-02-13 23:18:03 +0000169static int rwbf_quirk;
170
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000171/*
Joseph Cihulab7792602011-05-03 00:08:37 -0700172 * set to 1 to panic kernel if can't successfully enable VT-d
173 * (used when kernel is launched w/ TXT)
174 */
175static int force_on = 0;
176
177/*
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000178 * 0: Present
179 * 1-11: Reserved
180 * 12-63: Context Ptr (12 - (haw-1))
181 * 64-127: Reserved
182 */
183struct root_entry {
184 u64 val;
185 u64 rsvd1;
186};
187#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
188static inline bool root_present(struct root_entry *root)
189{
190 return (root->val & 1);
191}
192static inline void set_root_present(struct root_entry *root)
193{
194 root->val |= 1;
195}
196static inline void set_root_value(struct root_entry *root, unsigned long value)
197{
198 root->val |= value & VTD_PAGE_MASK;
199}
200
201static inline struct context_entry *
202get_context_addr_from_root(struct root_entry *root)
203{
204 return (struct context_entry *)
205 (root_present(root)?phys_to_virt(
206 root->val & VTD_PAGE_MASK) :
207 NULL);
208}
209
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000210/*
211 * low 64 bits:
212 * 0: present
213 * 1: fault processing disable
214 * 2-3: translation type
215 * 12-63: address space root
216 * high 64 bits:
217 * 0-2: address width
218 * 3-6: aval
219 * 8-23: domain id
220 */
221struct context_entry {
222 u64 lo;
223 u64 hi;
224};
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000225
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000226static inline bool context_present(struct context_entry *context)
227{
228 return (context->lo & 1);
229}
230static inline void context_set_present(struct context_entry *context)
231{
232 context->lo |= 1;
233}
234
235static inline void context_set_fault_enable(struct context_entry *context)
236{
237 context->lo &= (((u64)-1) << 2) | 1;
238}
239
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000240static inline void context_set_translation_type(struct context_entry *context,
241 unsigned long value)
242{
243 context->lo &= (((u64)-1) << 4) | 3;
244 context->lo |= (value & 3) << 2;
245}
246
247static inline void context_set_address_root(struct context_entry *context,
248 unsigned long value)
249{
250 context->lo |= value & VTD_PAGE_MASK;
251}
252
253static inline void context_set_address_width(struct context_entry *context,
254 unsigned long value)
255{
256 context->hi |= value & 7;
257}
258
259static inline void context_set_domain_id(struct context_entry *context,
260 unsigned long value)
261{
262 context->hi |= (value & ((1 << 16) - 1)) << 8;
263}
264
265static inline void context_clear_entry(struct context_entry *context)
266{
267 context->lo = 0;
268 context->hi = 0;
269}
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000270
Mark McLoughlin622ba122008-11-20 15:49:46 +0000271/*
272 * 0: readable
273 * 1: writable
274 * 2-6: reserved
275 * 7: super page
Sheng Yang9cf066972009-03-18 15:33:07 +0800276 * 8-10: available
277 * 11: snoop behavior
Mark McLoughlin622ba122008-11-20 15:49:46 +0000278 * 12-63: Host physcial address
279 */
280struct dma_pte {
281 u64 val;
282};
Mark McLoughlin622ba122008-11-20 15:49:46 +0000283
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000284static inline void dma_clear_pte(struct dma_pte *pte)
285{
286 pte->val = 0;
287}
288
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000289static inline u64 dma_pte_addr(struct dma_pte *pte)
290{
David Woodhousec85994e2009-07-01 19:21:24 +0100291#ifdef CONFIG_64BIT
292 return pte->val & VTD_PAGE_MASK;
293#else
294 /* Must have a full atomic 64-bit read */
David Woodhouse1a8bd482010-08-10 01:38:53 +0100295 return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
David Woodhousec85994e2009-07-01 19:21:24 +0100296#endif
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000297}
298
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000299static inline bool dma_pte_present(struct dma_pte *pte)
300{
301 return (pte->val & 3) != 0;
302}
Mark McLoughlin622ba122008-11-20 15:49:46 +0000303
Allen Kay4399c8b2011-10-14 12:32:46 -0700304static inline bool dma_pte_superpage(struct dma_pte *pte)
305{
Joerg Roedelc3c75eb2014-07-04 11:19:10 +0200306 return (pte->val & DMA_PTE_LARGE_PAGE);
Allen Kay4399c8b2011-10-14 12:32:46 -0700307}
308
David Woodhouse75e6bf92009-07-02 11:21:16 +0100309static inline int first_pte_in_page(struct dma_pte *pte)
310{
311 return !((unsigned long)pte & ~VTD_PAGE_MASK);
312}
313
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700314/*
315 * This domain is a statically identity mapping domain.
316 * 1. This domain creats a static 1:1 mapping to all usable memory.
317 * 2. It maps to each iommu if successful.
318 * 3. Each iommu mapps to this domain if successful.
319 */
David Woodhouse19943b02009-08-04 16:19:20 +0100320static struct dmar_domain *si_domain;
321static int hw_pass_through = 1;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700322
Weidong Han1ce28fe2008-12-08 16:35:39 +0800323/* domain represents a virtual machine, more than one devices
324 * across iommus may be owned in one domain, e.g. kvm guest.
325 */
Jiang Liuab8dfe22014-07-11 14:19:27 +0800326#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 0)
Weidong Han1ce28fe2008-12-08 16:35:39 +0800327
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700328/* si_domain contains mulitple devices */
Jiang Liuab8dfe22014-07-11 14:19:27 +0800329#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700330
Mike Travis1b198bb2012-03-05 15:05:16 -0800331/* define the limit of IOMMUs supported in each domain */
332#ifdef CONFIG_X86
333# define IOMMU_UNITS_SUPPORTED MAX_IO_APICS
334#else
335# define IOMMU_UNITS_SUPPORTED 64
336#endif
337
Mark McLoughlin99126f72008-11-20 15:49:47 +0000338struct dmar_domain {
339 int id; /* domain id */
Suresh Siddha4c923d42009-10-02 11:01:24 -0700340 int nid; /* node id */
Mike Travis1b198bb2012-03-05 15:05:16 -0800341 DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
342 /* bitmap of iommus this domain uses*/
Mark McLoughlin99126f72008-11-20 15:49:47 +0000343
344 struct list_head devices; /* all devices' list */
345 struct iova_domain iovad; /* iova's that belong to this domain */
346
347 struct dma_pte *pgd; /* virtual address */
Mark McLoughlin99126f72008-11-20 15:49:47 +0000348 int gaw; /* max guest address width */
349
350 /* adjusted guest address width, 0 is level 2 30-bit */
351 int agaw;
352
Weidong Han3b5410e2008-12-08 09:17:15 +0800353 int flags; /* flags to find out type of domain */
Weidong Han8e6040972008-12-08 15:49:06 +0800354
355 int iommu_coherency;/* indicate coherency of iommu access */
Sheng Yang58c610b2009-03-18 15:33:05 +0800356 int iommu_snooping; /* indicate snooping control feature*/
Weidong Hanc7151a82008-12-08 22:51:37 +0800357 int iommu_count; /* reference count of iommu */
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100358 int iommu_superpage;/* Level of superpages supported:
359 0 == 4KiB (no superpages), 1 == 2MiB,
360 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
Weidong Hanc7151a82008-12-08 22:51:37 +0800361 spinlock_t iommu_lock; /* protect iommu set in domain */
Weidong Hanfe40f1e2008-12-08 23:10:23 +0800362 u64 max_addr; /* maximum mapped address */
Mark McLoughlin99126f72008-11-20 15:49:47 +0000363};
364
Mark McLoughlina647dac2008-11-20 15:49:48 +0000365/* PCI domain-device relationship */
366struct device_domain_info {
367 struct list_head link; /* link to domain siblings */
368 struct list_head global; /* link to global list */
David Woodhouse276dbf992009-04-04 01:45:37 +0100369 u8 bus; /* PCI bus number */
Mark McLoughlina647dac2008-11-20 15:49:48 +0000370 u8 devfn; /* PCI devfn number */
David Woodhouse0bcb3e22014-03-06 17:12:03 +0000371 struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
Yu Zhao93a23a72009-05-18 13:51:37 +0800372 struct intel_iommu *iommu; /* IOMMU used by this device */
Mark McLoughlina647dac2008-11-20 15:49:48 +0000373 struct dmar_domain *domain; /* pointer to domain */
374};
375
Jiang Liub94e4112014-02-19 14:07:25 +0800376struct dmar_rmrr_unit {
377 struct list_head list; /* list of rmrr units */
378 struct acpi_dmar_header *hdr; /* ACPI header */
379 u64 base_address; /* reserved base address*/
380 u64 end_address; /* reserved end address */
David Woodhouse832bd852014-03-07 15:08:36 +0000381 struct dmar_dev_scope *devices; /* target devices */
Jiang Liub94e4112014-02-19 14:07:25 +0800382 int devices_cnt; /* target device count */
383};
384
385struct dmar_atsr_unit {
386 struct list_head list; /* list of ATSR units */
387 struct acpi_dmar_header *hdr; /* ACPI header */
David Woodhouse832bd852014-03-07 15:08:36 +0000388 struct dmar_dev_scope *devices; /* target devices */
Jiang Liub94e4112014-02-19 14:07:25 +0800389 int devices_cnt; /* target device count */
390 u8 include_all:1; /* include all ports */
391};
392
393static LIST_HEAD(dmar_atsr_units);
394static LIST_HEAD(dmar_rmrr_units);
395
396#define for_each_rmrr_units(rmrr) \
397 list_for_each_entry(rmrr, &dmar_rmrr_units, list)
398
mark gross5e0d2a62008-03-04 15:22:08 -0800399static void flush_unmaps_timeout(unsigned long data);
400
Jiang Liub707cb02014-01-06 14:18:26 +0800401static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
mark gross5e0d2a62008-03-04 15:22:08 -0800402
mark gross80b20dd2008-04-18 13:53:58 -0700403#define HIGH_WATER_MARK 250
404struct deferred_flush_tables {
405 int next;
406 struct iova *iova[HIGH_WATER_MARK];
407 struct dmar_domain *domain[HIGH_WATER_MARK];
David Woodhouseea8ea462014-03-05 17:09:32 +0000408 struct page *freelist[HIGH_WATER_MARK];
mark gross80b20dd2008-04-18 13:53:58 -0700409};
410
411static struct deferred_flush_tables *deferred_flush;
412
mark gross5e0d2a62008-03-04 15:22:08 -0800413/* bitmap for indexing intel_iommus */
mark gross5e0d2a62008-03-04 15:22:08 -0800414static int g_num_of_iommus;
415
416static DEFINE_SPINLOCK(async_umap_flush_lock);
417static LIST_HEAD(unmaps_to_do);
418
419static int timer_on;
420static long list_size;
mark gross5e0d2a62008-03-04 15:22:08 -0800421
Jiang Liu92d03cc2014-02-19 14:07:28 +0800422static void domain_exit(struct dmar_domain *domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700423static void domain_remove_dev_info(struct dmar_domain *domain);
Jiang Liub94e4112014-02-19 14:07:25 +0800424static void domain_remove_one_dev_info(struct dmar_domain *domain,
David Woodhousebf9c9ed2014-03-09 16:19:13 -0700425 struct device *dev);
Jiang Liu92d03cc2014-02-19 14:07:28 +0800426static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
David Woodhouse0bcb3e22014-03-06 17:12:03 +0000427 struct device *dev);
Jiang Liu2a46ddf2014-07-11 14:19:30 +0800428static int domain_detach_iommu(struct dmar_domain *domain,
429 struct intel_iommu *iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700430
Suresh Siddhad3f13812011-08-23 17:05:25 -0700431#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800432int dmar_disabled = 0;
433#else
434int dmar_disabled = 1;
Suresh Siddhad3f13812011-08-23 17:05:25 -0700435#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800436
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -0200437int intel_iommu_enabled = 0;
438EXPORT_SYMBOL_GPL(intel_iommu_enabled);
439
David Woodhouse2d9e6672010-06-15 10:57:57 +0100440static int dmar_map_gfx = 1;
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700441static int dmar_forcedac;
mark gross5e0d2a62008-03-04 15:22:08 -0800442static int intel_iommu_strict;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100443static int intel_iommu_superpage = 1;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700444
David Woodhousec0771df2011-10-14 20:59:46 +0100445int intel_iommu_gfx_mapped;
446EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
447
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700448#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
449static DEFINE_SPINLOCK(device_domain_lock);
450static LIST_HEAD(device_domain_list);
451
Thierry Redingb22f6432014-06-27 09:03:12 +0200452static const struct iommu_ops intel_iommu_ops;
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +0100453
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700454static int __init intel_iommu_setup(char *str)
455{
456 if (!str)
457 return -EINVAL;
458 while (*str) {
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800459 if (!strncmp(str, "on", 2)) {
460 dmar_disabled = 0;
461 printk(KERN_INFO "Intel-IOMMU: enabled\n");
462 } else if (!strncmp(str, "off", 3)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700463 dmar_disabled = 1;
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800464 printk(KERN_INFO "Intel-IOMMU: disabled\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700465 } else if (!strncmp(str, "igfx_off", 8)) {
466 dmar_map_gfx = 0;
467 printk(KERN_INFO
468 "Intel-IOMMU: disable GFX device mapping\n");
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700469 } else if (!strncmp(str, "forcedac", 8)) {
mark gross5e0d2a62008-03-04 15:22:08 -0800470 printk(KERN_INFO
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700471 "Intel-IOMMU: Forcing DAC for PCI devices\n");
472 dmar_forcedac = 1;
mark gross5e0d2a62008-03-04 15:22:08 -0800473 } else if (!strncmp(str, "strict", 6)) {
474 printk(KERN_INFO
475 "Intel-IOMMU: disable batched IOTLB flush\n");
476 intel_iommu_strict = 1;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100477 } else if (!strncmp(str, "sp_off", 6)) {
478 printk(KERN_INFO
479 "Intel-IOMMU: disable supported super page\n");
480 intel_iommu_superpage = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700481 }
482
483 str += strcspn(str, ",");
484 while (*str == ',')
485 str++;
486 }
487 return 0;
488}
489__setup("intel_iommu=", intel_iommu_setup);
490
491static struct kmem_cache *iommu_domain_cache;
492static struct kmem_cache *iommu_devinfo_cache;
493static struct kmem_cache *iommu_iova_cache;
494
Suresh Siddha4c923d42009-10-02 11:01:24 -0700495static inline void *alloc_pgtable_page(int node)
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700496{
Suresh Siddha4c923d42009-10-02 11:01:24 -0700497 struct page *page;
498 void *vaddr = NULL;
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700499
Suresh Siddha4c923d42009-10-02 11:01:24 -0700500 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
501 if (page)
502 vaddr = page_address(page);
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700503 return vaddr;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700504}
505
/* Release a page obtained from alloc_pgtable_page(). */
static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}
510
511static inline void *alloc_domain_mem(void)
512{
KOSAKI Motohiro354bb652009-11-17 16:21:09 +0900513 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700514}
515
Kay, Allen M38717942008-09-09 18:37:29 +0300516static void free_domain_mem(void *vaddr)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700517{
518 kmem_cache_free(iommu_domain_cache, vaddr);
519}
520
521static inline void * alloc_devinfo_mem(void)
522{
KOSAKI Motohiro354bb652009-11-17 16:21:09 +0900523 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700524}
525
526static inline void free_devinfo_mem(void *vaddr)
527{
528 kmem_cache_free(iommu_devinfo_cache, vaddr);
529}
530
531struct iova *alloc_iova_mem(void)
532{
KOSAKI Motohiro354bb652009-11-17 16:21:09 +0900533 return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700534}
535
536void free_iova_mem(struct iova *iova)
537{
538 kmem_cache_free(iommu_iova_cache, iova);
539}
540
Jiang Liuab8dfe22014-07-11 14:19:27 +0800541static inline int domain_type_is_vm(struct dmar_domain *domain)
542{
543 return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
544}
545
546static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
547{
548 return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
549 DOMAIN_FLAG_STATIC_IDENTITY);
550}
Weidong Han1b573682008-12-08 15:34:06 +0800551
Jiang Liu162d1b12014-07-11 14:19:35 +0800552static inline int domain_pfn_supported(struct dmar_domain *domain,
553 unsigned long pfn)
554{
555 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
556
557 return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
558}
559
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700560static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
Weidong Han1b573682008-12-08 15:34:06 +0800561{
562 unsigned long sagaw;
563 int agaw = -1;
564
565 sagaw = cap_sagaw(iommu->cap);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700566 for (agaw = width_to_agaw(max_gaw);
Weidong Han1b573682008-12-08 15:34:06 +0800567 agaw >= 0; agaw--) {
568 if (test_bit(agaw, &sagaw))
569 break;
570 }
571
572 return agaw;
573}
574
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700575/*
576 * Calculate max SAGAW for each iommu.
577 */
578int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
579{
580 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
581}
582
583/*
584 * calculate agaw for each iommu.
585 * "SAGAW" may be different across iommus, use a default agaw, and
586 * get a supported less agaw for iommus that don't support the default agaw.
587 */
588int iommu_calculate_agaw(struct intel_iommu *iommu)
589{
590 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
591}
592
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700593/* This functionin only returns single iommu in a domain */
Weidong Han8c11e792008-12-08 15:29:22 +0800594static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
595{
596 int iommu_id;
597
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700598 /* si_domain and vm domain should not get here. */
Jiang Liuab8dfe22014-07-11 14:19:27 +0800599 BUG_ON(domain_type_is_vm_or_si(domain));
Mike Travis1b198bb2012-03-05 15:05:16 -0800600 iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
Weidong Han8c11e792008-12-08 15:29:22 +0800601 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
602 return NULL;
603
604 return g_iommus[iommu_id];
605}
606
Weidong Han8e6040972008-12-08 15:49:06 +0800607static void domain_update_iommu_coherency(struct dmar_domain *domain)
608{
David Woodhoused0501962014-03-11 17:10:29 -0700609 struct dmar_drhd_unit *drhd;
610 struct intel_iommu *iommu;
611 int i, found = 0;
Weidong Han8e6040972008-12-08 15:49:06 +0800612
David Woodhoused0501962014-03-11 17:10:29 -0700613 domain->iommu_coherency = 1;
Weidong Han8e6040972008-12-08 15:49:06 +0800614
Mike Travis1b198bb2012-03-05 15:05:16 -0800615 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
David Woodhoused0501962014-03-11 17:10:29 -0700616 found = 1;
Weidong Han8e6040972008-12-08 15:49:06 +0800617 if (!ecap_coherent(g_iommus[i]->ecap)) {
618 domain->iommu_coherency = 0;
619 break;
620 }
Weidong Han8e6040972008-12-08 15:49:06 +0800621 }
David Woodhoused0501962014-03-11 17:10:29 -0700622 if (found)
623 return;
624
625 /* No hardware attached; use lowest common denominator */
626 rcu_read_lock();
627 for_each_active_iommu(iommu, drhd) {
628 if (!ecap_coherent(iommu->ecap)) {
629 domain->iommu_coherency = 0;
630 break;
631 }
632 }
633 rcu_read_unlock();
Weidong Han8e6040972008-12-08 15:49:06 +0800634}
635
Jiang Liu161f6932014-07-11 14:19:37 +0800636static int domain_update_iommu_snooping(struct intel_iommu *skip)
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100637{
Allen Kay8140a952011-10-14 12:32:17 -0700638 struct dmar_drhd_unit *drhd;
Jiang Liu161f6932014-07-11 14:19:37 +0800639 struct intel_iommu *iommu;
640 int ret = 1;
641
642 rcu_read_lock();
643 for_each_active_iommu(iommu, drhd) {
644 if (iommu != skip) {
645 if (!ecap_sc_support(iommu->ecap)) {
646 ret = 0;
647 break;
648 }
649 }
650 }
651 rcu_read_unlock();
652
653 return ret;
654}
655
656static int domain_update_iommu_superpage(struct intel_iommu *skip)
657{
658 struct dmar_drhd_unit *drhd;
659 struct intel_iommu *iommu;
Allen Kay8140a952011-10-14 12:32:17 -0700660 int mask = 0xf;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100661
662 if (!intel_iommu_superpage) {
Jiang Liu161f6932014-07-11 14:19:37 +0800663 return 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100664 }
665
Allen Kay8140a952011-10-14 12:32:17 -0700666 /* set iommu_superpage to the smallest common denominator */
Jiang Liu0e242612014-02-19 14:07:34 +0800667 rcu_read_lock();
Allen Kay8140a952011-10-14 12:32:17 -0700668 for_each_active_iommu(iommu, drhd) {
Jiang Liu161f6932014-07-11 14:19:37 +0800669 if (iommu != skip) {
670 mask &= cap_super_page_val(iommu->cap);
671 if (!mask)
672 break;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100673 }
674 }
Jiang Liu0e242612014-02-19 14:07:34 +0800675 rcu_read_unlock();
676
Jiang Liu161f6932014-07-11 14:19:37 +0800677 return fls(mask);
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100678}
679
Sheng Yang58c610b2009-03-18 15:33:05 +0800680/* Some capabilities may be different across iommus */
681static void domain_update_iommu_cap(struct dmar_domain *domain)
682{
683 domain_update_iommu_coherency(domain);
Jiang Liu161f6932014-07-11 14:19:37 +0800684 domain->iommu_snooping = domain_update_iommu_snooping(NULL);
685 domain->iommu_superpage = domain_update_iommu_superpage(NULL);
Sheng Yang58c610b2009-03-18 15:33:05 +0800686}
687
David Woodhouse156baca2014-03-09 14:00:57 -0700688static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
Weidong Hanc7151a82008-12-08 22:51:37 +0800689{
690 struct dmar_drhd_unit *drhd = NULL;
Jiang Liub683b232014-02-19 14:07:32 +0800691 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -0700692 struct device *tmp;
693 struct pci_dev *ptmp, *pdev = NULL;
Yijing Wangaa4d0662014-05-26 20:14:06 +0800694 u16 segment = 0;
Weidong Hanc7151a82008-12-08 22:51:37 +0800695 int i;
696
David Woodhouse156baca2014-03-09 14:00:57 -0700697 if (dev_is_pci(dev)) {
698 pdev = to_pci_dev(dev);
699 segment = pci_domain_nr(pdev->bus);
700 } else if (ACPI_COMPANION(dev))
701 dev = &ACPI_COMPANION(dev)->dev;
702
Jiang Liu0e242612014-02-19 14:07:34 +0800703 rcu_read_lock();
Jiang Liub683b232014-02-19 14:07:32 +0800704 for_each_active_iommu(iommu, drhd) {
David Woodhouse156baca2014-03-09 14:00:57 -0700705 if (pdev && segment != drhd->segment)
David Woodhouse276dbf992009-04-04 01:45:37 +0100706 continue;
Weidong Hanc7151a82008-12-08 22:51:37 +0800707
Jiang Liub683b232014-02-19 14:07:32 +0800708 for_each_active_dev_scope(drhd->devices,
David Woodhouse156baca2014-03-09 14:00:57 -0700709 drhd->devices_cnt, i, tmp) {
710 if (tmp == dev) {
711 *bus = drhd->devices[i].bus;
712 *devfn = drhd->devices[i].devfn;
713 goto out;
714 }
715
716 if (!pdev || !dev_is_pci(tmp))
David Woodhouse832bd852014-03-07 15:08:36 +0000717 continue;
David Woodhouse156baca2014-03-09 14:00:57 -0700718
719 ptmp = to_pci_dev(tmp);
720 if (ptmp->subordinate &&
721 ptmp->subordinate->number <= pdev->bus->number &&
722 ptmp->subordinate->busn_res.end >= pdev->bus->number)
723 goto got_pdev;
David Woodhouse924b6232009-04-04 00:39:25 +0100724 }
Weidong Hanc7151a82008-12-08 22:51:37 +0800725
David Woodhouse156baca2014-03-09 14:00:57 -0700726 if (pdev && drhd->include_all) {
727 got_pdev:
728 *bus = pdev->bus->number;
729 *devfn = pdev->devfn;
Jiang Liub683b232014-02-19 14:07:32 +0800730 goto out;
David Woodhouse156baca2014-03-09 14:00:57 -0700731 }
Weidong Hanc7151a82008-12-08 22:51:37 +0800732 }
Jiang Liub683b232014-02-19 14:07:32 +0800733 iommu = NULL;
David Woodhouse156baca2014-03-09 14:00:57 -0700734 out:
Jiang Liu0e242612014-02-19 14:07:34 +0800735 rcu_read_unlock();
Weidong Hanc7151a82008-12-08 22:51:37 +0800736
Jiang Liub683b232014-02-19 14:07:32 +0800737 return iommu;
Weidong Hanc7151a82008-12-08 22:51:37 +0800738}
739
Weidong Han5331fe62008-12-08 23:00:00 +0800740static void domain_flush_cache(struct dmar_domain *domain,
741 void *addr, int size)
742{
743 if (!domain->iommu_coherency)
744 clflush_cache_range(addr, size);
745}
746
/* Gets context entry for a given bus and devfn, allocating the per-bus
 * context table on demand.  Returns a pointer to the entry, or NULL if
 * the table allocation fails.  Runs under iommu->lock. */
static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		/* No context table for this bus yet: allocate one page on
		 * the IOMMU's NUMA node. */
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		/* Flush the (zeroed) table to memory *before* publishing it
		 * in the root entry, so the hardware page-walk never sees
		 * stale cache lines. */
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}
775
776static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
777{
778 struct root_entry *root;
779 struct context_entry *context;
780 int ret;
781 unsigned long flags;
782
783 spin_lock_irqsave(&iommu->lock, flags);
784 root = &iommu->root_entry[bus];
785 context = get_context_addr_from_root(root);
786 if (!context) {
787 ret = 0;
788 goto out;
789 }
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000790 ret = context_present(&context[devfn]);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700791out:
792 spin_unlock_irqrestore(&iommu->lock, flags);
793 return ret;
794}
795
/* Clear the context entry for @bus/@devfn (if its bus has a context
 * table) and flush the cleared entry to memory so the hardware
 * page-walk observes it.  Runs under iommu->lock. */
static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		/* push the cleared entry out for the IOMMU's walk */
		__iommu_flush_cache(iommu, &context[devfn], \
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
812
813static void free_context_table(struct intel_iommu *iommu)
814{
815 struct root_entry *root;
816 int i;
817 unsigned long flags;
818 struct context_entry *context;
819
820 spin_lock_irqsave(&iommu->lock, flags);
821 if (!iommu->root_entry) {
822 goto out;
823 }
824 for (i = 0; i < ROOT_ENTRY_NR; i++) {
825 root = &iommu->root_entry[i];
826 context = get_context_addr_from_root(root);
827 if (context)
828 free_pgtable_page(context);
829 }
830 free_pgtable_page(iommu->root_entry);
831 iommu->root_entry = NULL;
832out:
833 spin_unlock_irqrestore(&iommu->lock, flags);
834}
835
/* Walk (and build, as needed) the domain's page table down to the PTE
 * covering @pfn.  On entry *target_level is the level wanted (0 means
 * "whatever level the mapping stops at" -- superpage or non-present);
 * on return it holds the level actually reached.  Returns NULL if the
 * pfn is out of range or an intermediate table cannot be allocated.
 * Lock-free: concurrent builders are resolved with cmpxchg64 below. */
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (!domain_pfn_supported(domain, pfn))
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		/* Caller asked for level 0: stop at a superpage or at the
		 * first non-present entry. */
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			/* Flush the new table before linking it in, so the
			 * hardware walk never reads stale lines. */
			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}
889
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100890
/* return address's pte at specific level */
/* Read-only walk to the PTE for @pfn at exactly @level.  Does not
 * allocate.  Returns NULL when the walk hits a non-present entry first,
 * in which case *large_page reports the level at which it stopped; a
 * superpage entry above the requested level is returned directly with
 * *large_page set to its level. */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (dma_pte_superpage(pte)) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}
922
/* clear last level pte, a tlb flush should be followed */
/* Clears every leaf PTE in [start_pfn, last_pfn], handling superpage
 * entries (large_page reports the level each leaf lives at) and
 * flushing each touched PTE page once per page.  The caller must flush
 * the IOTLB afterwards. */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			/* Nothing mapped here: skip to the next region at
			 * the level where the walk stopped. */
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		/* Clear contiguous PTEs within this page of the table. */
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}
954
Alex Williamson3269ee02013-06-15 10:27:19 -0600955static void dma_pte_free_level(struct dmar_domain *domain, int level,
956 struct dma_pte *pte, unsigned long pfn,
957 unsigned long start_pfn, unsigned long last_pfn)
958{
959 pfn = max(start_pfn, pfn);
960 pte = &pte[pfn_level_offset(pfn, level)];
961
962 do {
963 unsigned long level_pfn;
964 struct dma_pte *level_pte;
965
966 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
967 goto next;
968
969 level_pfn = pfn & level_mask(level - 1);
970 level_pte = phys_to_virt(dma_pte_addr(pte));
971
972 if (level > 2)
973 dma_pte_free_level(domain, level - 1, level_pte,
974 level_pfn, start_pfn, last_pfn);
975
976 /* If range covers entire pagetable, free it */
977 if (!(start_pfn > level_pfn ||
Alex Williamson08336fd2014-01-21 15:48:18 -0800978 last_pfn < level_pfn + level_size(level) - 1)) {
Alex Williamson3269ee02013-06-15 10:27:19 -0600979 dma_clear_pte(pte);
980 domain_flush_cache(domain, pte, sizeof(*pte));
981 free_pgtable_page(level_pte);
982 }
983next:
984 pfn += level_size(level);
985 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
986}
987
/* free page table pages. last level pte should already be cleared */
/* Clears all leaf PTEs in [start_pfn, last_pfn] and then frees any
 * intermediate page-table pages that range made empty.  If the range
 * spans the whole domain address space the pgd itself is freed too. */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}
1009
/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	/* Push the page this PTE points to onto the freelist; the list is
	 * threaded through each page's ->freelist field. */
	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	/* Recurse into every present, non-superpage entry of the page we
	 * just queued, collecting its child tables as well. */
	pte = page_address(pg);
	do {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
		pte++;
	} while (!first_pte_in_page(pte));

	return freelist;
}
1039
/* Clear PTEs covering [start_pfn, last_pfn] in the table at @pte
 * (mapping level @level, table base pfn @pfn), queueing fully-covered
 * subtables on @freelist for deferred freeing after the IOTLB flush.
 * Cleared parent PTEs are flushed in one batch at the end.  Returns the
 * updated freelist. */
static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			/* Track the contiguous run of cleared PTEs so they
			 * can be cache-flushed in a single call below. */
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}
1088
/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
/* Unmaps [start_pfn, last_pfn] from @domain and returns a freelist of
 * page-table pages (threaded through page->freelist) for the caller to
 * release with dma_free_pagelist() once the IOTLB has been flushed. */
struct page *domain_unmap(struct dmar_domain *domain,
			  unsigned long start_pfn,
			  unsigned long last_pfn)
{
	struct page *freelist = NULL;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
				       domain->pgd, 0, start_pfn, last_pfn, NULL);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		/* The whole address space went away: queue the pgd page
		 * itself and drop the domain's reference to it. */
		struct page *pgd_page = virt_to_page(domain->pgd);
		pgd_page->freelist = freelist;
		freelist = pgd_page;

		domain->pgd = NULL;
	}

	return freelist;
}
1117
1118void dma_free_pagelist(struct page *freelist)
1119{
1120 struct page *pg;
1121
1122 while ((pg = freelist)) {
1123 freelist = pg->freelist;
1124 free_pgtable_page(page_address(pg));
1125 }
1126}
1127
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001128/* iommu handling */
1129static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1130{
1131 struct root_entry *root;
1132 unsigned long flags;
1133
Suresh Siddha4c923d42009-10-02 11:01:24 -07001134 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001135 if (!root)
1136 return -ENOMEM;
1137
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001138 __iommu_flush_cache(iommu, root, ROOT_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001139
1140 spin_lock_irqsave(&iommu->lock, flags);
1141 iommu->root_entry = root;
1142 spin_unlock_irqrestore(&iommu->lock, flags);
1143
1144 return 0;
1145}
1146
/* Program the root-table address register and issue the Set Root Table
 * Pointer command, spinning until the hardware acknowledges it via the
 * RTPS status bit.  Runs under the register lock. */
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
1166
/* Flush the IOMMU's internal write buffer, but only when the hardware
 * requires it (cap_rwbf) or the rwbf quirk is set; no-op otherwise.
 * Waits for the WBFS status bit to clear under the register lock. */
static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
1184
/* Invalidate the context cache with the requested granularity (global,
 * domain-selective, or device-selective) and wait for the ICC bit to
 * clear.  NOTE(review): the old "return value" wording was stale -- the
 * function returns void; whether a write-buffer flush is needed is
 * handled by callers. */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
1218
/* return value determine if we need a write buffer flush */
/* Invalidate the IOTLB via the register-based interface with global,
 * domain-selective, or page-selective granularity.  For PSI, @addr may
 * carry the IH (invalidation hint) bit and @size_order encodes the
 * 2^order page count.  Warns if the hardware performed a coarser
 * invalidation than requested. */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* IH bit is passed in as part of address */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}
1275
/* Look up the device_domain_info for @bus/@devfn on @iommu and return
 * it only if the device can actually use a device-IOTLB: the IOMMU must
 * support it (ecap), queued invalidation must be active, and the PCI
 * device must expose an ATS capability covered by a matching ATSR unit.
 * Returns NULL otherwise. */
static struct device_domain_info *
iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
			 u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct pci_dev *pdev;

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->iommu == iommu && info->bus == bus &&
		    info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	/* 'found' guards 'info': when the loop falls through, 'info' does
	 * not point at a valid entry and must not be dereferenced. */
	if (!found || !info->dev || !dev_is_pci(info->dev))
		return NULL;

	pdev = to_pci_dev(info->dev);

	if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(pdev))
		return NULL;

	return info;
}
1313
1314static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1315{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001316 if (!info || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001317 return;
1318
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001319 pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
Yu Zhao93a23a72009-05-18 13:51:37 +08001320}
1321
1322static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1323{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001324 if (!info->dev || !dev_is_pci(info->dev) ||
1325 !pci_ats_enabled(to_pci_dev(info->dev)))
Yu Zhao93a23a72009-05-18 13:51:37 +08001326 return;
1327
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001328 pci_disable_ats(to_pci_dev(info->dev));
Yu Zhao93a23a72009-05-18 13:51:37 +08001329}
1330
/* Send a device-IOTLB (ATS) invalidation for @addr/@mask to every
 * ATS-enabled PCI device attached to @domain, via each device's
 * owning IOMMU's invalidation queue. */
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		struct pci_dev *pdev;
		if (!info->dev || !dev_is_pci(info->dev))
			continue;

		pdev = to_pci_dev(info->dev);
		if (!pci_ats_enabled(pdev))
			continue;

		/* Requester ID of the device, as the hardware sees it. */
		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(pdev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
1354
/* Page-selective IOTLB flush for @pages pages at @pfn in domain @did,
 * falling back to a domain-selective flush when PSI is unsupported or
 * the (power-of-two rounded) range exceeds the hardware maximum.
 * @ih requests the invalidation-hint bit; @map indicates the flush is
 * for a not-present -> present transition (caching mode). */
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int ih, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/* Fold the invalidation hint into bit 6 of the address. */
	if (ih)
		ih = 1 << 6;
	/*
	 * Fallback to domain selective flush if no PSI support or the size is
	 * too big.
	 * PSI requires page size to be 2 ^ x, and the base address is naturally
	 * aligned to the size
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present require
	 * flush. However, device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
1385
/* Clear the Enable Protected Memory bit in the PMEN register and wait
 * for the Protected Region Status bit to drop, disabling the BIOS's
 * protected memory regions so DMA remapping governs all addresses. */
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
1402
/* Set the Translation Enable bit in the global command register and
 * spin until the TES status bit confirms DMA remapping is active. */
static void iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
1418
/* Clear the Translation Enable bit and spin until the TES status bit
 * drops, turning DMA remapping off for this IOMMU. */
static void iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
1434
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001435
/* Allocate the per-IOMMU domain-id bitmap and domain pointer array,
 * sized from the hardware's reported domain count.  Reserves domain id
 * 0 when caching mode is active.  Returns 0 or -ENOMEM (freeing the
 * partially-allocated state on failure). */
static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
		 iommu->seq_id, ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		pr_err("IOMMU%d: allocating domain id array failed\n",
		       iommu->seq_id);
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		pr_err("IOMMU%d: allocating domain array failed\n",
		       iommu->seq_id);
		kfree(iommu->domain_ids);
		iommu->domain_ids = NULL;
		return -ENOMEM;
	}

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domainid 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001475
/*
 * Tear down all state tied to @iommu before it goes away: detach it
 * from every domain it serves (destroying a non-VM domain when its
 * last IOMMU reference drops), disable translation if still enabled,
 * then release the domain bookkeeping and the context tables.
 */
static void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			/*
			 * Domain id 0 is reserved for invalid translation
			 * if hardware supports caching mode.
			 */
			if (cap_caching_mode(iommu->cap) && i == 0)
				continue;

			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);
			/* destroy only when this was the domain's last IOMMU;
			 * VM domains are torn down through their own path */
			if (domain_detach_iommu(domain, iommu) == 0 &&
			    !domain_type_is_vm(domain))
				domain_exit(domain);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	kfree(iommu->domains);
	kfree(iommu->domain_ids);
	iommu->domains = NULL;
	iommu->domain_ids = NULL;

	g_iommus[iommu->seq_id] = NULL;

	/* free context mapping */
	free_context_table(iommu);
}
1511
/*
 * Allocate and minimally initialise a dmar_domain.  VM domains are
 * assigned a globally unique id immediately; other domains get their
 * id later, when attached to an IOMMU.  Returns NULL on failure.
 */
static struct dmar_domain *alloc_domain(int flags)
{
	/* domain id for virtual machine, it won't be set in context */
	static atomic_t vm_domid = ATOMIC_INIT(0);
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(domain, 0, sizeof(*domain));
	domain->nid = -1;	/* no NUMA node assigned yet */
	domain->flags = flags;
	spin_lock_init(&domain->iommu_lock);
	INIT_LIST_HEAD(&domain->devices);
	if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
		domain->id = atomic_inc_return(&vm_domid);

	return domain;
}
1532
Jiang Liufb170fb2014-07-11 14:19:28 +08001533static int __iommu_attach_domain(struct dmar_domain *domain,
1534 struct intel_iommu *iommu)
1535{
1536 int num;
1537 unsigned long ndomains;
1538
1539 ndomains = cap_ndoms(iommu->cap);
1540 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1541 if (num < ndomains) {
1542 set_bit(num, iommu->domain_ids);
1543 iommu->domains[num] = domain;
1544 } else {
1545 num = -ENOSPC;
1546 }
1547
1548 return num;
1549}
1550
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001551static int iommu_attach_domain(struct dmar_domain *domain,
1552 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001553{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001554 int num;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001555 unsigned long flags;
1556
Weidong Han8c11e792008-12-08 15:29:22 +08001557 spin_lock_irqsave(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001558 num = __iommu_attach_domain(domain, iommu);
Jiang Liu44bde612014-07-11 14:19:29 +08001559 spin_unlock_irqrestore(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001560 if (num < 0)
1561 pr_err("IOMMU: no free domain ids\n");
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001562
Jiang Liufb170fb2014-07-11 14:19:28 +08001563 return num;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001564}
1565
Jiang Liu44bde612014-07-11 14:19:29 +08001566static int iommu_attach_vm_domain(struct dmar_domain *domain,
1567 struct intel_iommu *iommu)
1568{
1569 int num;
1570 unsigned long ndomains;
1571
1572 ndomains = cap_ndoms(iommu->cap);
1573 for_each_set_bit(num, iommu->domain_ids, ndomains)
1574 if (iommu->domains[num] == domain)
1575 return num;
1576
1577 return __iommu_attach_domain(domain, iommu);
1578}
1579
/*
 * Release @domain's id on @iommu.  VM and static-identity domains are
 * registered under a per-IOMMU id, so their slot must be found by
 * searching the id table; other domains are indexed directly by
 * domain->id.
 */
static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;

	spin_lock_irqsave(&iommu->lock, flags);
	if (domain_type_is_vm_or_si(domain)) {
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == domain) {
				clear_bit(num, iommu->domain_ids);
				iommu->domains[num] = NULL;
				break;
			}
		}
	} else {
		clear_bit(domain->id, iommu->domain_ids);
		iommu->domains[domain->id] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
1602
/*
 * Record that @domain is now served by @iommu.  On the first
 * attachment the domain inherits the IOMMU's NUMA node; the
 * domain-wide capability summary is recomputed in any case.
 */
static void domain_attach_iommu(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
		domain->iommu_count++;
		if (domain->iommu_count == 1)
			domain->nid = iommu->node;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
}
1617
/*
 * Drop @iommu from @domain's set of serving IOMMUs.  Returns the
 * remaining attachment count so the caller can detect "last IOMMU
 * gone" (count == 0); returns INT_MAX if @iommu was not attached.
 */
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	unsigned long flags;
	int count = INT_MAX;	/* sentinel: "was not attached" */

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
		count = --domain->iommu_count;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);

	return count;
}
1633
/* IOVA ranges (IOAPIC window, PCI MMIO) that must never be handed out
 * for DMA; populated by dmar_init_reserved_ranges() and copied into
 * each new domain by domain_reserve_special_ranges(). */
static struct iova_domain reserved_iova_list;
/* NOTE(review): separate lockdep class, presumably so the reserved
 * tree's lock is not conflated with per-domain iova tree locks --
 * set via lockdep_set_class() in dmar_init_reserved_ranges(). */
static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001636
/*
 * Build the global list of IOVA ranges that may never be used for DMA:
 * the IOAPIC range, and every PCI MMIO resource (to avoid peer-to-peer
 * accesses).  Returns 0 on success, -ENODEV if a reservation fails.
 */
static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova) {
		printk(KERN_ERR "Reserve IOAPIC range failed\n");
		return -ENODEV;
	}

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova) {
				printk(KERN_ERR "Reserve iova failed\n");
				return -ENODEV;
			}
		}
	}
	return 0;
}
1675
/* Copy the globally reserved IOVA ranges (IOAPIC, PCI MMIO) into a
 * freshly initialised domain's iova allocator. */
static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}
1680
/*
 * Round a guest address width up to the next value representable by a
 * whole number of page-table levels: 12 offset bits plus 9 bits per
 * level.  The result is capped at 64 bits.
 */
static inline int guestwidth_to_adjustwidth(int gaw)
{
	int rem = (gaw - 12) % 9;
	int agaw = rem ? gaw + (9 - rem) : gaw;

	return agaw > 64 ? 64 : agaw;
}
1694
/*
 * First-time initialisation of a domain already attached to an IOMMU:
 * set up its iova allocator, choose an adjusted guest address width
 * (AGAW) the hardware supports, snapshot coherency/snooping/superpage
 * capabilities, and allocate the top-level page directory.
 *
 * Returns 0 on success, -ENODEV if no supported AGAW exists, or
 * -ENOMEM if the top pgd cannot be allocated.
 */
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);	/* clamp to HW max */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);	/* bitmap of AGAWs the HW supports */
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)	/* no larger supported AGAW either */
			return -ENODEV;
	}
	domain->agaw = agaw;

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	if (intel_iommu_superpage)
		domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	else
		domain->iommu_superpage = 0;

	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}
1745
/*
 * Destroy @domain: flush pending lazy unmaps, detach all devices,
 * release the iova allocator, unmap and collect all page-table pages,
 * drop the domain from every active IOMMU, then free the collected
 * pages and the domain itself.
 */
static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	struct page *freelist = NULL;

	/* Domain 0 is reserved, so dont process it */
	if (!domain)
		return;

	/* Flush any lazy unmaps that may reference this domain */
	if (!intel_iommu_strict)
		flush_unmaps_timeout(0);

	/* remove associated devices */
	domain_remove_dev_info(domain);

	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* unmap everything; freed pages are collected and released only
	 * after the domain is detached from all IOMMUs below */
	freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* clear attached or cached domains */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd)
		iommu_detach_domain(domain, iommu);
	rcu_read_unlock();

	dma_free_pagelist(freelist);

	free_domain_mem(domain);
}
1778
/*
 * Program the context entry for (@bus, @devfn) behind @iommu so that
 * DMA from that device is translated through @domain's page tables,
 * or passed through, according to @translation.  A no-op (returning 0)
 * if the context entry is already present.  Returns 0 on success,
 * -ENOMEM on table allocation/lookup failure, -EFAULT if no domain id
 * is free on this IOMMU.
 */
static int domain_context_mapping_one(struct dmar_domain *domain,
				      struct intel_iommu *iommu,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct dma_pte *pgd;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		/* already mapped -- nothing to do */
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain_type_is_vm_or_si(domain)) {
		if (domain_type_is_vm(domain)) {
			/* VM domains use a per-IOMMU id */
			id = iommu_attach_vm_domain(domain, iommu);
			if (id < 0) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				pr_err("IOMMU: no free domain ids\n");
				return -EFAULT;
			}
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 * Unnecessary for PT mode.
		 */
		if (translation != CONTEXT_TT_PASS_THROUGH) {
			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd)) {
					spin_unlock_irqrestore(&iommu->lock, flags);
					return -ENOMEM;
				}
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		/* upgrade to device-IOTLB translation when supported */
		info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entry we only need to flush the write-buffer. If the
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	domain_attach_iommu(domain, iommu);

	return 0;
}
1879
/* Argument bundle threaded through pci_for_each_dma_alias() when
 * mapping every DMA alias of a device into a domain. */
struct domain_context_mapping_data {
	struct dmar_domain *domain;
	struct intel_iommu *iommu;
	int translation;
};
1885
1886static int domain_context_mapping_cb(struct pci_dev *pdev,
1887 u16 alias, void *opaque)
1888{
1889 struct domain_context_mapping_data *data = opaque;
1890
1891 return domain_context_mapping_one(data->domain, data->iommu,
1892 PCI_BUS_NUM(alias), alias & 0xff,
1893 data->translation);
1894}
1895
/*
 * Set up context entries mapping @dev to @domain.  For PCI devices
 * every DMA alias is mapped; other devices get a single entry.
 * Returns 0 on success or a negative errno.
 */
static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev,
		       int translation)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;
	struct domain_context_mapping_data data;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	/* non-PCI devices have no DMA aliases */
	if (!dev_is_pci(dev))
		return domain_context_mapping_one(domain, iommu, bus, devfn,
						  translation);

	data.domain = domain;
	data.iommu = iommu;
	data.translation = translation;

	return pci_for_each_dma_alias(to_pci_dev(dev),
				      &domain_context_mapping_cb, &data);
}
1919
/* pci_for_each_dma_alias() callback: returns non-zero (stopping the
 * walk) as soon as one alias is NOT context-mapped on @opaque's IOMMU. */
static int domain_context_mapped_cb(struct pci_dev *pdev,
				    u16 alias, void *opaque)
{
	struct intel_iommu *iommu = opaque;

	return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
}
1927
/*
 * Return non-zero if @dev has a context mapping programmed (for PCI
 * devices: if every DMA alias does); 0 if any alias is unmapped.
 * Returns -ENODEV when the device has no IOMMU.
 */
static int domain_context_mapped(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	if (!dev_is_pci(dev))
		return device_context_mapped(iommu, bus, devfn);

	/* the walk returns non-zero when it finds an unmapped alias */
	return !pci_for_each_dma_alias(to_pci_dev(dev),
				       domain_context_mapped_cb, iommu);
}
1943
/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	/* keep only the offset within the MM page, round the covered
	 * span up to a whole MM page, then convert to VT-d page units */
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
1951
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001952/* Return largest possible superpage level for a given mapping */
1953static inline int hardware_largepage_caps(struct dmar_domain *domain,
1954 unsigned long iov_pfn,
1955 unsigned long phy_pfn,
1956 unsigned long pages)
1957{
1958 int support, level = 1;
1959 unsigned long pfnmerge;
1960
1961 support = domain->iommu_superpage;
1962
1963 /* To use a large page, the virtual *and* physical addresses
1964 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1965 of them will mean we have to use smaller pages. So just
1966 merge them and check both at once. */
1967 pfnmerge = iov_pfn | phy_pfn;
1968
1969 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1970 pages >>= VTD_STRIDE_SHIFT;
1971 if (!pages)
1972 break;
1973 pfnmerge >>= VTD_STRIDE_SHIFT;
1974 level++;
1975 support--;
1976 }
1977 return level;
1978}
1979
/*
 * Fill @domain's page tables from @iov_pfn for @nr_pages pages, taking
 * physical pages either from scatterlist @sg (then @phys_pfn is
 * ignored and each sg entry's dma_address/dma_length are set) or from
 * the contiguous range starting at @phys_pfn.  Superpages are used
 * whenever alignment and remaining length permit.
 *
 * Returns 0 on success, -EINVAL if @prot grants neither read nor
 * write, -ENOMEM if a page table cannot be allocated.
 */
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	unsigned long sg_res;
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;

	BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (sg)
		sg_res = 0;	/* forces per-sg-entry setup in the loop */
	else {
		sg_res = nr_pages + 1;	/* never exhausted in linear mode */
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages > 0) {
		uint64_t tmp;

		if (!sg_res) {
			/* advance to the next scatterlist entry */
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
		}

		if (!pte) {
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
			if (!pte)
				return -ENOMEM;
			/* It is large page*/
			if (largepage_lvl > 1) {
				pteval |= DMA_PTE_LARGE_PAGE;
				lvl_pages = lvl_to_nr_pages(largepage_lvl);
				/*
				 * Ensure that old small page tables are
				 * removed to make room for superpage,
				 * if they exist.
				 */
				dma_pte_free_pagetable(domain, iov_pfn,
						       iov_pfn + lvl_pages - 1);
			} else {
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
			}

		}
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			/* PTE was already set: a double mapping -- warn,
			 * rate-limit the mapping dumps, and carry on */
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/* If the next PTE would be the first in a new page, then we
		   need to flush the cache on the entries we've just written.
		   And then we'll need to recalculate 'pte', so clear it and
		   let it get set again in the if (!pte) block above.

		   If we're done (!nr_pages) we need to flush the cache too.

		   Also if we've been setting superpages, we may need to
		   recalculate 'pte' and switch back to smaller pages for the
		   end of the mapping, if the trailing size is not enough to
		   use another superpage (i.e. sg_res < lvl_pages). */
		pte++;
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}

		if (!sg_res && nr_pages)
			sg = sg_next(sg);
	}
	return 0;
}
2087
/* Map a scatterlist into @domain starting at @iov_pfn (scatterlist
 * variant of __domain_mapping(); phys_pfn is unused). */
static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002094
/* Map a physically contiguous pfn range into @domain starting at
 * @iov_pfn (linear variant of __domain_mapping(); no scatterlist). */
static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}
2101
/*
 * Clear the context entry for (@bus, @devfn) on @iommu and perform a
 * global context-cache and IOTLB invalidation so the device can no
 * longer DMA through its old translation.  Tolerates a NULL @iommu.
 */
static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
				   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}
2112
/* Unhook @info from its per-domain and global lists and clear the
 * device's cached archdata pointer.  Caller must hold
 * device_domain_lock (asserted below). */
static inline void unlink_domain_info(struct device_domain_info *info)
{
	assert_spin_locked(&device_domain_lock);
	list_del(&info->link);
	list_del(&info->global);
	if (info->dev)
		info->dev->archdata.iommu = NULL;
}
2121
/*
 * Detach every device from @domain: unlink its device_domain_info,
 * disable the device IOTLB, tear down its context entry and, for VM
 * domains, drop dependent devices and the per-IOMMU attachment too.
 * device_domain_lock is dropped around the hardware operations, hence
 * the _safe iteration and re-locking each pass.
 */
static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu_detach_dev(info->iommu, info->bus, info->devfn);

		if (domain_type_is_vm(domain)) {
			iommu_detach_dependent_devices(info->iommu, info->dev);
			domain_detach_iommu(domain, info->iommu);
		}

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
2145
/*
 * find_domain - look up the domain @dev is already attached to.
 * The device_domain_info is cached in dev->archdata.iommu; returns
 * the cached domain, or NULL if the device has none yet.
 */
static struct dmar_domain *find_domain(struct device *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}
2160
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002161static inline struct device_domain_info *
Jiang Liu745f2582014-02-19 14:07:26 +08002162dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2163{
2164 struct device_domain_info *info;
2165
2166 list_for_each_entry(info, &device_domain_list, global)
David Woodhouse41e80dca2014-03-09 13:55:54 -07002167 if (info->iommu->segment == segment && info->bus == bus &&
Jiang Liu745f2582014-02-19 14:07:26 +08002168 info->devfn == devfn)
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002169 return info;
Jiang Liu745f2582014-02-19 14:07:26 +08002170
2171 return NULL;
2172}
2173
/*
 * Allocate and register a device_domain_info tying (bus, devfn) on
 * @iommu — and optionally @dev — to @domain.
 *
 * Returns the domain actually in effect: @domain on success, an
 * already-registered domain if another path won the race (the caller
 * must then free the domain it passed in), or NULL on allocation
 * failure.
 */
static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
						int bus, int devfn,
						struct device *dev,
						struct dmar_domain *domain)
{
	struct dmar_domain *found = NULL;
	struct device_domain_info *info;
	unsigned long flags;

	info = alloc_devinfo_mem();
	if (!info)
		return NULL;

	/* Fill in before taking the lock; nothing else can see it yet. */
	info->bus = bus;
	info->devfn = devfn;
	info->dev = dev;
	info->domain = domain;
	info->iommu = iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	if (dev)
		found = find_domain(dev);
	else {
		/* No struct device (DMA alias): match on segment/bus/devfn */
		struct device_domain_info *info2;
		info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
		if (info2)
			found = info2->domain;
	}
	if (found) {
		/* Lost the race: keep the existing registration. */
		spin_unlock_irqrestore(&device_domain_lock, flags);
		free_devinfo_mem(info);
		/* Caller must free the original domain */
		return found;
	}

	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	if (dev)
		dev->archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return domain;
}
2217
Alex Williamson579305f2014-07-03 09:51:43 -06002218static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2219{
2220 *(u16 *)opaque = alias;
2221 return 0;
2222}
2223
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002224/* domain is initialized */
David Woodhouse146922e2014-03-09 15:44:17 -07002225static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002226{
Alex Williamson579305f2014-07-03 09:51:43 -06002227 struct dmar_domain *domain, *tmp;
2228 struct intel_iommu *iommu;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002229 struct device_domain_info *info;
Alex Williamson579305f2014-07-03 09:51:43 -06002230 u16 dma_alias;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002231 unsigned long flags;
Yijing Wangaa4d0662014-05-26 20:14:06 +08002232 u8 bus, devfn;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002233
David Woodhouse146922e2014-03-09 15:44:17 -07002234 domain = find_domain(dev);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002235 if (domain)
2236 return domain;
2237
David Woodhouse146922e2014-03-09 15:44:17 -07002238 iommu = device_to_iommu(dev, &bus, &devfn);
2239 if (!iommu)
Alex Williamson579305f2014-07-03 09:51:43 -06002240 return NULL;
2241
2242 if (dev_is_pci(dev)) {
2243 struct pci_dev *pdev = to_pci_dev(dev);
2244
2245 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2246
2247 spin_lock_irqsave(&device_domain_lock, flags);
2248 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2249 PCI_BUS_NUM(dma_alias),
2250 dma_alias & 0xff);
2251 if (info) {
2252 iommu = info->iommu;
2253 domain = info->domain;
2254 }
2255 spin_unlock_irqrestore(&device_domain_lock, flags);
2256
2257 /* DMA alias already has a domain, uses it */
2258 if (info)
2259 goto found_domain;
2260 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002261
David Woodhouse146922e2014-03-09 15:44:17 -07002262 /* Allocate and initialize new domain for the device */
Jiang Liuab8dfe22014-07-11 14:19:27 +08002263 domain = alloc_domain(0);
Jiang Liu745f2582014-02-19 14:07:26 +08002264 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002265 return NULL;
Jiang Liu44bde612014-07-11 14:19:29 +08002266 domain->id = iommu_attach_domain(domain, iommu);
2267 if (domain->id < 0) {
Alex Williamson2fe9723d2011-03-04 14:52:30 -07002268 free_domain_mem(domain);
Alex Williamson579305f2014-07-03 09:51:43 -06002269 return NULL;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002270 }
Jiang Liufb170fb2014-07-11 14:19:28 +08002271 domain_attach_iommu(domain, iommu);
Alex Williamson579305f2014-07-03 09:51:43 -06002272 if (domain_init(domain, gaw)) {
2273 domain_exit(domain);
2274 return NULL;
2275 }
2276
2277 /* register PCI DMA alias device */
2278 if (dev_is_pci(dev)) {
2279 tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2280 dma_alias & 0xff, NULL, domain);
2281
2282 if (!tmp || tmp != domain) {
2283 domain_exit(domain);
2284 domain = tmp;
2285 }
2286
David Woodhouseb718cd32014-03-09 13:11:33 -07002287 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002288 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002289 }
2290
2291found_domain:
Alex Williamson579305f2014-07-03 09:51:43 -06002292 tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2293
2294 if (!tmp || tmp != domain) {
2295 domain_exit(domain);
2296 domain = tmp;
2297 }
David Woodhouseb718cd32014-03-09 13:11:33 -07002298
2299 return domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002300}
2301
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002302static int iommu_identity_mapping;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002303#define IDENTMAP_ALL 1
2304#define IDENTMAP_GFX 2
2305#define IDENTMAP_AZALIA 4
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002306
David Woodhouseb2132032009-06-26 18:50:28 +01002307static int iommu_domain_identity_map(struct dmar_domain *domain,
2308 unsigned long long start,
2309 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002310{
David Woodhousec5395d52009-06-28 16:35:56 +01002311 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2312 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002313
David Woodhousec5395d52009-06-28 16:35:56 +01002314 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2315 dma_to_mm_pfn(last_vpfn))) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002316 printk(KERN_ERR "IOMMU: reserve iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002317 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002318 }
2319
David Woodhousec5395d52009-06-28 16:35:56 +01002320 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2321 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002322 /*
2323 * RMRR range might have overlap with physical memory range,
2324 * clear it first
2325 */
David Woodhousec5395d52009-06-28 16:35:56 +01002326 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002327
David Woodhousec5395d52009-06-28 16:35:56 +01002328 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2329 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002330 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002331}
2332
David Woodhouse0b9d9752014-03-09 15:48:15 -07002333static int iommu_prepare_identity_map(struct device *dev,
David Woodhouseb2132032009-06-26 18:50:28 +01002334 unsigned long long start,
2335 unsigned long long end)
2336{
2337 struct dmar_domain *domain;
2338 int ret;
2339
David Woodhouse0b9d9752014-03-09 15:48:15 -07002340 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01002341 if (!domain)
2342 return -ENOMEM;
2343
David Woodhouse19943b02009-08-04 16:19:20 +01002344 /* For _hardware_ passthrough, don't bother. But for software
2345 passthrough, we do it anyway -- it may indicate a memory
2346 range which is reserved in E820, so which didn't get set
2347 up to start with in si_domain */
2348 if (domain == si_domain && hw_pass_through) {
2349 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
David Woodhouse0b9d9752014-03-09 15:48:15 -07002350 dev_name(dev), start, end);
David Woodhouse19943b02009-08-04 16:19:20 +01002351 return 0;
2352 }
2353
2354 printk(KERN_INFO
2355 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
David Woodhouse0b9d9752014-03-09 15:48:15 -07002356 dev_name(dev), start, end);
David Woodhouse2ff729f2009-08-26 14:25:41 +01002357
David Woodhouse5595b522009-12-02 09:21:55 +00002358 if (end < start) {
2359 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2360 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2361 dmi_get_system_info(DMI_BIOS_VENDOR),
2362 dmi_get_system_info(DMI_BIOS_VERSION),
2363 dmi_get_system_info(DMI_PRODUCT_VERSION));
2364 ret = -EIO;
2365 goto error;
2366 }
2367
David Woodhouse2ff729f2009-08-26 14:25:41 +01002368 if (end >> agaw_to_width(domain->agaw)) {
2369 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2370 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2371 agaw_to_width(domain->agaw),
2372 dmi_get_system_info(DMI_BIOS_VENDOR),
2373 dmi_get_system_info(DMI_BIOS_VERSION),
2374 dmi_get_system_info(DMI_PRODUCT_VERSION));
2375 ret = -EIO;
2376 goto error;
2377 }
David Woodhouse19943b02009-08-04 16:19:20 +01002378
David Woodhouseb2132032009-06-26 18:50:28 +01002379 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002380 if (ret)
2381 goto error;
2382
2383 /* context entry init */
David Woodhouse0b9d9752014-03-09 15:48:15 -07002384 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01002385 if (ret)
2386 goto error;
2387
2388 return 0;
2389
2390 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002391 domain_exit(domain);
2392 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002393}
2394
2395static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
David Woodhouse0b9d9752014-03-09 15:48:15 -07002396 struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002397{
David Woodhouse0b9d9752014-03-09 15:48:15 -07002398 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002399 return 0;
David Woodhouse0b9d9752014-03-09 15:48:15 -07002400 return iommu_prepare_identity_map(dev, rmrr->base_address,
2401 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002402}
2403
#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
/*
 * Identity-map the low 16MiB for the LPC/ISA bridge so legacy devices
 * (e.g. the floppy controller) can still DMA there.  No-op when no ISA
 * bridge is present.
 */
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");

	/* pci_get_class() took a reference; drop it. */
	pci_dev_put(pdev);
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002429
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002430static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002431
/*
 * Create and initialize the global static-identity domain (si_domain).
 *
 * The domain is attached to every active IOMMU; each attach returns a
 * domain id, and all IOMMUs must agree on the same id or init fails.
 * With @hw (hardware passthrough) no page tables are needed; otherwise
 * every online node's memory is identity-mapped into the domain.
 *
 * Returns 0 on success, -EFAULT (or a mapping errno) on failure.
 */
static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;
	bool first = true;

	si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
	if (!si_domain)
		return -EFAULT;

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret < 0) {
			domain_exit(si_domain);
			return -EFAULT;
		} else if (first) {
			/* Adopt the id handed out by the first IOMMU... */
			si_domain->id = ret;
			first = false;
		} else if (si_domain->id != ret) {
			/* ...and insist every other IOMMU matches it. */
			domain_exit(si_domain);
			return -EFAULT;
		}
		domain_attach_iommu(si_domain, iommu);
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	pr_debug("IOMMU: identity mapping domain is domain %d\n",
		 si_domain->id);

	/* Hardware passthrough needs no page tables at all. */
	if (hw)
		return 0;

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		int i;

		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			ret = iommu_domain_identity_map(si_domain,
					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
			if (ret)
				return ret;
		}
	}

	return 0;
}
2483
David Woodhouse9b226622014-03-09 14:03:28 -07002484static int identity_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002485{
2486 struct device_domain_info *info;
2487
2488 if (likely(!iommu_identity_mapping))
2489 return 0;
2490
David Woodhouse9b226622014-03-09 14:03:28 -07002491 info = dev->archdata.iommu;
Mike Traviscb452a42011-05-28 13:15:03 -05002492 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2493 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002494
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002495 return 0;
2496}
2497
2498static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5913c9b2014-03-09 16:27:31 -07002499 struct device *dev, int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002500{
David Woodhouse0ac72662014-03-09 13:19:22 -07002501 struct dmar_domain *ndomain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002502 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002503 u8 bus, devfn;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002504 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002505
David Woodhouse5913c9b2014-03-09 16:27:31 -07002506 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002507 if (!iommu)
2508 return -ENODEV;
2509
David Woodhouse5913c9b2014-03-09 16:27:31 -07002510 ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
David Woodhouse0ac72662014-03-09 13:19:22 -07002511 if (ndomain != domain)
2512 return -EBUSY;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002513
David Woodhouse5913c9b2014-03-09 16:27:31 -07002514 ret = domain_context_mapping(domain, dev, translation);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002515 if (ret) {
David Woodhouse5913c9b2014-03-09 16:27:31 -07002516 domain_remove_one_dev_info(domain, dev);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002517 return ret;
2518 }
2519
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002520 return 0;
2521}
2522
David Woodhouse0b9d9752014-03-09 15:48:15 -07002523static bool device_has_rmrr(struct device *dev)
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002524{
2525 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002526 struct device *tmp;
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002527 int i;
2528
Jiang Liu0e242612014-02-19 14:07:34 +08002529 rcu_read_lock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002530 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002531 /*
2532 * Return TRUE if this RMRR contains the device that
2533 * is passed in.
2534 */
2535 for_each_active_dev_scope(rmrr->devices,
2536 rmrr->devices_cnt, i, tmp)
David Woodhouse0b9d9752014-03-09 15:48:15 -07002537 if (tmp == dev) {
Jiang Liu0e242612014-02-19 14:07:34 +08002538 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002539 return true;
Jiang Liub683b232014-02-19 14:07:32 +08002540 }
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002541 }
Jiang Liu0e242612014-02-19 14:07:34 +08002542 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002543 return false;
2544}
2545
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002546/*
2547 * There are a couple cases where we need to restrict the functionality of
2548 * devices associated with RMRRs. The first is when evaluating a device for
2549 * identity mapping because problems exist when devices are moved in and out
2550 * of domains and their respective RMRR information is lost. This means that
2551 * a device with associated RMRRs will never be in a "passthrough" domain.
2552 * The second is use of the device through the IOMMU API. This interface
2553 * expects to have full control of the IOVA space for the device. We cannot
2554 * satisfy both the requirement that RMRR access is maintained and have an
2555 * unencumbered IOVA space. We also have no ability to quiesce the device's
2556 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2557 * We therefore prevent devices associated with an RMRR from participating in
2558 * the IOMMU API, which eliminates them from device assignment.
2559 *
2560 * In both cases we assume that PCI USB devices with RMRRs have them largely
2561 * for historical reasons and that the RMRR space is not actively used post
2562 * boot. This exclusion may change if vendors begin to abuse it.
2563 */
2564static bool device_is_rmrr_locked(struct device *dev)
2565{
2566 if (!device_has_rmrr(dev))
2567 return false;
2568
2569 if (dev_is_pci(dev)) {
2570 struct pci_dev *pdev = to_pci_dev(dev);
2571
2572 if ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
2573 return false;
2574 }
2575
2576 return true;
2577}
2578
/*
 * Decide whether @dev should live in the static identity (1:1) domain.
 *
 * @startup: non-zero during boot-time setup, when DMA mask capability
 *           cannot yet be judged; zero for run-time re-evaluation.
 *
 * Returns 1 to identity-map, 0 to give the device its own domain.
 */
static int iommu_should_identity_map(struct device *dev, int startup)
{
	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		/* RMRR-locked devices must never be in passthrough. */
		if (device_is_rmrr_locked(dev))
			return 0;

		if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
			return 1;

		if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
			return 1;

		if (!(iommu_identity_mapping & IDENTMAP_ALL))
			return 0;

		/*
		 * We want to start off with all devices in the 1:1 domain, and
		 * take them out later if we find they can't access all of memory.
		 *
		 * However, we can't do this for PCI devices behind bridges,
		 * because all PCI devices behind the same bridge will end up
		 * with the same source-id on their transactions.
		 *
		 * Practically speaking, we can't change things around for these
		 * devices at run-time, because we can't be sure there'll be no
		 * DMA transactions in flight for any of their siblings.
		 *
		 * So PCI devices (unless they're on the root bus) as well as
		 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
		 * the 1:1 domain, just in _case_ one of their siblings turns out
		 * not to be able to map all of memory.
		 */
		if (!pci_is_pcie(pdev)) {
			if (!pci_is_root_bus(pdev->bus))
				return 0;
			if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
				return 0;
		} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;
	} else {
		/* Non-PCI devices with RMRRs stay out of the 1:1 domain. */
		if (device_has_rmrr(dev))
			return 0;
	}

	/*
	 * At boot time, we don't yet know if devices will be 64-bit capable.
	 * Assume that they will — if they turn out not to be, then we can
	 * take them out of the 1:1 domain later.
	 */
	if (!startup) {
		/*
		 * If the device's dma_mask is less than the system's memory
		 * size then this is not a candidate for identity mapping.
		 */
		u64 dma_mask = *dev->dma_mask;

		if (dev->coherent_dma_mask &&
		    dev->coherent_dma_mask < dma_mask)
			dma_mask = dev->coherent_dma_mask;

		return dma_mask >= dma_get_required_mask(dev);
	}

	return 1;
}
2647
David Woodhousecf04eee2014-03-21 16:49:04 +00002648static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2649{
2650 int ret;
2651
2652 if (!iommu_should_identity_map(dev, 1))
2653 return 0;
2654
2655 ret = domain_add_dev_info(si_domain, dev,
2656 hw ? CONTEXT_TT_PASS_THROUGH :
2657 CONTEXT_TT_MULTI_LEVEL);
2658 if (!ret)
2659 pr_info("IOMMU: %s identity mapping for device %s\n",
2660 hw ? "hardware" : "software", dev_name(dev));
2661 else if (ret == -ENODEV)
2662 /* device not associated with an iommu */
2663 ret = 0;
2664
2665 return ret;
2666}
2667
2668
Matt Kraai071e1372009-08-23 22:30:22 -07002669static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002670{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002671 struct pci_dev *pdev = NULL;
David Woodhousecf04eee2014-03-21 16:49:04 +00002672 struct dmar_drhd_unit *drhd;
2673 struct intel_iommu *iommu;
2674 struct device *dev;
2675 int i;
2676 int ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002677
David Woodhouse19943b02009-08-04 16:19:20 +01002678 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002679 if (ret)
2680 return -EFAULT;
2681
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002682 for_each_pci_dev(pdev) {
David Woodhousecf04eee2014-03-21 16:49:04 +00002683 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2684 if (ret)
2685 return ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002686 }
2687
David Woodhousecf04eee2014-03-21 16:49:04 +00002688 for_each_active_iommu(iommu, drhd)
2689 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2690 struct acpi_device_physical_node *pn;
2691 struct acpi_device *adev;
2692
2693 if (dev->bus != &acpi_bus_type)
2694 continue;
2695
2696 adev= to_acpi_device(dev);
2697 mutex_lock(&adev->physical_node_lock);
2698 list_for_each_entry(pn, &adev->physical_node_list, node) {
2699 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2700 if (ret)
2701 break;
2702 }
2703 mutex_unlock(&adev->physical_node_lock);
2704 if (ret)
2705 return ret;
2706 }
2707
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002708 return 0;
2709}
2710
Joseph Cihulab7792602011-05-03 00:08:37 -07002711static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002712{
2713 struct dmar_drhd_unit *drhd;
2714 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002715 struct device *dev;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002716 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002717 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002718
2719 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002720 * for each drhd
2721 * allocate root
2722 * initialize and program root entry to not present
2723 * endfor
2724 */
2725 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002726 /*
2727 * lock not needed as this is only incremented in the single
2728 * threaded kernel __init code path all other access are read
2729 * only
2730 */
Mike Travis1b198bb2012-03-05 15:05:16 -08002731 if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
2732 g_num_of_iommus++;
2733 continue;
2734 }
2735 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
2736 IOMMU_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08002737 }
2738
Weidong Hand9630fe2008-12-08 11:06:32 +08002739 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2740 GFP_KERNEL);
2741 if (!g_iommus) {
2742 printk(KERN_ERR "Allocating global iommu array failed\n");
2743 ret = -ENOMEM;
2744 goto error;
2745 }
2746
mark gross80b20dd2008-04-18 13:53:58 -07002747 deferred_flush = kzalloc(g_num_of_iommus *
2748 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2749 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002750 ret = -ENOMEM;
Jiang Liu989d51f2014-02-19 14:07:21 +08002751 goto free_g_iommus;
mark gross5e0d2a62008-03-04 15:22:08 -08002752 }
2753
Jiang Liu7c919772014-01-06 14:18:18 +08002754 for_each_active_iommu(iommu, drhd) {
Weidong Hand9630fe2008-12-08 11:06:32 +08002755 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002756
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002757 ret = iommu_init_domains(iommu);
2758 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002759 goto free_iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002760
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002761 /*
2762 * TBD:
2763 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002764 * among all IOMMU's. Need to Split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002765 */
2766 ret = iommu_alloc_root_entry(iommu);
2767 if (ret) {
2768 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08002769 goto free_iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002770 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002771 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002772 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002773 }
2774
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002775 /*
2776 * Start from the sane iommu hardware state.
2777 */
Jiang Liu7c919772014-01-06 14:18:18 +08002778 for_each_active_iommu(iommu, drhd) {
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002779 /*
2780 * If the queued invalidation is already initialized by us
2781 * (for example, while enabling interrupt-remapping) then
2782 * we got the things already rolling from a sane state.
2783 */
2784 if (iommu->qi)
2785 continue;
2786
2787 /*
2788 * Clear any previous faults.
2789 */
2790 dmar_fault(-1, iommu);
2791 /*
2792 * Disable queued invalidation if supported and already enabled
2793 * before OS handover.
2794 */
2795 dmar_disable_qi(iommu);
2796 }
2797
Jiang Liu7c919772014-01-06 14:18:18 +08002798 for_each_active_iommu(iommu, drhd) {
Youquan Songa77b67d2008-10-16 16:31:56 -07002799 if (dmar_enable_qi(iommu)) {
2800 /*
2801 * Queued Invalidate not enabled, use Register Based
2802 * Invalidate
2803 */
2804 iommu->flush.flush_context = __iommu_flush_context;
2805 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002806 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002807 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002808 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002809 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002810 } else {
2811 iommu->flush.flush_context = qi_flush_context;
2812 iommu->flush.flush_iotlb = qi_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002813 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002814 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002815 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002816 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002817 }
2818 }
2819
David Woodhouse19943b02009-08-04 16:19:20 +01002820 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07002821 iommu_identity_mapping |= IDENTMAP_ALL;
2822
Suresh Siddhad3f13812011-08-23 17:05:25 -07002823#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07002824 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002825#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07002826
2827 check_tylersburg_isoch();
2828
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002829 /*
2830 * If pass through is not set or not enabled, setup context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002831 * identity mappings for rmrr, gfx, and isa and may fall back to static
2832 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002833 */
David Woodhouse19943b02009-08-04 16:19:20 +01002834 if (iommu_identity_mapping) {
2835 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2836 if (ret) {
2837 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08002838 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002839 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002840 }
David Woodhouse19943b02009-08-04 16:19:20 +01002841 /*
2842 * For each rmrr
2843 * for each dev attached to rmrr
2844 * do
2845 * locate drhd for dev, alloc domain for dev
2846 * allocate free domain
2847 * allocate page table entries for rmrr
2848 * if context not allocated for bus
2849 * allocate and init context
2850 * set present in root table for this bus
2851 * init context with domain, translation etc
2852 * endfor
2853 * endfor
2854 */
2855 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2856 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002857 /* some BIOS lists non-exist devices in DMAR table. */
2858 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
David Woodhouse832bd852014-03-07 15:08:36 +00002859 i, dev) {
David Woodhouse0b9d9752014-03-09 15:48:15 -07002860 ret = iommu_prepare_rmrr_dev(rmrr, dev);
David Woodhouse19943b02009-08-04 16:19:20 +01002861 if (ret)
2862 printk(KERN_ERR
2863 "IOMMU: mapping reserved region failed\n");
2864 }
2865 }
2866
2867 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002868
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002869 /*
2870 * for each drhd
2871 * enable fault log
2872 * global invalidate context cache
2873 * global invalidate iotlb
2874 * enable translation
2875 */
Jiang Liu7c919772014-01-06 14:18:18 +08002876 for_each_iommu(iommu, drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07002877 if (drhd->ignored) {
2878 /*
2879 * we always have to disable PMRs or DMA may fail on
2880 * this device
2881 */
2882 if (force_on)
Jiang Liu7c919772014-01-06 14:18:18 +08002883 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002884 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07002885 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002886
2887 iommu_flush_write_buffer(iommu);
2888
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002889 ret = dmar_set_interrupt(iommu);
2890 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002891 goto free_iommu;
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002892
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002893 iommu_set_root_entry(iommu);
2894
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002895 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002896 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Jiang Liu2a41cce2014-07-11 14:19:33 +08002897 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07002898 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002899 }
2900
2901 return 0;
Jiang Liu989d51f2014-02-19 14:07:21 +08002902
2903free_iommu:
Jiang Liu7c919772014-01-06 14:18:18 +08002904 for_each_active_iommu(iommu, drhd)
Jiang Liua868e6b2014-01-06 14:18:20 +08002905 free_dmar_iommu(iommu);
Jiang Liu9bdc5312014-01-06 14:18:27 +08002906 kfree(deferred_flush);
Jiang Liu989d51f2014-02-19 14:07:21 +08002907free_g_iommus:
Weidong Hand9630fe2008-12-08 11:06:32 +08002908 kfree(g_iommus);
Jiang Liu989d51f2014-02-19 14:07:21 +08002909error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002910 return ret;
2911}
2912
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002913/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002914static struct iova *intel_alloc_iova(struct device *dev,
2915 struct dmar_domain *domain,
2916 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002917{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002918 struct iova *iova = NULL;
2919
David Woodhouse875764d2009-06-28 21:20:51 +01002920 /* Restrict dma_mask to the width that the iommu can handle */
2921 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2922
2923 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002924 /*
2925 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002926 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002927 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002928 */
David Woodhouse875764d2009-06-28 21:20:51 +01002929 iova = alloc_iova(&domain->iovad, nrpages,
2930 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2931 if (iova)
2932 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002933 }
David Woodhouse875764d2009-06-28 21:20:51 +01002934 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2935 if (unlikely(!iova)) {
2936 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
David Woodhouse207e3592014-03-09 16:12:32 -07002937 nrpages, dev_name(dev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002938 return NULL;
2939 }
2940
2941 return iova;
2942}
2943
David Woodhoused4b709f2014-03-09 16:07:40 -07002944static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002945{
2946 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002947 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002948
David Woodhoused4b709f2014-03-09 16:07:40 -07002949 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002950 if (!domain) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002951 printk(KERN_ERR "Allocating domain for %s failed",
2952 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002953 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002954 }
2955
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002956 /* make sure context mapping is ok */
David Woodhoused4b709f2014-03-09 16:07:40 -07002957 if (unlikely(!domain_context_mapped(dev))) {
2958 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002959 if (ret) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002960 printk(KERN_ERR "Domain context map for %s failed",
2961 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002962 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002963 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002964 }
2965
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002966 return domain;
2967}
2968
David Woodhoused4b709f2014-03-09 16:07:40 -07002969static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
David Woodhouse147202a2009-07-07 19:43:20 +01002970{
2971 struct device_domain_info *info;
2972
2973 /* No lock here, assumes no domain exit in normal case */
David Woodhoused4b709f2014-03-09 16:07:40 -07002974 info = dev->archdata.iommu;
David Woodhouse147202a2009-07-07 19:43:20 +01002975 if (likely(info))
2976 return info->domain;
2977
2978 return __get_valid_domain_for_dev(dev);
2979}
2980
David Woodhouse3d891942014-03-06 15:59:26 +00002981static int iommu_dummy(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002982{
David Woodhouse3d891942014-03-06 15:59:26 +00002983 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002984}
2985
David Woodhouseecb509e2014-03-09 16:29:55 -07002986/* Check if the dev needs to go through non-identity map and unmap process.*/
David Woodhouse73676832009-07-04 14:08:36 +01002987static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002988{
2989 int found;
2990
David Woodhouse3d891942014-03-06 15:59:26 +00002991 if (iommu_dummy(dev))
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002992 return 1;
2993
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002994 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002995 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002996
David Woodhouse9b226622014-03-09 14:03:28 -07002997 found = identity_mapping(dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002998 if (found) {
David Woodhouseecb509e2014-03-09 16:29:55 -07002999 if (iommu_should_identity_map(dev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003000 return 1;
3001 else {
3002 /*
3003 * 32 bit DMA is removed from si_domain and fall back
3004 * to non-identity mapping.
3005 */
David Woodhousebf9c9ed2014-03-09 16:19:13 -07003006 domain_remove_one_dev_info(si_domain, dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003007 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
David Woodhouseecb509e2014-03-09 16:29:55 -07003008 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003009 return 0;
3010 }
3011 } else {
3012 /*
3013 * In case of a detached 64 bit DMA device from vm, the device
3014 * is put into si_domain for identity mapping.
3015 */
David Woodhouseecb509e2014-03-09 16:29:55 -07003016 if (iommu_should_identity_map(dev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003017 int ret;
David Woodhouse5913c9b2014-03-09 16:27:31 -07003018 ret = domain_add_dev_info(si_domain, dev,
David Woodhouse5fe60f42009-08-09 10:53:41 +01003019 hw_pass_through ?
3020 CONTEXT_TT_PASS_THROUGH :
3021 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003022 if (!ret) {
3023 printk(KERN_INFO "64bit %s uses identity mapping\n",
David Woodhouseecb509e2014-03-09 16:29:55 -07003024 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003025 return 1;
3026 }
3027 }
3028 }
3029
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003030 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003031}
3032
/*
 * Map @size bytes at physical address @paddr for DMA by @dev.
 *
 * Allocates IOVA space below @dma_mask, installs page-table entries with
 * read/write permission derived from @dir, and returns the resulting bus
 * address (including the sub-page offset of @paddr).  Returns @paddr
 * unchanged for devices that bypass translation, and 0 on failure.
 */
static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;
	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;

	BUG_ON(dir == DMA_NONE);

	/* Identity-mapped / quirked devices: bus address == physical. */
	if (iommu_no_mapping(dev))
		return paddr;

	domain = get_valid_domain_for_dev(dev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	/* size becomes a page count from here on, rounded to cover paddr's
	 * sub-page offset. */
	size = aligned_nrpages(paddr, size);

	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
	if (!iova)
		goto error;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr - (paddr + size) might be partial page, we should map the whole
	 * page.  Note: if two part of one page are separately mapped, we
	 * might have two guest_addr mapping to the same host paddr, but this
	 * is not a big problem
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
				 mm_to_dma_pfn(paddr_pfn), size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
	else
		iommu_flush_write_buffer(iommu);

	/* Bus address = IOVA page base plus the original sub-page offset. */
	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;

error:
	/* iova is NULL when allocation itself failed; only free if held. */
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
		dev_name(dev), size, (unsigned long long)paddr, dir);
	return 0;
}
3097
/*
 * dma_map_ops.map_page hook: map @size bytes of @page starting at
 * @offset, bounded by the device's streaming DMA mask.
 */
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, *dev->dma_mask);
}
3106
/*
 * Drain the per-IOMMU deferred-unmap queues: invalidate the IOTLB for
 * every queued IOVA, then release the IOVAs and their page-table
 * freelists.  Caller holds async_umap_flush_lock (see
 * flush_unmaps_timeout() / add_unmap()) — NOTE(review): inferred from
 * the two visible callers, confirm before relying on it.
 */
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		/* In caching mode, global flushes turn emulation expensive */
		if (!cap_caching_mode(iommu->cap))
			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];
			struct dmar_domain *domain = deferred_flush[i].domain[j];

			/* On real hardware multiple invalidations are expensive */
			if (cap_caching_mode(iommu->cap))
				iommu_flush_iotlb_psi(iommu, domain->id,
					iova->pfn_lo, iova_size(iova),
					!deferred_flush[i].freelist[j], 0);
			else {
				/* global flush above handled the IOTLB; only
				 * the device IOTLB still needs invalidating */
				mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
				iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
						(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
			}
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
			if (deferred_flush[i].freelist[j])
				dma_free_pagelist(deferred_flush[i].freelist[j]);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}
3150
/* Timer callback (unmap_timer): drain the deferred-unmap queues under
 * the lock that flush_unmaps() expects to be held. */
static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
3159
/*
 * Queue a lazy unmap: park (@dom, @iova, @freelist) on the owning
 * IOMMU's deferred_flush[] slot.  The queue is drained by the 10ms
 * unmap_timer, or synchronously here once list_size reaches
 * HIGH_WATER_MARK.
 */
static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	/* queue full: drain it before adding this entry */
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].freelist[next] = freelist;
	deferred_flush[iommu_id].next++;

	/* arm the drain timer on the first queued entry */
	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
3186
/*
 * Tear down the DMA mapping at @dev_addr for @dev: unmap the whole IOVA
 * range that covers @dev_addr and release it.  In intel_iommu_strict
 * mode the IOTLB flush and page freeing happen synchronously; otherwise
 * they are batched via add_unmap() to amortize invalidation cost.
 */
static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
{
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;
	struct page *freelist;

	/* identity-mapped / quirked devices were never mapped by us */
	if (iommu_no_mapping(dev))
		return;

	domain = find_domain(dev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
		      (unsigned long long)dev_addr))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 dev_name(dev), start_pfn, last_pfn);

	/* clear PTEs; freed page-table pages come back as a freelist */
	freelist = domain_unmap(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1, !freelist, 0);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		dma_free_pagelist(freelist);
	} else {
		add_unmap(domain, iova, freelist);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}
3230
/* dma_map_ops.unmap_page hook — size/dir/attrs are unused; the IOVA
 * lookup in intel_unmap() recovers the mapped extent. */
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	intel_unmap(dev, dev_addr);
}
3237
/*
 * dma_map_ops.alloc hook: allocate @size bytes of zeroed, coherent
 * memory and map it bidirectionally under the device's coherent mask.
 * Tries the CMA/contiguous allocator first for blockable allocations,
 * then falls back to alloc_pages().  Returns the kernel virtual address
 * and fills *@dma_handle, or NULL on failure.
 */
static void *intel_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	struct page *page = NULL;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	if (!iommu_no_mapping(dev))
		/* IOMMU will remap: no need to restrict the physical zone */
		flags &= ~(GFP_DMA | GFP_DMA32);
	else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
		/* no translation: pick a zone the device can actually reach */
		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
			flags |= GFP_DMA;
		else
			flags |= GFP_DMA32;
	}

	if (flags & __GFP_WAIT) {
		unsigned int count = size >> PAGE_SHIFT;

		page = dma_alloc_from_contiguous(dev, count, order);
		/* CMA page unusable if it lies above an untranslated
		 * device's coherent mask — give it back */
		if (page && iommu_no_mapping(dev) &&
		    page_to_phys(page) + size > dev->coherent_dma_mask) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}

	if (!page)
		page = alloc_pages(flags, order);
	if (!page)
		return NULL;
	memset(page_address(page), 0, size);

	*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
					 DMA_BIDIRECTIONAL,
					 dev->coherent_dma_mask);
	if (*dma_handle)
		return page_address(page);
	/* mapping failed: return the pages to whichever pool they came from */
	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
		__free_pages(page, order);

	return NULL;
}
3284
David Woodhouse5040a912014-03-09 16:14:00 -07003285static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003286 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003287{
3288 int order;
Akinobu Mita36746432014-06-04 16:06:51 -07003289 struct page *page = virt_to_page(vaddr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003290
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003291 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003292 order = get_order(size);
3293
Jiang Liud41a4ad2014-07-11 14:19:34 +08003294 intel_unmap(dev, dma_handle);
Akinobu Mita36746432014-06-04 16:06:51 -07003295 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3296 __free_pages(page, order);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003297}
3298
/* dma_map_ops.unmap_sg hook: intel_map_sg() placed the whole list in
 * one contiguous IOVA allocation, so unmapping the first entry's bus
 * address tears down the entire range. */
static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	intel_unmap(dev, sglist[0].dma_address);
}
3305
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003306static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003307 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003308{
3309 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003310 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003311
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003312 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003313 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003314 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003315 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003316 }
3317 return nelems;
3318}
3319
/*
 * dma_map_ops.map_sg hook: map a whole scatterlist into one contiguous
 * IOVA allocation.  Returns the number of entries mapped, or 0 on
 * failure (with sglist[0].dma_length zeroed when IOVA allocation fails).
 */
static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_vpfn;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(dev))
		return intel_nontranslate_map_sg(dev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(dev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	/* total page count across all segments, each rounded to cover
	 * its sub-page offset */
	for_each_sg(sglist, sg, nelems, i)
		size += aligned_nrpages(sg->offset, sg->length);

	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
				*dev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);

	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
	if (unlikely(ret)) {
		/* roll back any page tables the partial mapping created */
		dma_pte_free_pagetable(domain, start_vpfn,
				       start_vpfn + size - 1);
		__free_iova(&domain->iovad, iova);
		return 0;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}
3381
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003382static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3383{
3384 return !dma_addr;
3385}
3386
/*
 * DMA API operations vector for devices behind a VT-d unit.  The map
 * hooks allocate IOVA space and install IOMMU page-table entries; the
 * unmap hooks tear them down (lazily unless intel_iommu_strict).
 */
struct dma_map_ops intel_dma_ops = {
	.alloc = intel_alloc_coherent,
	.free = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};
3396
3397static inline int iommu_domain_cache_init(void)
3398{
3399 int ret = 0;
3400
3401 iommu_domain_cache = kmem_cache_create("iommu_domain",
3402 sizeof(struct dmar_domain),
3403 0,
3404 SLAB_HWCACHE_ALIGN,
3405
3406 NULL);
3407 if (!iommu_domain_cache) {
3408 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3409 ret = -ENOMEM;
3410 }
3411
3412 return ret;
3413}
3414
3415static inline int iommu_devinfo_cache_init(void)
3416{
3417 int ret = 0;
3418
3419 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3420 sizeof(struct device_domain_info),
3421 0,
3422 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003423 NULL);
3424 if (!iommu_devinfo_cache) {
3425 printk(KERN_ERR "Couldn't create devinfo cache\n");
3426 ret = -ENOMEM;
3427 }
3428
3429 return ret;
3430}
3431
3432static inline int iommu_iova_cache_init(void)
3433{
3434 int ret = 0;
3435
3436 iommu_iova_cache = kmem_cache_create("iommu_iova",
3437 sizeof(struct iova),
3438 0,
3439 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003440 NULL);
3441 if (!iommu_iova_cache) {
3442 printk(KERN_ERR "Couldn't create iova cache\n");
3443 ret = -ENOMEM;
3444 }
3445
3446 return ret;
3447}
3448
3449static int __init iommu_init_mempool(void)
3450{
3451 int ret;
3452 ret = iommu_iova_cache_init();
3453 if (ret)
3454 return ret;
3455
3456 ret = iommu_domain_cache_init();
3457 if (ret)
3458 goto domain_error;
3459
3460 ret = iommu_devinfo_cache_init();
3461 if (!ret)
3462 return ret;
3463
3464 kmem_cache_destroy(iommu_domain_cache);
3465domain_error:
3466 kmem_cache_destroy(iommu_iova_cache);
3467
3468 return -ENOMEM;
3469}
3470
3471static void __init iommu_exit_mempool(void)
3472{
3473 kmem_cache_destroy(iommu_devinfo_cache);
3474 kmem_cache_destroy(iommu_domain_cache);
3475 kmem_cache_destroy(iommu_iova_cache);
3476
3477}
3478
/*
 * PCI enable-time quirk for the SNB IOAT QuickData device: if the BIOS
 * DMAR table assigns it to the wrong IOMMU, taint the kernel and mark
 * the device as untranslated (DUMMY_DEVICE_DOMAIN_INFO).
 */
static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
{
	struct dmar_drhd_unit *drhd;
	u32 vtbar;
	int rc;

	/* We know that this device on this chipset has its own IOMMU.
	 * If we find it under a different IOMMU, then the BIOS is lying
	 * to us. Hope that the IOMMU for this device is actually
	 * disabled, and it needs no translation...
	 */
	/* VTBAR lives at config offset 0xb0 of device 00.0 on this bus */
	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
	if (rc) {
		/* "can't" happen */
		dev_info(&pdev->dev, "failed to run vt-d quirk\n");
		return;
	}
	vtbar &= 0xffff0000;

	/* we know that the this iommu should be at offset 0xa000 from vtbar */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
			    TAINT_FIRMWARE_WORKAROUND,
			    "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
		pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3506
/*
 * Mark DRHD units that should not be used for DMA remapping as ignored:
 *  - non-INCLUDE_ALL units whose device scope contains no active device;
 *  - units covering *only* graphics devices, when dmar_map_gfx is clear
 *    (each gfx device then gets a dummy archdata so DMA bypasses it).
 * When gfx-only units stay mapped, intel_iommu_gfx_mapped is set instead.
 */
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;
	struct device *dev;
	int i;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			/* loop body is just "break": only probes whether
			 * at least one active device exists in the scope */
			for_each_active_dev_scope(drhd->devices,
						  drhd->devices_cnt, i, dev)
				break;
			/* ignore DMAR unit if no devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	for_each_active_drhd_unit(drhd) {
		if (drhd->include_all)
			continue;

		/* breaks (i < devices_cnt) on the first non-gfx device */
		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, dev)
			if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
				break;
		if (i < drhd->devices_cnt)
			continue;

		/* This IOMMU has *only* gfx devices. Either bypass it or
		   set the gfx_mapped flag, as appropriate */
		if (dmar_map_gfx) {
			intel_iommu_gfx_mapped = 1;
		} else {
			drhd->ignored = 1;
			for_each_active_dev_scope(drhd->devices,
						  drhd->devices_cnt, i, dev)
				dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}
3547
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003548#ifdef CONFIG_SUSPEND
/*
 * Reprogram all IOMMUs after resume: re-enable queued invalidation
 * where it was in use, reinstall the root entry, globally invalidate
 * the context-cache and IOTLB, then re-enable translation.  Ignored
 * units only get their protected memory regions disabled (under
 * force_on) so DMA to them keeps working.  Always returns 0.
 */
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
		iommu_enable_translation(iommu);
		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;
}
3582
/* Globally invalidate the context-cache and IOTLB of every active IOMMU. */
static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}
3595
/*
 * syscore suspend hook: flush all caches, disable translation, and save
 * the fault-event registers (FECTL/FEDATA/FEADDR/FEUADDR) of every
 * active IOMMU into iommu->iommu_state for iommu_resume() to restore.
 * Returns 0 on success or -ENOMEM if state buffers cannot be allocated.
 */
static int iommu_suspend(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		/* GFP_ATOMIC: called late in suspend, must not sleep */
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
					     GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	/*
	 * NOTE(review): this frees iommu_state for every active IOMMU but
	 * leaves the stale pointers in place — presumably fine because a
	 * failed suspend means iommu_resume() never runs; verify.
	 */
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}
3635
/*
 * syscore resume hook: bring the IOMMU hardware back up via
 * init_iommu_hw(), restore the fault-event registers saved by
 * iommu_suspend(), then free the saved state.  If the hardware cannot
 * be reinitialized, panics under tboot (force_on) or warns otherwise.
 */
static void iommu_resume(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		if (force_on)
			panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
		else
			WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return;
	}

	for_each_active_iommu(iommu, drhd) {

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
			iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
			iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
			iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
			iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);
}
3669
/* Suspend/resume callbacks registered with the syscore framework. */
static struct syscore_ops iommu_syscore_ops = {
	.resume		= iommu_resume,
	.suspend	= iommu_suspend,
};
3674
/* Hook the IOMMU suspend/resume ops into the syscore PM machinery. */
static void __init init_iommu_pm_ops(void)
{
	register_syscore_ops(&iommu_syscore_ops);
}
3679
#else
/* Without CONFIG_SUSPEND there is no IOMMU state to save or restore. */
static inline void init_iommu_pm_ops(void) {}
#endif	/* CONFIG_SUSPEND */
3683
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003684
3685int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3686{
3687 struct acpi_dmar_reserved_memory *rmrr;
3688 struct dmar_rmrr_unit *rmrru;
3689
3690 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3691 if (!rmrru)
3692 return -ENOMEM;
3693
3694 rmrru->hdr = header;
3695 rmrr = (struct acpi_dmar_reserved_memory *)header;
3696 rmrru->base_address = rmrr->base_address;
3697 rmrru->end_address = rmrr->end_address;
Jiang Liu2e455282014-02-19 14:07:36 +08003698 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3699 ((void *)rmrr) + rmrr->header.length,
3700 &rmrru->devices_cnt);
3701 if (rmrru->devices_cnt && rmrru->devices == NULL) {
3702 kfree(rmrru);
3703 return -ENOMEM;
3704 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003705
Jiang Liu2e455282014-02-19 14:07:36 +08003706 list_add(&rmrru->list, &dmar_rmrr_units);
3707
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003708 return 0;
3709}
3710
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003711int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3712{
3713 struct acpi_dmar_atsr *atsr;
3714 struct dmar_atsr_unit *atsru;
3715
3716 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3717 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3718 if (!atsru)
3719 return -ENOMEM;
3720
3721 atsru->hdr = hdr;
3722 atsru->include_all = atsr->flags & 0x1;
Jiang Liu2e455282014-02-19 14:07:36 +08003723 if (!atsru->include_all) {
3724 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
3725 (void *)atsr + atsr->header.length,
3726 &atsru->devices_cnt);
3727 if (atsru->devices_cnt && atsru->devices == NULL) {
3728 kfree(atsru);
3729 return -ENOMEM;
3730 }
3731 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003732
Jiang Liu0e242612014-02-19 14:07:34 +08003733 list_add_rcu(&atsru->list, &dmar_atsr_units);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003734
3735 return 0;
3736}
3737
/* Release one ATSR unit together with its device-scope array. */
static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
{
	dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
	kfree(atsru);
}
3743
3744static void intel_iommu_free_dmars(void)
3745{
3746 struct dmar_rmrr_unit *rmrru, *rmrr_n;
3747 struct dmar_atsr_unit *atsru, *atsr_n;
3748
3749 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
3750 list_del(&rmrru->list);
3751 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
3752 kfree(rmrru);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003753 }
3754
Jiang Liu9bdc5312014-01-06 14:18:27 +08003755 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
3756 list_del(&atsru->list);
3757 intel_iommu_free_atsr(atsru);
3758 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003759}
3760
/*
 * Return 1 if @dev sits below a PCIe root port that an ATSR unit covers
 * (directly in its device scope, or via INCLUDE_ALL), 0 otherwise.
 * The ATSR list is walked under rcu_read_lock() since hotplug can
 * modify it concurrently.
 */
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i, ret = 1;
	struct pci_bus *bus;
	struct pci_dev *bridge = NULL;
	struct device *tmp;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	/* For a VF, match against its physical function */
	dev = pci_physfn(dev);
	/* Walk upstream until we hit the PCIe root port */
	for (bus = dev->bus; bus; bus = bus->parent) {
		bridge = bus->self;
		if (!bridge || !pci_is_pcie(bridge) ||
		    pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
			break;
	}
	if (!bridge)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment != pci_domain_nr(dev->bus))
			continue;

		/* ret is still 1 here: a scope match means "covered" */
		for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
			if (tmp == &bridge->dev)
				goto out;

		if (atsru->include_all)
			goto out;
	}
	ret = 0;
out:
	rcu_read_unlock();

	return ret;
}
3801
/*
 * PCI hotplug hook: keep the cached RMRR and ATSR device-scope arrays
 * in sync when a device appears on (BUS_NOTIFY_ADD_DEVICE) or leaves
 * (BUS_NOTIFY_DEL_DEVICE) the bus.  Returns 0, or a negative errno
 * propagated from dmar_insert_dev_scope().
 */
int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_rmrr_unit *rmrru;
	struct dmar_atsr_unit *atsru;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_reserved_memory *rmrr;

	/* Nothing cached to maintain when the IOMMU never came up */
	if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
		return 0;

	list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
		rmrr = container_of(rmrru->hdr,
				    struct acpi_dmar_reserved_memory, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
				((void *)rmrr) + rmrr->header.length,
				rmrr->segment, rmrru->devices,
				rmrru->devices_cnt);
			if(ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
			dmar_remove_dev_scope(info, rmrr->segment,
				rmrru->devices, rmrru->devices_cnt);
		}
	}

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		if (atsru->include_all)
			continue;

		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
					(void *)atsr + atsr->header.length,
					atsr->segment, atsru->devices,
					atsru->devices_cnt);
			/* > 0: device matched this ATSR; it can match only one */
			if (ret > 0)
				break;
			else if(ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
			if (dmar_remove_dev_scope(info, atsr->segment,
					atsru->devices, atsru->devices_cnt))
				break;
		}
	}

	return 0;
}
3852
/*
 * Here we only respond to action of unbound device from driver.
 *
 * Added device is not attached to its DMAR domain here yet. That will happen
 * when mapping the device to iova.
 */
static int device_notifier(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	struct device *dev = data;
	struct dmar_domain *domain;

	/* Devices deliberately bypassed by the driver have no domain */
	if (iommu_dummy(dev))
		return 0;

	if (action != BUS_NOTIFY_REMOVED_DEVICE)
		return 0;

	domain = find_domain(dev);
	if (!domain)
		return 0;

	down_read(&dmar_global_lock);
	domain_remove_one_dev_info(domain, dev);
	/* Dynamically allocated domains are freed once empty; VM and
	 * static-identity domains persist */
	if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
		domain_exit(domain);
	up_read(&dmar_global_lock);

	return 0;
}
3883
/* Bus notifier that tears down per-device domain state on removal. */
static struct notifier_block device_nb = {
	.notifier_call = device_notifier,
};
3887
Jiang Liu75f05562014-02-19 14:07:37 +08003888static int intel_iommu_memory_notifier(struct notifier_block *nb,
3889 unsigned long val, void *v)
3890{
3891 struct memory_notify *mhp = v;
3892 unsigned long long start, end;
3893 unsigned long start_vpfn, last_vpfn;
3894
3895 switch (val) {
3896 case MEM_GOING_ONLINE:
3897 start = mhp->start_pfn << PAGE_SHIFT;
3898 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
3899 if (iommu_domain_identity_map(si_domain, start, end)) {
3900 pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
3901 start, end);
3902 return NOTIFY_BAD;
3903 }
3904 break;
3905
3906 case MEM_OFFLINE:
3907 case MEM_CANCEL_ONLINE:
3908 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
3909 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
3910 while (start_vpfn <= last_vpfn) {
3911 struct iova *iova;
3912 struct dmar_drhd_unit *drhd;
3913 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003914 struct page *freelist;
Jiang Liu75f05562014-02-19 14:07:37 +08003915
3916 iova = find_iova(&si_domain->iovad, start_vpfn);
3917 if (iova == NULL) {
3918 pr_debug("dmar: failed get IOVA for PFN %lx\n",
3919 start_vpfn);
3920 break;
3921 }
3922
3923 iova = split_and_remove_iova(&si_domain->iovad, iova,
3924 start_vpfn, last_vpfn);
3925 if (iova == NULL) {
3926 pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
3927 start_vpfn, last_vpfn);
3928 return NOTIFY_BAD;
3929 }
3930
David Woodhouseea8ea462014-03-05 17:09:32 +00003931 freelist = domain_unmap(si_domain, iova->pfn_lo,
3932 iova->pfn_hi);
3933
Jiang Liu75f05562014-02-19 14:07:37 +08003934 rcu_read_lock();
3935 for_each_active_iommu(iommu, drhd)
3936 iommu_flush_iotlb_psi(iommu, si_domain->id,
Jiang Liua156ef92014-07-11 14:19:36 +08003937 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00003938 !freelist, 0);
Jiang Liu75f05562014-02-19 14:07:37 +08003939 rcu_read_unlock();
David Woodhouseea8ea462014-03-05 17:09:32 +00003940 dma_free_pagelist(freelist);
Jiang Liu75f05562014-02-19 14:07:37 +08003941
3942 start_vpfn = iova->pfn_hi + 1;
3943 free_iova_mem(iova);
3944 }
3945 break;
3946 }
3947
3948 return NOTIFY_OK;
3949}
3950
/* Memory-hotplug notifier; registered only when the si domain is used. */
static struct notifier_block intel_iommu_memory_nb = {
	.notifier_call = intel_iommu_memory_notifier,
	.priority = 0
};
3955
Alex Williamsona5459cf2014-06-12 16:12:31 -06003956
3957static ssize_t intel_iommu_show_version(struct device *dev,
3958 struct device_attribute *attr,
3959 char *buf)
3960{
3961 struct intel_iommu *iommu = dev_get_drvdata(dev);
3962 u32 ver = readl(iommu->reg + DMAR_VER_REG);
3963 return sprintf(buf, "%d:%d\n",
3964 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
3965}
3966static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
3967
3968static ssize_t intel_iommu_show_address(struct device *dev,
3969 struct device_attribute *attr,
3970 char *buf)
3971{
3972 struct intel_iommu *iommu = dev_get_drvdata(dev);
3973 return sprintf(buf, "%llx\n", iommu->reg_phys);
3974}
3975static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
3976
3977static ssize_t intel_iommu_show_cap(struct device *dev,
3978 struct device_attribute *attr,
3979 char *buf)
3980{
3981 struct intel_iommu *iommu = dev_get_drvdata(dev);
3982 return sprintf(buf, "%llx\n", iommu->cap);
3983}
3984static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
3985
3986static ssize_t intel_iommu_show_ecap(struct device *dev,
3987 struct device_attribute *attr,
3988 char *buf)
3989{
3990 struct intel_iommu *iommu = dev_get_drvdata(dev);
3991 return sprintf(buf, "%llx\n", iommu->ecap);
3992}
3993static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
3994
/* Per-IOMMU sysfs attributes exported via the group below. */
static struct attribute *intel_iommu_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_address.attr,
	&dev_attr_cap.attr,
	&dev_attr_ecap.attr,
	NULL,
};
4002
/* Groups the attributes above under an "intel-iommu" sysfs directory. */
static struct attribute_group intel_iommu_group = {
	.name = "intel-iommu",
	.attrs = intel_iommu_attrs,
};
4007
/* NULL-terminated group list handed to iommu_device_create(). */
const struct attribute_group *intel_iommu_groups[] = {
	&intel_iommu_group,
	NULL,
};
4012
/*
 * Driver entry point: parse the DMAR tables, set up the hardware units,
 * install the Intel dma_ops and register the iommu API, sysfs entries
 * and hotplug notifiers.  Under a tboot launch (force_on) any setup
 * failure is fatal; otherwise errors unwind and return a negative errno.
 */
int __init intel_iommu_init(void)
{
	int ret = -ENODEV;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* VT-d is required for a TXT/tboot launch, so enforce that */
	force_on = tboot_force_iommu();

	if (iommu_init_mempool()) {
		if (force_on)
			panic("tboot: Failed to initialize iommu memory\n");
		return -ENOMEM;
	}

	down_write(&dmar_global_lock);
	if (dmar_table_init()) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR table\n");
		goto out_free_dmar;
	}

	/*
	 * Disable translation if already enabled prior to OS handover.
	 */
	for_each_active_iommu(iommu, drhd)
		if (iommu->gcmd & DMA_GCMD_TE)
			iommu_disable_translation(iommu);

	if (dmar_dev_scope_init() < 0) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR device scope\n");
		goto out_free_dmar;
	}

	if (no_iommu || dmar_disabled)
		goto out_free_dmar;

	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO "DMAR: No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO "DMAR: No ATSR found\n");

	/*
	 * NOTE(review): failure here jumps to out_free_reserved_range,
	 * which calls put_iova_domain() on reserved_iova_list even though
	 * its init just failed — presumably harmless; verify.
	 */
	if (dmar_init_reserved_ranges()) {
		if (force_on)
			panic("tboot: Failed to reserve iommu ranges\n");
		goto out_free_reserved_range;
	}

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		if (force_on)
			panic("tboot: Failed to initialize DMARs\n");
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		goto out_free_reserved_range;
	}
	up_write(&dmar_global_lock);
	printk(KERN_INFO
	"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
	/* Hardware translation makes the software bounce buffer redundant */
	swiotlb = 0;
#endif
	dma_ops = &intel_dma_ops;

	init_iommu_pm_ops();

	for_each_active_iommu(iommu, drhd)
		iommu->iommu_dev = iommu_device_create(NULL, iommu,
						       intel_iommu_groups,
						       iommu->name);

	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
	bus_register_notifier(&pci_bus_type, &device_nb);
	/* si domain mappings must track memory hotplug (not needed with
	 * hardware passthrough) */
	if (si_domain && !hw_pass_through)
		register_memory_notifier(&intel_iommu_memory_nb);

	intel_iommu_enabled = 1;

	return 0;

out_free_reserved_range:
	put_iova_domain(&reserved_iova_list);
out_free_dmar:
	intel_iommu_free_dmars();
	up_write(&dmar_global_lock);
	iommu_exit_mempool();
	return ret;
}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07004106
Alex Williamson579305f2014-07-03 09:51:43 -06004107static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4108{
4109 struct intel_iommu *iommu = opaque;
4110
4111 iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4112 return 0;
4113}
4114
4115/*
4116 * NB - intel-iommu lacks any sort of reference counting for the users of
4117 * dependent devices. If multiple endpoints have intersecting dependent
4118 * devices, unbinding the driver from any one of them will possibly leave
4119 * the others unable to operate.
4120 */
Han, Weidong3199aa62009-02-26 17:31:12 +08004121static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004122 struct device *dev)
Han, Weidong3199aa62009-02-26 17:31:12 +08004123{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004124 if (!iommu || !dev || !dev_is_pci(dev))
Han, Weidong3199aa62009-02-26 17:31:12 +08004125 return;
4126
Alex Williamson579305f2014-07-03 09:51:43 -06004127 pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
Han, Weidong3199aa62009-02-26 17:31:12 +08004128}
4129
/*
 * Detach @dev from @domain: unlink its device_domain_info, disable its
 * device IOTLB, clear the context entries for it and its DMA aliases,
 * and — if no other device on the same IOMMU remains in the domain —
 * drop the domain's reference to that IOMMU.
 */
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev)
{
	struct device_domain_info *info, *tmp;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
		if (info->iommu == iommu && info->bus == bus &&
		    info->devfn == devfn) {
			unlink_domain_info(info);
			/* Drop the lock for the hardware teardown; the
			 * _safe iterator protects only our own removal */
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, dev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			/* Another device on this IOMMU was already seen:
			 * the answer can't change, stop walking */
			if (found)
				break;
			else
				continue;
		}

		/* if there is no other devices under the same iommu
		 * owned by this domain, clear this iommu in iommu_bmp
		 * update iommu count and coherency
		 */
		if (info->iommu == iommu)
			found = 1;
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (found == 0) {
		domain_detach_iommu(domain, iommu);
		if (!domain_type_is_vm_or_si(domain))
			iommu_detach_domain(domain, iommu);
	}
}
4179
/*
 * Minimal software-side setup for an iommu-API managed domain: iova
 * allocator, address-width (gaw/agaw) bookkeeping and the top-level
 * page table.  No hardware is programmed here.  Returns 0 or -ENOMEM.
 */
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	/* Capability bits are recomputed later from the attached IOMMUs */
	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->iommu_superpage = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}
4204
/*
 * iommu API: allocate a virtual-machine style dmar_domain, initialize
 * it via md_domain_init() and hang it off @domain->priv.  Also fills
 * in the aperture geometry for the generic iommu layer.
 * Returns 0 or -ENOMEM.
 */
static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
	if (!dmar_domain) {
		printk(KERN_ERR
			"intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
			"intel_iommu_domain_init() failed\n");
		domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain_update_iommu_cap(dmar_domain);
	domain->priv = dmar_domain;

	/* Advertise the addressable IOVA range to the generic layer */
	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
	domain->geometry.force_aperture = true;

	return 0;
}
Kay, Allen M38717942008-09-09 18:37:29 +03004230
Joerg Roedel5d450802008-12-03 14:52:32 +01004231static void intel_iommu_domain_destroy(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004232{
Joerg Roedel5d450802008-12-03 14:52:32 +01004233 struct dmar_domain *dmar_domain = domain->priv;
4234
4235 domain->priv = NULL;
Jiang Liu92d03cc2014-02-19 14:07:28 +08004236 domain_exit(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03004237}
Kay, Allen M38717942008-09-09 18:37:29 +03004238
/*
 * Attach @dev to the VT-d domain backing @domain.
 *
 * Rejects devices that platform RMRRs pin to the identity map, detaches
 * the device from any domain it is currently mapped into, verifies the
 * hardware unit's address width can cover everything already mapped in
 * the domain, trims excess page-table levels, and finally installs the
 * device's context entry.
 *
 * Returns 0 on success, -EPERM for RMRR-locked devices, -ENODEV if no
 * IOMMU claims the device, -EFAULT if the IOMMU cannot address the
 * domain's current max_addr.
 */
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct intel_iommu *iommu;
	int addr_width;
	u8 bus, devfn;

	/* Devices covered by an RMRR must keep their identity mapping;
	 * handing them to a caller-owned domain would break ongoing DMA. */
	if (device_is_rmrr_locked(dev)) {
		dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement.  Contact your platform vendor.\n");
		return -EPERM;
	}

	/* normally dev is not mapped */
	if (unlikely(domain_context_mapped(dev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(dev);
		if (old_domain) {
			/* VM/static-identity domains are shared: evict only
			 * this device.  A dedicated DMA domain can drop all
			 * of its device info instead. */
			if (domain_type_is_vm_or_si(dmar_domain))
				domain_remove_one_dev_info(old_domain, dev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		printk(KERN_ERR "%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			/* Promote the (sole) child table to root and free
			 * the now-redundant top level. */
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004299
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004300static void intel_iommu_detach_device(struct iommu_domain *domain,
4301 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004302{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004303 struct dmar_domain *dmar_domain = domain->priv;
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004304
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004305 domain_remove_one_dev_info(dmar_domain, dev);
Kay, Allen M38717942008-09-09 18:37:29 +03004306}
Kay, Allen M38717942008-09-09 18:37:29 +03004307
/*
 * Map [iova, iova + size) to host physical address @hpa in @domain.
 *
 * Translates generic IOMMU_* protection flags into DMA PTE bits (the
 * snoop bit only when the hardware supports snoop control), grows the
 * domain's recorded max_addr if needed — failing with -EFAULT when the
 * domain's guest address width cannot reach the new end — and installs
 * the page-table entries.  Returns 0 on success or a negative errno.
 */
static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	/* Only set the snoop bit if this domain's IOMMUs can honour it. */
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004345
/*
 * Unmap the region starting at @iova from @domain and flush the IOTLB
 * on every IOMMU the domain is attached to.
 *
 * If @iova sits inside a large-page mapping, the whole large page is
 * unmapped even when @size is smaller (the generic API requires the
 * caller to cope with the returned, possibly larger, size).  Returns
 * the number of bytes actually unmapped.
 */
static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct page *freelist = NULL;
	struct intel_iommu *iommu;
	unsigned long start_pfn, last_pfn;
	unsigned int npages;
	int iommu_id, num, ndomains, level = 0;

	/* Cope with horrid API which requires us to unmap more than the
	   size argument if it happens to be a large-page mapping. */
	if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
		BUG();

	if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
		size = VTD_PAGE_SIZE << level_to_offset_bits(level);

	start_pfn = iova >> VTD_PAGE_SHIFT;
	last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;

	/* Clear the PTEs; freed page-table pages are collected on the
	 * freelist and only released after the IOTLB flush below. */
	freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);

	npages = last_pfn - start_pfn + 1;

	for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
		iommu = g_iommus[iommu_id];

		/*
		 * find bit position of dmar_domain
		 */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == dmar_domain)
				iommu_flush_iotlb_psi(iommu, num, start_pfn,
						      npages, !freelist, 0);
		}

	}

	/* Safe to free the old page tables now that no IOMMU caches them. */
	dma_free_pagelist(freelist);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return size;
}
Kay, Allen M38717942008-09-09 18:37:29 +03004393
Joerg Roedeld14d6572008-12-03 15:06:57 +01004394static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
Varun Sethibb5547ac2013-03-29 01:23:58 +05304395 dma_addr_t iova)
Kay, Allen M38717942008-09-09 18:37:29 +03004396{
Joerg Roedeld14d6572008-12-03 15:06:57 +01004397 struct dmar_domain *dmar_domain = domain->priv;
Kay, Allen M38717942008-09-09 18:37:29 +03004398 struct dma_pte *pte;
David Woodhouse5cf0a762014-03-19 16:07:49 +00004399 int level = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004400 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004401
David Woodhouse5cf0a762014-03-19 16:07:49 +00004402 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
Kay, Allen M38717942008-09-09 18:37:29 +03004403 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004404 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03004405
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004406 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03004407}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004408
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004409static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
4410 unsigned long cap)
4411{
4412 struct dmar_domain *dmar_domain = domain->priv;
4413
4414 if (cap == IOMMU_CAP_CACHE_COHERENCY)
4415 return dmar_domain->iommu_snooping;
Tom Lyon323f99c2010-07-02 16:56:14 -04004416 if (cap == IOMMU_CAP_INTR_REMAP)
Suresh Siddha95a02e92012-03-30 11:47:07 -07004417 return irq_remapping_enabled;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004418
4419 return 0;
4420}
4421
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004422static int intel_iommu_add_device(struct device *dev)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004423{
Alex Williamsona5459cf2014-06-12 16:12:31 -06004424 struct intel_iommu *iommu;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004425 struct iommu_group *group;
David Woodhouse156baca2014-03-09 14:00:57 -07004426 u8 bus, devfn;
Alex Williamson70ae6f02011-10-21 15:56:11 -04004427
Alex Williamsona5459cf2014-06-12 16:12:31 -06004428 iommu = device_to_iommu(dev, &bus, &devfn);
4429 if (!iommu)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004430 return -ENODEV;
4431
Alex Williamsona5459cf2014-06-12 16:12:31 -06004432 iommu_device_link(iommu->iommu_dev, dev);
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004433
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004434 group = iommu_group_get_for_dev(dev);
Alex Williamson783f1572012-05-30 14:19:43 -06004435
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004436 if (IS_ERR(group))
4437 return PTR_ERR(group);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004438
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004439 iommu_group_put(group);
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004440 return 0;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004441}
4442
4443static void intel_iommu_remove_device(struct device *dev)
4444{
Alex Williamsona5459cf2014-06-12 16:12:31 -06004445 struct intel_iommu *iommu;
4446 u8 bus, devfn;
4447
4448 iommu = device_to_iommu(dev, &bus, &devfn);
4449 if (!iommu)
4450 return;
4451
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004452 iommu_group_remove_device(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004453
4454 iommu_device_unlink(iommu->iommu_dev, dev);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004455}
4456
/*
 * VT-d implementation of the generic IOMMU API
 * (struct iommu_ops, include/linux/iommu.h).
 */
static const struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy = intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap = intel_iommu_domain_has_cap,
	.add_device	= intel_iommu_add_device,
	.remove_device	= intel_iommu_remove_device,
	/* Page sizes this driver can map (4K plus supported superpages). */
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};
David Woodhouse9af88142009-02-13 23:18:03 +00004470
/*
 * PCI quirk: disable DMAR translation for integrated graphics on G4x /
 * GM45 chipsets, where it is known not to work.
 */
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

/* IGD PCI device IDs of the affected G4x/GM45 chipsets. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4485
/*
 * PCI quirk: force the software write-buffer-flush workaround on
 * chipsets whose DMAR units need it but do not advertise the RWBF
 * capability bit.
 */
static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

/* Host-bridge PCI device IDs of the affected chipsets. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
David Woodhousee0fc7e02009-09-30 09:12:17 -07004503
/* Graphics Control register in PCI config space and its GGC fields
 * (graphics-stolen-memory size and VT mode bits). */
#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

/*
 * PCI quirk for Calpella/Ironlake: if the BIOS allocated no shadow GTT
 * space for VT-d, graphics translation cannot work, so disable DMAR for
 * the IGD.  If it did, still force strict (unbatched) IOTLB flushing,
 * since batched flushes are unsafe for the gfx device on these parts.
 */
static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	/* If the register is hidden/unreadable we cannot tell; leave
	 * the configuration alone. */
	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4534
/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that. We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	/* Offset 0x188 holds the VTISOCHCTRL register on this device;
	 * an unreadable config space ends the check. */
	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		/* Work around the deadlock by identity-mapping Azalia. */
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	/* Non-zero but sub-recommended TLB allocation: warn, but no
	 * workaround is applied. */
	printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}