blob: 58cc2b75d7aeb4268b7822b9703e5086c6376389 [file] [log] [blame]
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001/*
David Woodhouseea8ea462014-03-05 17:09:32 +00002 * Copyright © 2006-2014 Intel Corporation.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
David Woodhouseea8ea462014-03-05 17:09:32 +000013 * Authors: David Woodhouse <dwmw2@infradead.org>,
14 * Ashok Raj <ashok.raj@intel.com>,
15 * Shaohua Li <shaohua.li@intel.com>,
16 * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17 * Fenghua Yu <fenghua.yu@intel.com>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070018 */
19
20#include <linux/init.h>
21#include <linux/bitmap.h>
mark gross5e0d2a62008-03-04 15:22:08 -080022#include <linux/debugfs.h>
Paul Gortmaker54485c32011-10-29 10:26:25 -040023#include <linux/export.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070024#include <linux/slab.h>
25#include <linux/irq.h>
26#include <linux/interrupt.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070027#include <linux/spinlock.h>
28#include <linux/pci.h>
29#include <linux/dmar.h>
30#include <linux/dma-mapping.h>
31#include <linux/mempool.h>
Jiang Liu75f05562014-02-19 14:07:37 +080032#include <linux/memory.h>
mark gross5e0d2a62008-03-04 15:22:08 -080033#include <linux/timer.h>
Kay, Allen M38717942008-09-09 18:37:29 +030034#include <linux/iova.h>
Joerg Roedel5d450802008-12-03 14:52:32 +010035#include <linux/iommu.h>
Kay, Allen M38717942008-09-09 18:37:29 +030036#include <linux/intel-iommu.h>
Rafael J. Wysocki134fac32011-03-23 22:16:14 +010037#include <linux/syscore_ops.h>
Shane Wang69575d32009-09-01 18:25:07 -070038#include <linux/tboot.h>
Stephen Rothwelladb2fe02009-08-31 15:24:23 +100039#include <linux/dmi.h>
Joerg Roedel5cdede22011-04-04 15:55:18 +020040#include <linux/pci-ats.h>
Tejun Heo0ee332c2011-12-08 10:22:09 -080041#include <linux/memblock.h>
Akinobu Mita36746432014-06-04 16:06:51 -070042#include <linux/dma-contiguous.h>
Suresh Siddha8a8f4222012-03-30 11:47:08 -070043#include <asm/irq_remapping.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070044#include <asm/cacheflush.h>
FUJITA Tomonori46a7fa22008-07-11 10:23:42 +090045#include <asm/iommu.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070046
Joerg Roedel078e1ee2012-09-26 12:44:43 +020047#include "irq_remapping.h"
48
Fenghua Yu5b6985c2008-10-16 18:02:32 -070049#define ROOT_SIZE VTD_PAGE_SIZE
50#define CONTEXT_SIZE VTD_PAGE_SIZE
51
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070052#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
53#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
David Woodhousee0fc7e02009-09-30 09:12:17 -070054#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070055
56#define IOAPIC_RANGE_START (0xfee00000)
57#define IOAPIC_RANGE_END (0xfeefffff)
58#define IOVA_START_ADDR (0x1000)
59
60#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
61
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -070062#define MAX_AGAW_WIDTH 64
Jiang Liu5c645b32014-01-06 14:18:12 +080063#define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -070064
David Woodhouse2ebe3152009-09-19 07:34:04 -070065#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
66#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
67
68/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
69 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
70#define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
71 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
72#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070073
Mark McLoughlinf27be032008-11-20 15:49:43 +000074#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
Yang Hongyang284901a2009-04-06 19:01:15 -070075#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
Yang Hongyang6a355282009-04-06 19:01:13 -070076#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
mark gross5e0d2a62008-03-04 15:22:08 -080077
Andrew Mortondf08cdc2010-09-22 13:05:11 -070078/* page table handling */
79#define LEVEL_STRIDE (9)
80#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
81
Ohad Ben-Cohen6d1c56a2011-11-10 11:32:30 +020082/*
83 * This bitmap is used to advertise the page sizes our hardware support
84 * to the IOMMU core, which will then use this information to split
85 * physically contiguous memory regions it is mapping into page sizes
86 * that we support.
87 *
88 * Traditionally the IOMMU core just handed us the mappings directly,
89 * after making sure the size is an order of a 4KiB page and that the
90 * mapping has natural alignment.
91 *
92 * To retain this behavior, we currently advertise that we support
93 * all page sizes that are an order of 4KiB.
94 *
95 * If at some point we'd like to utilize the IOMMU core's new behavior,
96 * we could change this to advertise the real page sizes we support.
97 */
98#define INTEL_IOMMU_PGSIZES (~0xFFFUL)
99
/*
 * Convert an adjusted guest address width (AGAW) encoding to the number
 * of page-table levels it implies.  AGAW 0 is a 2-level (30-bit) table,
 * and each increment adds one level, so the level count is agaw + 2.
 */
static inline int agaw_to_level(int agaw)
{
	int levels = agaw + 2;

	return levels;
}
104
/*
 * Convert an AGAW encoding to the guest address width in bits:
 * 30 bits for AGAW 0 plus one level's worth of stride per step,
 * capped at the architectural maximum of MAX_AGAW_WIDTH bits.
 */
static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}
109
/*
 * Convert an address width in bits to the smallest AGAW encoding
 * whose page table can cover it (inverse of agaw_to_width()).
 */
static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}
114
/* Number of low address bits consumed below page-table @level (level 1 = 0). */
static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}
119
120static inline int pfn_level_offset(unsigned long pfn, int level)
121{
122 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
123}
124
/* Mask selecting the PFN bits at or above page-table @level's boundary. */
static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}
129
/* Number of base pages covered by one entry at page-table @level. */
static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}
134
/* Round @pfn up to the next entry boundary of page-table @level. */
static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	unsigned long sz = level_size(level);

	return (pfn + sz - 1) & level_mask(level);
}
David Woodhousefd18de52009-05-10 23:57:41 +0100139
/*
 * Number of VT-d pages mapped by one superpage entry at level @lvl,
 * clamped so the shift never exceeds the maximum PFN width.
 */
static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
144
David Woodhousedd4e8312009-06-27 16:21:20 +0100145/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
146 are never going to work. */
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
/* Convert a VT-d (4KiB) PFN to the CPU's (possibly larger) MM PFN. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
151
/* Convert a CPU MM PFN to the equivalent VT-d (4KiB) PFN. */
static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
/* VT-d PFN of the first 4KiB page backing struct page @pg. */
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
/* VT-d PFN of the page containing directly-mapped kernel address @p. */
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
164
Weidong Hand9630fe2008-12-08 11:06:32 +0800165/* global iommu list, set NULL for ignored DMAR units */
166static struct intel_iommu **g_iommus;
167
David Woodhousee0fc7e02009-09-30 09:12:17 -0700168static void __init check_tylersburg_isoch(void);
David Woodhouse9af88142009-02-13 23:18:03 +0000169static int rwbf_quirk;
170
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000171/*
Joseph Cihulab7792602011-05-03 00:08:37 -0700172 * set to 1 to panic kernel if can't successfully enable VT-d
173 * (used when kernel is launched w/ TXT)
174 */
175static int force_on = 0;
176
177/*
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000178 * 0: Present
179 * 1-11: Reserved
180 * 12-63: Context Ptr (12 - (haw-1))
181 * 64-127: Reserved
182 */
/* One entry of the VT-d root table; see layout comment above. */
struct root_entry {
	u64	val;	/* present bit + physical context-table pointer */
	u64	rsvd1;	/* bits 64-127 reserved, must be zero */
};
/* Entries per root table: one 4KiB page's worth */
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
188static inline bool root_present(struct root_entry *root)
189{
190 return (root->val & 1);
191}
/* Set the present bit (bit 0) of a root entry. */
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
/*
 * Install the page-aligned physical context-table address in a root
 * entry.  Note: this ORs the value in, so it assumes the address
 * field was previously clear.
 */
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}
200
201static inline struct context_entry *
202get_context_addr_from_root(struct root_entry *root)
203{
204 return (struct context_entry *)
205 (root_present(root)?phys_to_virt(
206 root->val & VTD_PAGE_MASK) :
207 NULL);
208}
209
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000210/*
211 * low 64 bits:
212 * 0: present
213 * 1: fault processing disable
214 * 2-3: translation type
215 * 12-63: address space root
216 * high 64 bits:
217 * 0-2: address width
218 * 3-6: aval
219 * 8-23: domain id
220 */
/* One entry of a VT-d context table; see field layout comment above. */
struct context_entry {
	u64 lo;		/* present, FPD, translation type, address-space root */
	u64 hi;		/* address width, aval, domain id */
};
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000225
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000226static inline bool context_present(struct context_entry *context)
227{
228 return (context->lo & 1);
229}
/* Set the present bit of a context entry. */
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}
234
/*
 * Enable fault processing by clearing the fault-processing-disable
 * bit (bit 1); all other bits of the low qword are preserved.
 */
static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}
239
/* Write the 2-bit translation type into bits 2-3 of the low qword. */
static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	/* clear bits 2-3 first, then install the new type */
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}
246
/*
 * Install the page-aligned physical address of the page-table root.
 * ORs the value in; assumes the address field was previously clear.
 */
static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}
252
/* Write the 3-bit address width (AGAW) into bits 0-2 of the high qword. */
static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}
258
/* Write the 16-bit domain id into bits 8-23 of the high qword. */
static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}
264
/* Zero a context entry (low qword first, clearing the present bit). */
static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000270
Mark McLoughlin622ba122008-11-20 15:49:46 +0000271/*
272 * 0: readable
273 * 1: writable
274 * 2-6: reserved
275 * 7: super page
Sheng Yang9cf066972009-03-18 15:33:07 +0800276 * 8-10: available
277 * 11: snoop behavior
 * 12-63: Host physical address
279 */
/* One VT-d page-table entry; see bit layout in the comment above. */
struct dma_pte {
	u64 val;
};
Mark McLoughlin622ba122008-11-20 15:49:46 +0000283
/* Zero a PTE, clearing read/write permission and the address. */
static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}
288
/*
 * Physical address stored in a PTE.  On 32-bit kernels a plain 64-bit
 * load could tear against a concurrent update, so an atomic 64-bit
 * read is forced via a no-op cmpxchg64.
 */
static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}
298
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000299static inline bool dma_pte_present(struct dma_pte *pte)
300{
301 return (pte->val & 3) != 0;
302}
Mark McLoughlin622ba122008-11-20 15:49:46 +0000303
Allen Kay4399c8b2011-10-14 12:32:46 -0700304static inline bool dma_pte_superpage(struct dma_pte *pte)
305{
Joerg Roedelc3c75eb2014-07-04 11:19:10 +0200306 return (pte->val & DMA_PTE_LARGE_PAGE);
Allen Kay4399c8b2011-10-14 12:32:46 -0700307}
308
David Woodhouse75e6bf92009-07-02 11:21:16 +0100309static inline int first_pte_in_page(struct dma_pte *pte)
310{
311 return !((unsigned long)pte & ~VTD_PAGE_MASK);
312}
313
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700314/*
315 * This domain is a statically identity mapping domain.
 * 1. This domain creates a static 1:1 mapping to all usable memory.
 * 2. It maps to each iommu if successful.
 * 3. Each iommu maps to this domain if successful.
319 */
David Woodhouse19943b02009-08-04 16:19:20 +0100320static struct dmar_domain *si_domain;
321static int hw_pass_through = 1;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700322
Weidong Han1ce28fe2008-12-08 16:35:39 +0800323/* domain represents a virtual machine, more than one devices
324 * across iommus may be owned in one domain, e.g. kvm guest.
325 */
Jiang Liuab8dfe22014-07-11 14:19:27 +0800326#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 0)
Weidong Han1ce28fe2008-12-08 16:35:39 +0800327
/* si_domain contains multiple devices */
Jiang Liuab8dfe22014-07-11 14:19:27 +0800329#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700330
Mike Travis1b198bb2012-03-05 15:05:16 -0800331/* define the limit of IOMMUs supported in each domain */
332#ifdef CONFIG_X86
333# define IOMMU_UNITS_SUPPORTED MAX_IO_APICS
334#else
335# define IOMMU_UNITS_SUPPORTED 64
336#endif
337
/* A protection domain: one IOVA space shared by a set of devices. */
struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses*/

	struct list_head devices; 	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};
364
Mark McLoughlina647dac2008-11-20 15:49:48 +0000365/* PCI domain-device relationship */
/* Per-device bookkeeping linking a device to its dmar_domain. */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct device *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};
375
/* An ACPI RMRR: a reserved memory region that must stay 1:1 mapped. */
struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units	*/
	struct acpi_dmar_header *hdr;	/* ACPI header		*/
	u64	base_address;		/* reserved base address*/
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
};
384
/* An ACPI ATSR: root ports eligible for Address Translation Services. */
struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};
392
393static LIST_HEAD(dmar_atsr_units);
394static LIST_HEAD(dmar_rmrr_units);
395
396#define for_each_rmrr_units(rmrr) \
397 list_for_each_entry(rmrr, &dmar_rmrr_units, list)
398
mark gross5e0d2a62008-03-04 15:22:08 -0800399static void flush_unmaps_timeout(unsigned long data);
400
Jiang Liub707cb02014-01-06 14:18:26 +0800401static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
mark gross5e0d2a62008-03-04 15:22:08 -0800402
mark gross80b20dd2008-04-18 13:53:58 -0700403#define HIGH_WATER_MARK 250
/* Per-IOMMU batch of IOVAs awaiting a deferred IOTLB flush and free. */
struct deferred_flush_tables {
	int next;				/* next free slot */
	struct iova *iova[HIGH_WATER_MARK];	/* IOVAs to release */
	struct dmar_domain *domain[HIGH_WATER_MARK]; /* owning domains */
	struct page *freelist[HIGH_WATER_MARK];	/* page tables to free */
};
410
411static struct deferred_flush_tables *deferred_flush;
412
mark gross5e0d2a62008-03-04 15:22:08 -0800413/* bitmap for indexing intel_iommus */
mark gross5e0d2a62008-03-04 15:22:08 -0800414static int g_num_of_iommus;
415
416static DEFINE_SPINLOCK(async_umap_flush_lock);
417static LIST_HEAD(unmaps_to_do);
418
419static int timer_on;
420static long list_size;
mark gross5e0d2a62008-03-04 15:22:08 -0800421
Jiang Liu92d03cc2014-02-19 14:07:28 +0800422static void domain_exit(struct dmar_domain *domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700423static void domain_remove_dev_info(struct dmar_domain *domain);
Jiang Liub94e4112014-02-19 14:07:25 +0800424static void domain_remove_one_dev_info(struct dmar_domain *domain,
David Woodhousebf9c9ed2014-03-09 16:19:13 -0700425 struct device *dev);
Jiang Liu92d03cc2014-02-19 14:07:28 +0800426static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
David Woodhouse0bcb3e22014-03-06 17:12:03 +0000427 struct device *dev);
Jiang Liu2a46ddf2014-07-11 14:19:30 +0800428static int domain_detach_iommu(struct dmar_domain *domain,
429 struct intel_iommu *iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700430
Suresh Siddhad3f13812011-08-23 17:05:25 -0700431#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800432int dmar_disabled = 0;
433#else
434int dmar_disabled = 1;
Suresh Siddhad3f13812011-08-23 17:05:25 -0700435#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800436
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -0200437int intel_iommu_enabled = 0;
438EXPORT_SYMBOL_GPL(intel_iommu_enabled);
439
David Woodhouse2d9e6672010-06-15 10:57:57 +0100440static int dmar_map_gfx = 1;
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700441static int dmar_forcedac;
mark gross5e0d2a62008-03-04 15:22:08 -0800442static int intel_iommu_strict;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100443static int intel_iommu_superpage = 1;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700444
David Woodhousec0771df2011-10-14 20:59:46 +0100445int intel_iommu_gfx_mapped;
446EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
447
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700448#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
449static DEFINE_SPINLOCK(device_domain_lock);
450static LIST_HEAD(device_domain_list);
451
Thierry Redingb22f6432014-06-27 09:03:12 +0200452static const struct iommu_ops intel_iommu_ops;
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +0100453
/*
 * Parse the "intel_iommu=" kernel command-line parameter.  Recognized
 * comma-separated options:
 *   on/off     - force-enable or disable the IOMMU
 *   igfx_off   - do not remap graphics devices
 *   forcedac   - force 64-bit DAC addressing for PCI devices
 *   strict     - flush the IOTLB synchronously on every unmap
 *   sp_off     - disable super-page support
 * Unrecognized tokens are silently skipped.
 */
static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		/* advance to the next comma-separated token */
		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
489__setup("intel_iommu=", intel_iommu_setup);
490
491static struct kmem_cache *iommu_domain_cache;
492static struct kmem_cache *iommu_devinfo_cache;
493static struct kmem_cache *iommu_iova_cache;
494
/*
 * Allocate one zeroed page for a page table, preferring NUMA @node.
 * Returns the page's virtual address or NULL on failure.  GFP_ATOMIC
 * because this may be called under spinlocks.
 */
static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}
505
/* Release a page-table page obtained from alloc_pgtable_page(). */
static inline void free_pgtable_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	free_page(addr);
}
510
/* Allocate a struct dmar_domain from its slab cache (atomic context safe). */
static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}
515
/* Return a struct dmar_domain to its slab cache. */
static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}
520
/* Allocate a struct device_domain_info from its slab cache. */
static inline void * alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}
525
/* Return a struct device_domain_info to its slab cache. */
static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}
530
/* Allocate a struct iova from its slab cache (exported to the iova layer). */
struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}
535
/* Return a struct iova to its slab cache. */
void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}
540
Jiang Liuab8dfe22014-07-11 14:19:27 +0800541static inline int domain_type_is_vm(struct dmar_domain *domain)
542{
543 return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
544}
545
546static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
547{
548 return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
549 DOMAIN_FLAG_STATIC_IDENTITY);
550}
Weidong Han1b573682008-12-08 15:34:06 +0800551
/*
 * Can @pfn be addressed within @domain's guest address width?
 * When the address width reaches BITS_PER_LONG the shift below would
 * be undefined, so that case is treated as "everything fits".
 */
static inline int domain_pfn_supported(struct dmar_domain *domain,
				       unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}
559
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700560static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
Weidong Han1b573682008-12-08 15:34:06 +0800561{
562 unsigned long sagaw;
563 int agaw = -1;
564
565 sagaw = cap_sagaw(iommu->cap);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700566 for (agaw = width_to_agaw(max_gaw);
Weidong Han1b573682008-12-08 15:34:06 +0800567 agaw >= 0; agaw--) {
568 if (test_bit(agaw, &sagaw))
569 break;
570 }
571
572 return agaw;
573}
574
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700575/*
576 * Calculate max SAGAW for each iommu.
577 */
578int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
579{
580 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
581}
582
583/*
584 * calculate agaw for each iommu.
585 * "SAGAW" may be different across iommus, use a default agaw, and
586 * get a supported less agaw for iommus that don't support the default agaw.
587 */
588int iommu_calculate_agaw(struct intel_iommu *iommu)
589{
590 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
591}
592
/* This function only returns a single iommu in a domain */
/*
 * Return the single IOMMU backing @domain.  Only valid for ordinary
 * domains: VM and si domains may span multiple IOMMUs and must not
 * get here (BUG).  Returns NULL if the bitmap yields no valid index.
 */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain_type_is_vm_or_si(domain));
	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}
606
Weidong Han8e6040972008-12-08 15:49:06 +0800607static void domain_update_iommu_coherency(struct dmar_domain *domain)
608{
David Woodhoused0501962014-03-11 17:10:29 -0700609 struct dmar_drhd_unit *drhd;
610 struct intel_iommu *iommu;
611 int i, found = 0;
Weidong Han8e6040972008-12-08 15:49:06 +0800612
David Woodhoused0501962014-03-11 17:10:29 -0700613 domain->iommu_coherency = 1;
Weidong Han8e6040972008-12-08 15:49:06 +0800614
Mike Travis1b198bb2012-03-05 15:05:16 -0800615 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
David Woodhoused0501962014-03-11 17:10:29 -0700616 found = 1;
Weidong Han8e6040972008-12-08 15:49:06 +0800617 if (!ecap_coherent(g_iommus[i]->ecap)) {
618 domain->iommu_coherency = 0;
619 break;
620 }
Weidong Han8e6040972008-12-08 15:49:06 +0800621 }
David Woodhoused0501962014-03-11 17:10:29 -0700622 if (found)
623 return;
624
625 /* No hardware attached; use lowest common denominator */
626 rcu_read_lock();
627 for_each_active_iommu(iommu, drhd) {
628 if (!ecap_coherent(iommu->ecap)) {
629 domain->iommu_coherency = 0;
630 break;
631 }
632 }
633 rcu_read_unlock();
Weidong Han8e6040972008-12-08 15:49:06 +0800634}
635
Jiang Liu161f6932014-07-11 14:19:37 +0800636static int domain_update_iommu_snooping(struct intel_iommu *skip)
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100637{
Allen Kay8140a952011-10-14 12:32:17 -0700638 struct dmar_drhd_unit *drhd;
Jiang Liu161f6932014-07-11 14:19:37 +0800639 struct intel_iommu *iommu;
640 int ret = 1;
641
642 rcu_read_lock();
643 for_each_active_iommu(iommu, drhd) {
644 if (iommu != skip) {
645 if (!ecap_sc_support(iommu->ecap)) {
646 ret = 0;
647 break;
648 }
649 }
650 }
651 rcu_read_unlock();
652
653 return ret;
654}
655
656static int domain_update_iommu_superpage(struct intel_iommu *skip)
657{
658 struct dmar_drhd_unit *drhd;
659 struct intel_iommu *iommu;
Allen Kay8140a952011-10-14 12:32:17 -0700660 int mask = 0xf;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100661
662 if (!intel_iommu_superpage) {
Jiang Liu161f6932014-07-11 14:19:37 +0800663 return 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100664 }
665
Allen Kay8140a952011-10-14 12:32:17 -0700666 /* set iommu_superpage to the smallest common denominator */
Jiang Liu0e242612014-02-19 14:07:34 +0800667 rcu_read_lock();
Allen Kay8140a952011-10-14 12:32:17 -0700668 for_each_active_iommu(iommu, drhd) {
Jiang Liu161f6932014-07-11 14:19:37 +0800669 if (iommu != skip) {
670 mask &= cap_super_page_val(iommu->cap);
671 if (!mask)
672 break;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100673 }
674 }
Jiang Liu0e242612014-02-19 14:07:34 +0800675 rcu_read_unlock();
676
Jiang Liu161f6932014-07-11 14:19:37 +0800677 return fls(mask);
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100678}
679
Sheng Yang58c610b2009-03-18 15:33:05 +0800680/* Some capabilities may be different across iommus */
/* Recompute the per-domain capability bits that may differ across iommus. */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain->iommu_snooping = domain_update_iommu_snooping(NULL);
	domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}
687
/*
 * Find the IOMMU that translates @dev, filling in *@bus/*@devfn with
 * the bus/devfn the IOMMU sees for it.  Matches, in order:
 *   1. @dev listed explicitly in a DRHD device scope;
 *   2. for PCI devices, a bridge in a scope whose secondary bus range
 *      contains @dev's bus (the quirky "goto into if" below jumps into
 *      the include_all branch to reuse its *bus/*devfn assignment);
 *   3. a DRHD marked INCLUDE_ALL in the same PCI segment.
 * For non-PCI devices with an ACPI companion, the companion device is
 * matched instead.  Returns NULL if no IOMMU covers the device.
 */
static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	struct pci_dev *ptmp, *pdev = NULL;
	u16 segment = 0;
	int i;

	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		segment = pci_domain_nr(pdev->bus);
	} else if (ACPI_COMPANION(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		/* PCI devices can only match a DRHD in their own segment */
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				*bus = drhd->devices[i].bus;
				*devfn = drhd->devices[i].devfn;
				goto out;
			}

			if (!pdev || !dev_is_pci(tmp))
				continue;

			/* is @pdev behind this bridge in the scope? */
			ptmp = to_pci_dev(tmp);
			if (ptmp->subordinate &&
			    ptmp->subordinate->number <= pdev->bus->number &&
			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
		got_pdev:
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;
			goto out;
		}
	}
	iommu = NULL;
 out:
	rcu_read_unlock();

	return iommu;
}
739
/*
 * Flush @size bytes at @addr from the CPU cache, but only when the
 * domain's IOMMUs do not snoop the cache during page-walks.
 */
static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}
746
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700747/* Gets context entry for a given bus and devfn */
/* Gets context entry for a given bus and devfn */
/*
 * Look up (and allocate on demand) the context entry for @bus/@devfn
 * under @iommu->lock.  A newly allocated context table is flushed to
 * memory and installed in the root entry before the lock is dropped.
 * Returns NULL only when page allocation fails.
 */
static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
						      u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		/* make the new table visible before pointing the root at it */
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}
775
776static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
777{
778 struct root_entry *root;
779 struct context_entry *context;
780 int ret;
781 unsigned long flags;
782
783 spin_lock_irqsave(&iommu->lock, flags);
784 root = &iommu->root_entry[bus];
785 context = get_context_addr_from_root(root);
786 if (!context) {
787 ret = 0;
788 goto out;
789 }
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000790 ret = context_present(&context[devfn]);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700791out:
792 spin_unlock_irqrestore(&iommu->lock, flags);
793 return ret;
794}
795
796static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
797{
798 struct root_entry *root;
799 struct context_entry *context;
800 unsigned long flags;
801
802 spin_lock_irqsave(&iommu->lock, flags);
803 root = &iommu->root_entry[bus];
804 context = get_context_addr_from_root(root);
805 if (context) {
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000806 context_clear_entry(&context[devfn]);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700807 __iommu_flush_cache(iommu, &context[devfn], \
808 sizeof(*context));
809 }
810 spin_unlock_irqrestore(&iommu->lock, flags);
811}
812
813static void free_context_table(struct intel_iommu *iommu)
814{
815 struct root_entry *root;
816 int i;
817 unsigned long flags;
818 struct context_entry *context;
819
820 spin_lock_irqsave(&iommu->lock, flags);
821 if (!iommu->root_entry) {
822 goto out;
823 }
824 for (i = 0; i < ROOT_ENTRY_NR; i++) {
825 root = &iommu->root_entry[i];
826 context = get_context_addr_from_root(root);
827 if (context)
828 free_pgtable_page(context);
829 }
830 free_pgtable_page(iommu->root_entry);
831 iommu->root_entry = NULL;
832out:
833 spin_unlock_irqrestore(&iommu->lock, flags);
834}
835
/*
 * Walk (building missing levels on demand) the page table of @domain
 * and return the PTE covering @pfn.
 *
 * On entry, *target_level is the level at which the caller wants the
 * PTE (1 = 4KiB leaf); 0 means "stop at the first superpage or
 * non-present entry".  For a 0 request, *target_level is updated to
 * the level where the walk stopped.  Returns NULL if @pfn exceeds the
 * domain's addressing width or a table page cannot be allocated.
 */
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (!domain_pfn_supported(domain, pfn))
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		/* Level-0 request: stop at a superpage or a hole. */
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			/* Install the new table atomically; a concurrent
			 * walker may have installed one first. */
			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}
889
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100890
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700891/* return address's pte at specific level */
David Woodhouse90dcfb52009-06-27 17:14:59 +0100892static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
893 unsigned long pfn,
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100894 int level, int *large_page)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700895{
896 struct dma_pte *parent, *pte = NULL;
897 int total = agaw_to_level(domain->agaw);
898 int offset;
899
900 parent = domain->pgd;
901 while (level <= total) {
David Woodhouse90dcfb52009-06-27 17:14:59 +0100902 offset = pfn_level_offset(pfn, total);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700903 pte = &parent[offset];
904 if (level == total)
905 return pte;
906
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100907 if (!dma_pte_present(pte)) {
908 *large_page = total;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700909 break;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100910 }
911
Yijing Wange16922a2014-05-20 20:37:51 +0800912 if (dma_pte_superpage(pte)) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100913 *large_page = total;
914 return pte;
915 }
916
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000917 parent = phys_to_virt(dma_pte_addr(pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700918 total--;
919 }
920 return NULL;
921}
922
/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			/* Hole in the page table: skip to the start of the
			 * next entry at the level where the walk stopped. */
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		/* Clear consecutive entries until the end of this
		 * page-table page or of the requested range. */
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		/* Make the cleared entries visible to the hardware walker. */
		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}
954
/*
 * Recursively free page-table pages whose entire span lies within
 * [start_pfn, last_pfn].  Leaf PTEs are expected to have been cleared
 * already (see dma_pte_free_pagetable()).
 */
static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		/* Nothing below a hole or a superpage entry to free. */
		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level - 1);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}
987
/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* Clear any remaining leaf PTEs in the range first. */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}
1009
David Woodhouseea8ea462014-03-05 17:09:32 +00001010/* When a page at a given level is being unlinked from its parent, we don't
1011 need to *modify* it at all. All we need to do is make a list of all the
1012 pages which can be freed just as soon as we've flushed the IOTLB and we
1013 know the hardware page-walk will no longer touch them.
1014 The 'pte' argument is the *parent* PTE, pointing to the page that is to
1015 be freed. */
1016static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1017 int level, struct dma_pte *pte,
1018 struct page *freelist)
1019{
1020 struct page *pg;
1021
1022 pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1023 pg->freelist = freelist;
1024 freelist = pg;
1025
1026 if (level == 1)
1027 return freelist;
1028
Jiang Liuadeb2592014-04-09 10:20:39 +08001029 pte = page_address(pg);
1030 do {
David Woodhouseea8ea462014-03-05 17:09:32 +00001031 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1032 freelist = dma_pte_list_pagetables(domain, level - 1,
1033 pte, freelist);
Jiang Liuadeb2592014-04-09 10:20:39 +08001034 pte++;
1035 } while (!first_pte_in_page(pte));
David Woodhouseea8ea462014-03-05 17:09:32 +00001036
1037 return freelist;
1038}
1039
/*
 * Clear the entries covering [start_pfn, last_pfn] at @level and below,
 * accumulating fully-covered page-table pages onto @freelist; they are
 * actually freed later, after the IOTLB flush.  Returns the updated
 * freelist head.
 */
static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	/* Flush the cleared run of entries in one go. */
	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}
1088
1089/* We can't just free the pages because the IOMMU may still be walking
1090 the page tables, and may have cached the intermediate levels. The
1091 pages can only be freed after the IOTLB flush has been done. */
1092struct page *domain_unmap(struct dmar_domain *domain,
1093 unsigned long start_pfn,
1094 unsigned long last_pfn)
1095{
David Woodhouseea8ea462014-03-05 17:09:32 +00001096 struct page *freelist = NULL;
1097
Jiang Liu162d1b12014-07-11 14:19:35 +08001098 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1099 BUG_ON(!domain_pfn_supported(domain, last_pfn));
David Woodhouseea8ea462014-03-05 17:09:32 +00001100 BUG_ON(start_pfn > last_pfn);
1101
1102 /* we don't need lock here; nobody else touches the iova range */
1103 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1104 domain->pgd, 0, start_pfn, last_pfn, NULL);
1105
1106 /* free pgd */
1107 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1108 struct page *pgd_page = virt_to_page(domain->pgd);
1109 pgd_page->freelist = freelist;
1110 freelist = pgd_page;
1111
1112 domain->pgd = NULL;
1113 }
1114
1115 return freelist;
1116}
1117
1118void dma_free_pagelist(struct page *freelist)
1119{
1120 struct page *pg;
1121
1122 while ((pg = freelist)) {
1123 freelist = pg->freelist;
1124 free_pgtable_page(page_address(pg));
1125 }
1126}
1127
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001128/* iommu handling */
1129static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1130{
1131 struct root_entry *root;
1132 unsigned long flags;
1133
Suresh Siddha4c923d42009-10-02 11:01:24 -07001134 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001135 if (!root)
1136 return -ENOMEM;
1137
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001138 __iommu_flush_cache(iommu, root, ROOT_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001139
1140 spin_lock_irqsave(&iommu->lock, flags);
1141 iommu->root_entry = root;
1142 spin_unlock_irqrestore(&iommu->lock, flags);
1143
1144 return 0;
1145}
1146
/*
 * Program the root-table address register and ask the hardware to
 * latch it (SRTP), spinning until the status register confirms it.
 */
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
1166
/*
 * Flush the IOMMU's internal write buffer.  Only performed when the
 * hardware advertises the RWBF capability or the rwbf quirk applies.
 */
static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
1184
/*
 * Issue a context-cache invalidation of the requested granularity
 * (global, domain-selective, or device-selective) through the CCMD
 * register and wait for the hardware to complete it.
 */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
1218
/*
 * Issue an IOTLB invalidation (global, domain-selective, or page-
 * selective) through the register interface and wait for completion.
 * For PSI, @addr may carry the IH (invalidation hint) bit and
 * @size_order is the log2 page count; the result registers are checked
 * to report whether the hardware honoured the requested granularity.
 */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* IH bit is passed in as part of address */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}
1275
David Woodhouse64ae8922014-03-09 12:52:30 -07001276static struct device_domain_info *
1277iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1278 u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001279{
Yu Zhao93a23a72009-05-18 13:51:37 +08001280 int found = 0;
1281 unsigned long flags;
1282 struct device_domain_info *info;
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001283 struct pci_dev *pdev;
Yu Zhao93a23a72009-05-18 13:51:37 +08001284
1285 if (!ecap_dev_iotlb_support(iommu->ecap))
1286 return NULL;
1287
1288 if (!iommu->qi)
1289 return NULL;
1290
1291 spin_lock_irqsave(&device_domain_lock, flags);
1292 list_for_each_entry(info, &domain->devices, link)
Jiang Liuc3b497c2014-07-11 14:19:25 +08001293 if (info->iommu == iommu && info->bus == bus &&
1294 info->devfn == devfn) {
Yu Zhao93a23a72009-05-18 13:51:37 +08001295 found = 1;
1296 break;
1297 }
1298 spin_unlock_irqrestore(&device_domain_lock, flags);
1299
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001300 if (!found || !info->dev || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001301 return NULL;
1302
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001303 pdev = to_pci_dev(info->dev);
1304
1305 if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
Yu Zhao93a23a72009-05-18 13:51:37 +08001306 return NULL;
1307
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001308 if (!dmar_find_matched_atsr_unit(pdev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001309 return NULL;
1310
Yu Zhao93a23a72009-05-18 13:51:37 +08001311 return info;
1312}
1313
1314static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1315{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001316 if (!info || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001317 return;
1318
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001319 pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
Yu Zhao93a23a72009-05-18 13:51:37 +08001320}
1321
1322static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1323{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001324 if (!info->dev || !dev_is_pci(info->dev) ||
1325 !pci_ats_enabled(to_pci_dev(info->dev)))
Yu Zhao93a23a72009-05-18 13:51:37 +08001326 return;
1327
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001328 pci_disable_ats(to_pci_dev(info->dev));
Yu Zhao93a23a72009-05-18 13:51:37 +08001329}
1330
1331static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1332 u64 addr, unsigned mask)
1333{
1334 u16 sid, qdep;
1335 unsigned long flags;
1336 struct device_domain_info *info;
1337
1338 spin_lock_irqsave(&device_domain_lock, flags);
1339 list_for_each_entry(info, &domain->devices, link) {
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001340 struct pci_dev *pdev;
1341 if (!info->dev || !dev_is_pci(info->dev))
1342 continue;
1343
1344 pdev = to_pci_dev(info->dev);
1345 if (!pci_ats_enabled(pdev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001346 continue;
1347
1348 sid = info->bus << 8 | info->devfn;
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001349 qdep = pci_ats_queue_depth(pdev);
Yu Zhao93a23a72009-05-18 13:51:37 +08001350 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1351 }
1352 spin_unlock_irqrestore(&device_domain_lock, flags);
1353}
1354
/*
 * Flush IOTLB entries for @pages pages starting at @pfn in domain @did.
 * Uses page-selective invalidation when the hardware supports it and
 * the mask fits, otherwise falls back to a domain-selective flush.
 * @ih requests the invalidation-hint bit; @map is nonzero when this
 * flush is for a newly-created mapping, in which case the device IOTLB
 * does not need flushing under caching mode.
 */
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int ih, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	if (ih)
		ih = 1 << 6;
	/*
	 * Fallback to domain selective flush if no PSI support or the size is
	 * too big.
	 * PSI requires page size to be 2 ^ x, and the base address is naturally
	 * aligned to the size
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present require
	 * flush. However, device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
1385
/*
 * Clear the Enable-Protected-Memory bit and wait for the protected-
 * region status bit to clear, so DMA to those regions is allowed.
 */
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
1402
/* Set the TE bit in the global command register to turn DMA
 * translation on, and wait for the hardware to acknowledge it. */
static void iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
1418
/* Clear the TE bit in the global command register to turn DMA
 * translation off, and wait for the hardware to acknowledge it. */
static void iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
1434
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001435
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001436static int iommu_init_domains(struct intel_iommu *iommu)
1437{
1438 unsigned long ndomains;
1439 unsigned long nlongs;
1440
1441 ndomains = cap_ndoms(iommu->cap);
Jiang Liu852bdb02014-01-06 14:18:11 +08001442 pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
1443 iommu->seq_id, ndomains);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001444 nlongs = BITS_TO_LONGS(ndomains);
1445
Donald Dutile94a91b52009-08-20 16:51:34 -04001446 spin_lock_init(&iommu->lock);
1447
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001448 /* TBD: there might be 64K domains,
1449 * consider other allocation for future chip
1450 */
1451 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1452 if (!iommu->domain_ids) {
Jiang Liu852bdb02014-01-06 14:18:11 +08001453 pr_err("IOMMU%d: allocating domain id array failed\n",
1454 iommu->seq_id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001455 return -ENOMEM;
1456 }
1457 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1458 GFP_KERNEL);
1459 if (!iommu->domains) {
Jiang Liu852bdb02014-01-06 14:18:11 +08001460 pr_err("IOMMU%d: allocating domain array failed\n",
1461 iommu->seq_id);
1462 kfree(iommu->domain_ids);
1463 iommu->domain_ids = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001464 return -ENOMEM;
1465 }
1466
1467 /*
1468 * if Caching mode is set, then invalid translations are tagged
1469 * with domainid 0. Hence we need to pre-allocate it.
1470 */
1471 if (cap_caching_mode(iommu->cap))
1472 set_bit(0, iommu->domain_ids);
1473 return 0;
1474}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001475
/*
 * Release all state owned by @iommu before the unit goes away: detach
 * (and, where this was the last reference, destroy) every domain that
 * still holds an id on this unit, turn translation off if it is still
 * enabled, then free the id bitmap, the domain array, the g_iommus[]
 * slot and the context table.
 */
static void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			/*
			 * Domain id 0 is reserved for invalid translation
			 * if hardware supports caching mode.
			 */
			if (cap_caching_mode(iommu->cap) && i == 0)
				continue;

			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);
			/* Fully destroy the domain only when this IOMMU was
			 * its last one and it is not a VM-type domain
			 * (presumably VM domains are torn down elsewhere via
			 * the iommu API — confirm). */
			if (domain_detach_iommu(domain, iommu) == 0 &&
			    !domain_type_is_vm(domain))
				domain_exit(domain);
		}
	}

	/* Stop DMA translation if it is still running on this unit */
	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	kfree(iommu->domains);
	kfree(iommu->domain_ids);
	iommu->domains = NULL;
	iommu->domain_ids = NULL;

	g_iommus[iommu->seq_id] = NULL;

	/* free context mapping */
	free_context_table(iommu);
}
1511
Jiang Liuab8dfe22014-07-11 14:19:27 +08001512static struct dmar_domain *alloc_domain(int flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001513{
Jiang Liu92d03cc2014-02-19 14:07:28 +08001514 /* domain id for virtual machine, it won't be set in context */
1515 static atomic_t vm_domid = ATOMIC_INIT(0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001516 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001517
1518 domain = alloc_domain_mem();
1519 if (!domain)
1520 return NULL;
1521
Jiang Liuab8dfe22014-07-11 14:19:27 +08001522 memset(domain, 0, sizeof(*domain));
Suresh Siddha4c923d42009-10-02 11:01:24 -07001523 domain->nid = -1;
Jiang Liuab8dfe22014-07-11 14:19:27 +08001524 domain->flags = flags;
Jiang Liu92d03cc2014-02-19 14:07:28 +08001525 spin_lock_init(&domain->iommu_lock);
1526 INIT_LIST_HEAD(&domain->devices);
Jiang Liuab8dfe22014-07-11 14:19:27 +08001527 if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
Jiang Liu92d03cc2014-02-19 14:07:28 +08001528 domain->id = atomic_inc_return(&vm_domid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001529
1530 return domain;
1531}
1532
Jiang Liufb170fb2014-07-11 14:19:28 +08001533static int __iommu_attach_domain(struct dmar_domain *domain,
1534 struct intel_iommu *iommu)
1535{
1536 int num;
1537 unsigned long ndomains;
1538
1539 ndomains = cap_ndoms(iommu->cap);
1540 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1541 if (num < ndomains) {
1542 set_bit(num, iommu->domain_ids);
1543 iommu->domains[num] = domain;
1544 } else {
1545 num = -ENOSPC;
1546 }
1547
1548 return num;
1549}
1550
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001551static int iommu_attach_domain(struct dmar_domain *domain,
1552 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001553{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001554 int num;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001555 unsigned long flags;
1556
Weidong Han8c11e792008-12-08 15:29:22 +08001557 spin_lock_irqsave(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001558 num = __iommu_attach_domain(domain, iommu);
Jiang Liu44bde612014-07-11 14:19:29 +08001559 spin_unlock_irqrestore(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001560 if (num < 0)
1561 pr_err("IOMMU: no free domain ids\n");
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001562
Jiang Liufb170fb2014-07-11 14:19:28 +08001563 return num;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001564}
1565
Jiang Liu44bde612014-07-11 14:19:29 +08001566static int iommu_attach_vm_domain(struct dmar_domain *domain,
1567 struct intel_iommu *iommu)
1568{
1569 int num;
1570 unsigned long ndomains;
1571
1572 ndomains = cap_ndoms(iommu->cap);
1573 for_each_set_bit(num, iommu->domain_ids, ndomains)
1574 if (iommu->domains[num] == domain)
1575 return num;
1576
1577 return __iommu_attach_domain(domain, iommu);
1578}
1579
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001580static void iommu_detach_domain(struct dmar_domain *domain,
1581 struct intel_iommu *iommu)
1582{
1583 unsigned long flags;
1584 int num, ndomains;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001585
1586 spin_lock_irqsave(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001587 if (domain_type_is_vm_or_si(domain)) {
1588 ndomains = cap_ndoms(iommu->cap);
1589 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1590 if (iommu->domains[num] == domain) {
1591 clear_bit(num, iommu->domain_ids);
1592 iommu->domains[num] = NULL;
1593 break;
1594 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001595 }
Jiang Liufb170fb2014-07-11 14:19:28 +08001596 } else {
1597 clear_bit(domain->id, iommu->domain_ids);
1598 iommu->domains[domain->id] = NULL;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001599 }
Weidong Han8c11e792008-12-08 15:29:22 +08001600 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001601}
1602
Jiang Liufb170fb2014-07-11 14:19:28 +08001603static void domain_attach_iommu(struct dmar_domain *domain,
1604 struct intel_iommu *iommu)
1605{
1606 unsigned long flags;
1607
1608 spin_lock_irqsave(&domain->iommu_lock, flags);
1609 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
1610 domain->iommu_count++;
1611 if (domain->iommu_count == 1)
1612 domain->nid = iommu->node;
1613 domain_update_iommu_cap(domain);
1614 }
1615 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1616}
1617
1618static int domain_detach_iommu(struct dmar_domain *domain,
1619 struct intel_iommu *iommu)
1620{
1621 unsigned long flags;
1622 int count = INT_MAX;
1623
1624 spin_lock_irqsave(&domain->iommu_lock, flags);
1625 if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
1626 count = --domain->iommu_count;
1627 domain_update_iommu_cap(domain);
1628 }
1629 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1630
1631 return count;
1632}
1633
/* IOVA ranges that must never be used for DMA (IOAPIC window, PCI MMIO) */
static struct iova_domain reserved_iova_list;
/* distinct lockdep class for reserved_iova_list's rbtree lock */
static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001636
Joseph Cihula51a63e62011-03-21 11:04:24 -07001637static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001638{
1639 struct pci_dev *pdev = NULL;
1640 struct iova *iova;
1641 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001642
David Millerf6611972008-02-06 01:36:23 -08001643 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001644
Mark Gross8a443df2008-03-04 14:59:31 -08001645 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1646 &reserved_rbtree_key);
1647
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001648 /* IOAPIC ranges shouldn't be accessed by DMA */
1649 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1650 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001651 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001652 printk(KERN_ERR "Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001653 return -ENODEV;
1654 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001655
1656 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1657 for_each_pci_dev(pdev) {
1658 struct resource *r;
1659
1660 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1661 r = &pdev->resource[i];
1662 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1663 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001664 iova = reserve_iova(&reserved_iova_list,
1665 IOVA_PFN(r->start),
1666 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001667 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001668 printk(KERN_ERR "Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001669 return -ENODEV;
1670 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001671 }
1672 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001673 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001674}
1675
/* Copy the globally reserved IOVA ranges (IOAPIC, PCI MMIO) into
 * @domain's own allocator so they are never handed out for DMA. */
static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}
1680
/*
 * Round a guest address width up to the nearest width the page-table
 * hierarchy can express: 12 bits of page offset plus a whole number of
 * 9-bit translation levels, capped at 64 bits.
 */
static inline int guestwidth_to_adjustwidth(int gaw)
{
	int rem = (gaw - 12) % 9;
	int agaw = rem ? gaw + 9 - rem : gaw;

	return agaw > 64 ? 64 : agaw;
}
1694
/*
 * One-time initialisation of a newly attached domain: set up its IOVA
 * allocator, choose an adjusted guest address width (AGAW) supported by
 * the hardware, cache the unit's coherency/snooping/superpage
 * capabilities, and allocate the top-level page table.
 * Returns 0, -ENODEV when no usable AGAW exists, or -ENOMEM.
 */
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	/* clamp the requested width to what the hardware can address */
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;

	/* cache per-unit capability bits on the domain */
	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	/* superpage support can be disabled via intel_iommu_superpage */
	if (intel_iommu_superpage)
		domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	else
		domain->iommu_superpage = 0;

	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}
1745
/*
 * Destroy @domain completely: detach its devices, drop its IOVA
 * allocator, unmap and collect its page tables, release its id on every
 * active IOMMU, and finally free the collected pages and the domain
 * itself.  The page-table pages are only freed (dma_free_pagelist)
 * after the domain has been detached from all units.
 */
static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	struct page *freelist = NULL;

	/* Domain 0 is reserved, so dont process it */
	if (!domain)
		return;

	/* Flush any lazy unmaps that may reference this domain */
	if (!intel_iommu_strict)
		flush_unmaps_timeout(0);

	/* remove associated devices */
	domain_remove_dev_info(domain);

	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* unmap the whole address space; pages come back on a freelist */
	freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* clear attached or cached domains */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd)
		iommu_detach_domain(domain, iommu);
	rcu_read_unlock();

	dma_free_pagelist(freelist);

	free_domain_mem(domain);
}
1778
/*
 * Program the context entry for one (bus, devfn) on @iommu so the
 * device translates through @domain.  @translation must be
 * CONTEXT_TT_PASS_THROUGH or CONTEXT_TT_MULTI_LEVEL on entry (it may be
 * upgraded to CONTEXT_TT_DEV_IOTLB if the device supports it).
 * A context entry that is already present is left untouched.
 * Returns 0, -ENOMEM, or -EFAULT when no domain id is available.
 */
static int domain_context_mapping_one(struct dmar_domain *domain,
				      struct intel_iommu *iommu,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct dma_pte *pgd;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		/* already mapped — nothing to do */
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain_type_is_vm_or_si(domain)) {
		/* VM domains use a per-IOMMU id, allocated on demand */
		if (domain_type_is_vm(domain)) {
			id = iommu_attach_vm_domain(domain, iommu);
			if (id < 0) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				pr_err("IOMMU: no free domain ids\n");
				return -EFAULT;
			}
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 * Unnecessary for PT mode.
		 */
		if (translation != CONTEXT_TT_PASS_THROUGH) {
			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd)) {
					spin_unlock_irqrestore(&iommu->lock, flags);
					return -ENOMEM;
				}
			}
		}
	}

	context_set_domain_id(context, id);

	/* upgrade to device-IOTLB translation where supported */
	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entry we only need to flush the write-buffer. If the
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	/* account this IOMMU against the domain */
	domain_attach_iommu(domain, iommu);

	return 0;
}
1879
/* Argument bundle threaded through pci_for_each_dma_alias() below. */
struct domain_context_mapping_data {
	struct dmar_domain *domain;
	struct intel_iommu *iommu;
	int translation;
};

/* pci_for_each_dma_alias() callback: map one (bus, devfn) alias of the
 * device into the domain carried in @opaque. */
static int domain_context_mapping_cb(struct pci_dev *pdev,
				     u16 alias, void *opaque)
{
	struct domain_context_mapping_data *data = opaque;

	return domain_context_mapping_one(data->domain, data->iommu,
					  PCI_BUS_NUM(alias), alias & 0xff,
					  data->translation);
}
1895
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001896static int
David Woodhousee1f167f2014-03-09 15:24:46 -07001897domain_context_mapping(struct dmar_domain *domain, struct device *dev,
1898 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001899{
David Woodhouse64ae8922014-03-09 12:52:30 -07001900 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07001901 u8 bus, devfn;
Alex Williamson579305f2014-07-03 09:51:43 -06001902 struct domain_context_mapping_data data;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001903
David Woodhousee1f167f2014-03-09 15:24:46 -07001904 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse64ae8922014-03-09 12:52:30 -07001905 if (!iommu)
1906 return -ENODEV;
1907
Alex Williamson579305f2014-07-03 09:51:43 -06001908 if (!dev_is_pci(dev))
1909 return domain_context_mapping_one(domain, iommu, bus, devfn,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001910 translation);
Alex Williamson579305f2014-07-03 09:51:43 -06001911
1912 data.domain = domain;
1913 data.iommu = iommu;
1914 data.translation = translation;
1915
1916 return pci_for_each_dma_alias(to_pci_dev(dev),
1917 &domain_context_mapping_cb, &data);
1918}
1919
/* pci_for_each_dma_alias() callback: return non-zero (stop the walk)
 * as soon as an alias is found WITHOUT a context entry. */
static int domain_context_mapped_cb(struct pci_dev *pdev,
				    u16 alias, void *opaque)
{
	struct intel_iommu *iommu = opaque;

	return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
}
1927
David Woodhousee1f167f2014-03-09 15:24:46 -07001928static int domain_context_mapped(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001929{
Weidong Han5331fe62008-12-08 23:00:00 +08001930 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07001931 u8 bus, devfn;
Weidong Han5331fe62008-12-08 23:00:00 +08001932
David Woodhousee1f167f2014-03-09 15:24:46 -07001933 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001934 if (!iommu)
1935 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001936
Alex Williamson579305f2014-07-03 09:51:43 -06001937 if (!dev_is_pci(dev))
1938 return device_context_mapped(iommu, bus, devfn);
David Woodhousee1f167f2014-03-09 15:24:46 -07001939
Alex Williamson579305f2014-07-03 09:51:43 -06001940 return !pci_for_each_dma_alias(to_pci_dev(dev),
1941 domain_context_mapped_cb, iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001942}
1943
Fenghua Yuf5329592009-08-04 15:09:37 -07001944/* Returns a number of VTD pages, but aligned to MM page size */
1945static inline unsigned long aligned_nrpages(unsigned long host_addr,
1946 size_t size)
1947{
1948 host_addr &= ~PAGE_MASK;
1949 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1950}
1951
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001952/* Return largest possible superpage level for a given mapping */
1953static inline int hardware_largepage_caps(struct dmar_domain *domain,
1954 unsigned long iov_pfn,
1955 unsigned long phy_pfn,
1956 unsigned long pages)
1957{
1958 int support, level = 1;
1959 unsigned long pfnmerge;
1960
1961 support = domain->iommu_superpage;
1962
1963 /* To use a large page, the virtual *and* physical addresses
1964 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1965 of them will mean we have to use smaller pages. So just
1966 merge them and check both at once. */
1967 pfnmerge = iov_pfn | phy_pfn;
1968
1969 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1970 pages >>= VTD_STRIDE_SHIFT;
1971 if (!pages)
1972 break;
1973 pfnmerge >>= VTD_STRIDE_SHIFT;
1974 level++;
1975 support--;
1976 }
1977 return level;
1978}
1979
/*
 * Core mapping routine: install PTEs for @nr_pages starting at
 * @iov_pfn.  The source is either a scatterlist (@sg non-NULL, whose
 * dma_address/dma_length fields are filled in here) or a contiguous
 * physical range starting at @phys_pfn.  Superpages are used where the
 * alignment and remaining length allow.  @prot must contain at least
 * one of DMA_PTE_READ/DMA_PTE_WRITE.  Returns 0, -EINVAL or -ENOMEM.
 */
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	unsigned long sg_res;		/* pages left in current sg entry */
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;	/* pages covered by one PTE at lvl */

	BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (sg)
		sg_res = 0;	/* force pteval setup from first sg entry */
	else {
		/* contiguous case: one "segment" spanning everything */
		sg_res = nr_pages + 1;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages > 0) {
		uint64_t tmp;

		if (!sg_res) {
			/* advance to the next scatterlist segment */
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
		}

		if (!pte) {
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
			if (!pte)
				return -ENOMEM;
			/* It is large page*/
			if (largepage_lvl > 1) {
				pteval |= DMA_PTE_LARGE_PAGE;
				lvl_pages = lvl_to_nr_pages(largepage_lvl);
				/*
				 * Ensure that old small page tables are
				 * removed to make room for superpage,
				 * if they exist.
				 */
				dma_pte_free_pagetable(domain, iov_pfn,
						       iov_pfn + lvl_pages - 1);
			} else {
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
			}

		}
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			/* PTE was already set — this is a bug; report it
			 * (rate-limited to 5 dumps) but keep going */
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/* If the next PTE would be the first in a new page, then we
		   need to flush the cache on the entries we've just written.
		   And then we'll need to recalculate 'pte', so clear it and
		   let it get set again in the if (!pte) block above.

		   If we're done (!nr_pages) we need to flush the cache too.

		   Also if we've been setting superpages, we may need to
		   recalculate 'pte' and switch back to smaller pages for the
		   end of the mapping, if the trailing size is not enough to
		   use another superpage (i.e. sg_res < lvl_pages). */
		pte++;
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}

		if (!sg_res && nr_pages)
			sg = sg_next(sg);
	}
	return 0;
}
2087
/* Map a scatterlist into @domain at @iov_pfn; see __domain_mapping(). */
static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002094
/* Map a contiguous physical range starting at @phys_pfn into @domain
 * at @iov_pfn; see __domain_mapping(). */
static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}
2101
/*
 * Remove the context entry for (bus, devfn) on @iommu and globally
 * invalidate the context cache and IOTLB so no stale translations
 * remain.  A NULL @iommu is tolerated as a no-op.
 */
static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}
2112
/* Unhook @info from both the domain's device list and the global list,
 * and clear the device's cached archdata pointer.  Caller must hold
 * device_domain_lock (asserted below). */
static inline void unlink_domain_info(struct device_domain_info *info)
{
	assert_spin_locked(&device_domain_lock);
	list_del(&info->link);
	list_del(&info->global);
	if (info->dev)
		info->dev->archdata.iommu = NULL;
}
2121
/*
 * Detach every device from @domain: unlink its info, disable its
 * device-IOTLB, clear its context entry, and free the info struct.
 * device_domain_lock is dropped while detaching each device (those
 * calls take iommu->lock and may sleep-adjacent work) and re-taken to
 * continue the walk — hence the _safe list iteration.
 */
static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
		unlink_domain_info(info);
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu_detach_dev(info->iommu, info->bus, info->devfn);

		/* VM domains also drop their per-IOMMU attachment here */
		if (domain_type_is_vm(domain)) {
			iommu_detach_dependent_devices(info->iommu, info->dev);
			domain_detach_iommu(domain, info->iommu);
		}

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
2145
2146/*
2147 * find_domain
David Woodhouse1525a292014-03-06 16:19:30 +00002148 * Note: we use struct device->archdata.iommu stores the info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002149 */
David Woodhouse1525a292014-03-06 16:19:30 +00002150static struct dmar_domain *find_domain(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002151{
2152 struct device_domain_info *info;
2153
2154 /* No lock here, assumes no domain exit in normal case */
David Woodhouse1525a292014-03-06 16:19:30 +00002155 info = dev->archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002156 if (info)
2157 return info->domain;
2158 return NULL;
2159}
2160
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002161static inline struct device_domain_info *
Jiang Liu745f2582014-02-19 14:07:26 +08002162dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2163{
2164 struct device_domain_info *info;
2165
2166 list_for_each_entry(info, &device_domain_list, global)
David Woodhouse41e80dca2014-03-09 13:55:54 -07002167 if (info->iommu->segment == segment && info->bus == bus &&
Jiang Liu745f2582014-02-19 14:07:26 +08002168 info->devfn == devfn)
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002169 return info;
Jiang Liu745f2582014-02-19 14:07:26 +08002170
2171 return NULL;
2172}
2173
/*
 * dmar_insert_dev_info - register a (bus, devfn) tuple -- and optionally
 * a struct device -- as attached to @domain under @iommu.
 *
 * Returns the domain actually in effect: @domain when this call installed
 * the binding, a previously-registered domain if another path got there
 * first (the caller must then free its own candidate domain, see comment
 * below), or NULL if the info allocation failed.
 */
static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
						int bus, int devfn,
						struct device *dev,
						struct dmar_domain *domain)
{
	struct dmar_domain *found = NULL;
	struct device_domain_info *info;
	unsigned long flags;

	/* Allocate outside the lock; freed below if we lose the race. */
	info = alloc_devinfo_mem();
	if (!info)
		return NULL;

	info->bus = bus;
	info->devfn = devfn;
	info->dev = dev;
	info->domain = domain;
	info->iommu = iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	/* Re-check under the lock: someone may have bound this device already. */
	if (dev)
		found = find_domain(dev);
	else {
		struct device_domain_info *info2;
		info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
		if (info2)
			found = info2->domain;
	}
	if (found) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		free_devinfo_mem(info);
		/* Caller must free the original domain */
		return found;
	}

	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	if (dev)
		dev->archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return domain;
}
2217
Alex Williamson579305f2014-07-03 09:51:43 -06002218static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2219{
2220 *(u16 *)opaque = alias;
2221 return 0;
2222}
2223
/*
 * get_domain_for_dev - find or create the remapping domain for @dev.
 *
 * Lookup order: the device's own cached domain, then (for PCI) a domain
 * already registered for the device's last DMA alias, and finally a
 * freshly allocated domain of address width @gaw. On the fresh path the
 * domain is also registered under the DMA alias so future alias lookups
 * hit it. Returns the domain in use, or NULL on failure.
 */
static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
{
	struct dmar_domain *domain, *tmp;
	struct intel_iommu *iommu;
	struct device_domain_info *info;
	u16 dma_alias;
	unsigned long flags;
	u8 bus, devfn;

	domain = find_domain(dev);
	if (domain)
		return domain;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return NULL;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);

		spin_lock_irqsave(&device_domain_lock, flags);
		info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
						      PCI_BUS_NUM(dma_alias),
						      dma_alias & 0xff);
		if (info) {
			iommu = info->iommu;
			domain = info->domain;
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);

		/* DMA alias already has a domain, uses it */
		if (info)
			goto found_domain;
	}

	/* Allocate and initialize new domain for the device */
	domain = alloc_domain(0);
	if (!domain)
		return NULL;
	domain->id = iommu_attach_domain(domain, iommu);
	if (domain->id < 0) {
		free_domain_mem(domain);
		return NULL;
	}
	domain_attach_iommu(domain, iommu);
	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		return NULL;
	}

	/* register PCI DMA alias device */
	if (dev_is_pci(dev)) {
		tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
					   dma_alias & 0xff, NULL, domain);

		/* Lost the race: drop our candidate and adopt the winner. */
		if (!tmp || tmp != domain) {
			domain_exit(domain);
			domain = tmp;
		}

		if (!domain)
			return NULL;
	}

found_domain:
	tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);

	/*
	 * NOTE(review): on the found_domain path @domain may be a
	 * long-lived domain taken from an existing alias entry; a mismatch
	 * here calls domain_exit() on it — verify this cannot tear down a
	 * domain still in use by other devices in race conditions.
	 */
	if (!tmp || tmp != domain) {
		domain_exit(domain);
		domain = tmp;
	}

	return domain;
}
2301
/*
 * Bitmask selecting which classes of device get a static 1:1 (identity)
 * mapping instead of per-device DMA remapping; consulted by
 * iommu_should_identity_map().
 */
static int iommu_identity_mapping;
#define IDENTMAP_ALL 1		/* identity-map every eligible device */
#define IDENTMAP_GFX 2		/* identity-map graphics devices */
#define IDENTMAP_AZALIA 4	/* identity-map Azalia (HD audio) devices */
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002306
David Woodhouseb2132032009-06-26 18:50:28 +01002307static int iommu_domain_identity_map(struct dmar_domain *domain,
2308 unsigned long long start,
2309 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002310{
David Woodhousec5395d52009-06-28 16:35:56 +01002311 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2312 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002313
David Woodhousec5395d52009-06-28 16:35:56 +01002314 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2315 dma_to_mm_pfn(last_vpfn))) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002316 printk(KERN_ERR "IOMMU: reserve iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002317 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002318 }
2319
David Woodhousec5395d52009-06-28 16:35:56 +01002320 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2321 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002322 /*
2323 * RMRR range might have overlap with physical memory range,
2324 * clear it first
2325 */
David Woodhousec5395d52009-06-28 16:35:56 +01002326 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002327
David Woodhousec5395d52009-06-28 16:35:56 +01002328 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2329 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002330 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002331}
2332
/*
 * iommu_prepare_identity_map - set up a 1:1 mapping of [start, end] for
 * @dev (used for RMRRs and the ISA/floppy workaround), including the
 * context entry, after sanity-checking the range against broken BIOSes.
 * Returns 0 on success or a negative errno; the domain is torn down on
 * failure.
 */
static int iommu_prepare_identity_map(struct device *dev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820, so which didn't get set
	   up to start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
		printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
		       dev_name(dev), start, end);
		return 0;
	}

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       dev_name(dev), start, end);

	/* Reject an RMRR whose end precedes its start. */
	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	/* Reject an RMRR that lies beyond the domain's address width. */
	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

 error:
	domain_exit(domain);
	return ret;
}
2394
2395static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
David Woodhouse0b9d9752014-03-09 15:48:15 -07002396 struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002397{
David Woodhouse0b9d9752014-03-09 15:48:15 -07002398 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002399 return 0;
David Woodhouse0b9d9752014-03-09 15:48:15 -07002400 return iommu_prepare_identity_map(dev, rmrr->base_address,
2401 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002402}
2403
#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
/*
 * Floppy workaround: the legacy floppy controller DMAs via the LPC/ISA
 * bridge, so identity-map the first 16MiB for that bridge device.
 */
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");

	/* Drop the reference taken by pci_get_class(). */
	pci_dev_put(pdev);
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002429
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002430static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002431
/*
 * si_domain_init - create the global static-identity (si) domain and
 * attach it to every active IOMMU. All IOMMUs must agree on the same
 * domain id, or setup is abandoned. With software passthrough (!@hw),
 * every online node's memory ranges are identity-mapped into the domain;
 * with hardware passthrough that is unnecessary.
 * Returns 0 on success, -EFAULT (or a mapping errno) on failure.
 */
static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;
	bool first = true;

	si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
	if (!si_domain)
		return -EFAULT;

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret < 0) {
			domain_exit(si_domain);
			return -EFAULT;
		} else if (first) {
			/* First IOMMU fixes the domain id ... */
			si_domain->id = ret;
			first = false;
		} else if (si_domain->id != ret) {
			/* ... and every later IOMMU must hand back the same id. */
			domain_exit(si_domain);
			return -EFAULT;
		}
		domain_attach_iommu(si_domain, iommu);
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	pr_debug("IOMMU: identity mapping domain is domain %d\n",
		 si_domain->id);

	/* Hardware passthrough needs no page tables at all. */
	if (hw)
		return 0;

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		int i;

		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			ret = iommu_domain_identity_map(si_domain,
					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
			if (ret)
				return ret;
		}
	}

	return 0;
}
2483
David Woodhouse9b226622014-03-09 14:03:28 -07002484static int identity_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002485{
2486 struct device_domain_info *info;
2487
2488 if (likely(!iommu_identity_mapping))
2489 return 0;
2490
David Woodhouse9b226622014-03-09 14:03:28 -07002491 info = dev->archdata.iommu;
Mike Traviscb452a42011-05-28 13:15:03 -05002492 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2493 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002494
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002495 return 0;
2496}
2497
2498static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5913c9b2014-03-09 16:27:31 -07002499 struct device *dev, int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002500{
David Woodhouse0ac72662014-03-09 13:19:22 -07002501 struct dmar_domain *ndomain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002502 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002503 u8 bus, devfn;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002504 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002505
David Woodhouse5913c9b2014-03-09 16:27:31 -07002506 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002507 if (!iommu)
2508 return -ENODEV;
2509
David Woodhouse5913c9b2014-03-09 16:27:31 -07002510 ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
David Woodhouse0ac72662014-03-09 13:19:22 -07002511 if (ndomain != domain)
2512 return -EBUSY;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002513
David Woodhouse5913c9b2014-03-09 16:27:31 -07002514 ret = domain_context_mapping(domain, dev, translation);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002515 if (ret) {
David Woodhouse5913c9b2014-03-09 16:27:31 -07002516 domain_remove_one_dev_info(domain, dev);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002517 return ret;
2518 }
2519
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002520 return 0;
2521}
2522
David Woodhouse0b9d9752014-03-09 15:48:15 -07002523static bool device_has_rmrr(struct device *dev)
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002524{
2525 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002526 struct device *tmp;
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002527 int i;
2528
Jiang Liu0e242612014-02-19 14:07:34 +08002529 rcu_read_lock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002530 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002531 /*
2532 * Return TRUE if this RMRR contains the device that
2533 * is passed in.
2534 */
2535 for_each_active_dev_scope(rmrr->devices,
2536 rmrr->devices_cnt, i, tmp)
David Woodhouse0b9d9752014-03-09 15:48:15 -07002537 if (tmp == dev) {
Jiang Liu0e242612014-02-19 14:07:34 +08002538 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002539 return true;
Jiang Liub683b232014-02-19 14:07:32 +08002540 }
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002541 }
Jiang Liu0e242612014-02-19 14:07:34 +08002542 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002543 return false;
2544}
2545
/*
 * iommu_should_identity_map - policy decision: should @dev be placed in
 * the static 1:1 identity domain?
 *
 * @startup: non-zero during boot-time setup, before the device's 64-bit
 * DMA capability is known.
 *
 * Returns 1 to identity-map the device, 0 to give it normal remapping.
 */
static int iommu_should_identity_map(struct device *dev, int startup)
{

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		/*
		 * We want to prevent any device associated with an RMRR from
		 * getting placed into the SI Domain. This is done because
		 * problems exist when devices are moved in and out of domains
		 * and their respective RMRR info is lost. We exempt USB devices
		 * from this process due to their usage of RMRRs that are known
		 * to not be needed after BIOS hand-off to OS.
		 */
		if (device_has_rmrr(dev) &&
		    (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
			return 0;

		if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
			return 1;

		if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
			return 1;

		if (!(iommu_identity_mapping & IDENTMAP_ALL))
			return 0;

		/*
		 * We want to start off with all devices in the 1:1 domain, and
		 * take them out later if we find they can't access all of memory.
		 *
		 * However, we can't do this for PCI devices behind bridges,
		 * because all PCI devices behind the same bridge will end up
		 * with the same source-id on their transactions.
		 *
		 * Practically speaking, we can't change things around for these
		 * devices at run-time, because we can't be sure there'll be no
		 * DMA transactions in flight for any of their siblings.
		 *
		 * So PCI devices (unless they're on the root bus) as well as
		 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
		 * the 1:1 domain, just in _case_ one of their siblings turns out
		 * not to be able to map all of memory.
		 */
		if (!pci_is_pcie(pdev)) {
			if (!pci_is_root_bus(pdev->bus))
				return 0;
			if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
				return 0;
		} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;
	} else {
		/* Non-PCI devices with RMRRs are likewise kept out. */
		if (device_has_rmrr(dev))
			return 0;
	}

	/*
	 * At boot time, we don't yet know if devices will be 64-bit capable.
	 * Assume that they will — if they turn out not to be, then we can
	 * take them out of the 1:1 domain later.
	 */
	if (!startup) {
		/*
		 * If the device's dma_mask is less than the system's memory
		 * size then this is not a candidate for identity mapping.
		 */
		u64 dma_mask = *dev->dma_mask;

		if (dev->coherent_dma_mask &&
		    dev->coherent_dma_mask < dma_mask)
			dma_mask = dev->coherent_dma_mask;

		return dma_mask >= dma_get_required_mask(dev);
	}

	return 1;
}
2623
David Woodhousecf04eee2014-03-21 16:49:04 +00002624static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2625{
2626 int ret;
2627
2628 if (!iommu_should_identity_map(dev, 1))
2629 return 0;
2630
2631 ret = domain_add_dev_info(si_domain, dev,
2632 hw ? CONTEXT_TT_PASS_THROUGH :
2633 CONTEXT_TT_MULTI_LEVEL);
2634 if (!ret)
2635 pr_info("IOMMU: %s identity mapping for device %s\n",
2636 hw ? "hardware" : "software", dev_name(dev));
2637 else if (ret == -ENODEV)
2638 /* device not associated with an iommu */
2639 ret = 0;
2640
2641 return ret;
2642}
2643
2644
/*
 * iommu_prepare_static_identity_mapping - boot-time pass that builds the
 * si domain and attaches every eligible device to it: first all PCI
 * devices, then the physical companions of ACPI devices listed in any
 * DRHD's device scope. Returns 0 on success or the first error seen.
 */
static int __init iommu_prepare_static_identity_mapping(int hw)
{
	struct pci_dev *pdev = NULL;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	struct device *dev;
	int i;
	int ret = 0;

	ret = si_domain_init(hw);
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
		ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
		if (ret)
			return ret;
	}

	for_each_active_iommu(iommu, drhd)
		for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
			struct acpi_device_physical_node *pn;
			struct acpi_device *adev;

			/* Only ACPI-enumerated scope entries are handled here. */
			if (dev->bus != &acpi_bus_type)
				continue;

			adev= to_acpi_device(dev);
			/* The physical-node list must not change under us. */
			mutex_lock(&adev->physical_node_lock);
			list_for_each_entry(pn, &adev->physical_node_list, node) {
				ret = dev_prepare_static_identity_mapping(pn->dev, hw);
				if (ret)
					break;
			}
			mutex_unlock(&adev->physical_node_lock);
			if (ret)
				return ret;
		}

	return 0;
}
2686
/*
 * init_dmars - one-shot boot initialization of the whole VT-d subsystem:
 * count and array up the IOMMUs, allocate per-IOMMU domains and root
 * entries, bring the hardware to a sane state, pick the invalidation
 * method, build identity/RMRR/ISA mappings, and finally enable
 * translation on every non-ignored IOMMU.
 * Returns 0 on success; on failure the partially-initialized IOMMUs and
 * global arrays are torn down via the goto-cleanup chain.
 */
static int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct device *dev;
	struct intel_iommu *iommu;
	int i, ret;

	/*
	 * for each drhd
	 * allocate root
	 * initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path all other access are read
		 * only
		 */
		if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
			g_num_of_iommus++;
			continue;
		}
		printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
			    IOMMU_UNITS_SUPPORTED);
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			   GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		ret = -ENOMEM;
		goto free_g_iommus;
	}

	for_each_active_iommu(iommu, drhd) {
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto free_iommu;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMU's. Need to Split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto free_iommu;
		}
		/* HW passthrough is only usable if every IOMMU supports it. */
		if (!ecap_pass_through(iommu->ecap))
			hw_pass_through = 0;
	}

	/*
	 * Start from the sane iommu hardware state.
	 */
	for_each_active_iommu(iommu, drhd) {
		/*
		 * If the queued invalidation is already initialized by us
		 * (for example, while enabling interrupt-remapping) then
		 * we got the things already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	/* Pick queued vs. register-based invalidation per IOMMU. */
	for_each_active_iommu(iommu, drhd) {
		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
			       "invalidation\n",
			       iommu->seq_id,
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
			       "invalidation\n",
			       iommu->seq_id,
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

	if (iommu_pass_through)
		iommu_identity_mapping |= IDENTMAP_ALL;

#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
	iommu_identity_mapping |= IDENTMAP_GFX;
#endif

	check_tylersburg_isoch();

	/*
	 * If pass through is not set or not enabled, setup context entries for
	 * identity mappings for rmrr, gfx, and isa and may fall back to static
	 * identity mapping if iommu_identity_mapping is set.
	 */
	if (iommu_identity_mapping) {
		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
		if (ret) {
			printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
			goto free_iommu;
		}
	}
	/*
	 * For each rmrr
	 * for each dev attached to rmrr
	 * do
	 * locate drhd for dev, alloc domain for dev
	 * allocate free domain
	 * allocate page table entries for rmrr
	 * if context not allocated for bus
	 * allocate and init context
	 * set present in root table for this bus
	 * init context with domain, translation etc
	 * endfor
	 * endfor
	 */
	printk(KERN_INFO "IOMMU: Setting RMRR:\n");
	for_each_rmrr_units(rmrr) {
		/* some BIOS lists non-exist devices in DMAR table. */
		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
					  i, dev) {
			ret = iommu_prepare_rmrr_dev(rmrr, dev);
			if (ret)
				printk(KERN_ERR
				       "IOMMU: mapping reserved region failed\n");
		}
	}

	iommu_prepare_isa();

	/*
	 * for each drhd
	 * enable fault log
	 * global invalidate context cache
	 * global invalidate iotlb
	 * enable translation
	 */
	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto free_iommu;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
		iommu_enable_translation(iommu);
		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;

free_iommu:
	for_each_active_iommu(iommu, drhd)
		free_dmar_iommu(iommu);
	kfree(deferred_flush);
free_g_iommus:
	kfree(g_iommus);
error:
	return ret;
}
2888
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002889/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002890static struct iova *intel_alloc_iova(struct device *dev,
2891 struct dmar_domain *domain,
2892 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002893{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002894 struct iova *iova = NULL;
2895
David Woodhouse875764d2009-06-28 21:20:51 +01002896 /* Restrict dma_mask to the width that the iommu can handle */
2897 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2898
2899 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002900 /*
2901 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002902 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002903 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002904 */
David Woodhouse875764d2009-06-28 21:20:51 +01002905 iova = alloc_iova(&domain->iovad, nrpages,
2906 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2907 if (iova)
2908 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002909 }
David Woodhouse875764d2009-06-28 21:20:51 +01002910 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2911 if (unlikely(!iova)) {
2912 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
David Woodhouse207e3592014-03-09 16:12:32 -07002913 nrpages, dev_name(dev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002914 return NULL;
2915 }
2916
2917 return iova;
2918}
2919
David Woodhoused4b709f2014-03-09 16:07:40 -07002920static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002921{
2922 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002923 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002924
David Woodhoused4b709f2014-03-09 16:07:40 -07002925 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002926 if (!domain) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002927 printk(KERN_ERR "Allocating domain for %s failed",
2928 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002929 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002930 }
2931
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002932 /* make sure context mapping is ok */
David Woodhoused4b709f2014-03-09 16:07:40 -07002933 if (unlikely(!domain_context_mapped(dev))) {
2934 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002935 if (ret) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002936 printk(KERN_ERR "Domain context map for %s failed",
2937 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002938 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002939 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002940 }
2941
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002942 return domain;
2943}
2944
David Woodhoused4b709f2014-03-09 16:07:40 -07002945static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
David Woodhouse147202a2009-07-07 19:43:20 +01002946{
2947 struct device_domain_info *info;
2948
2949 /* No lock here, assumes no domain exit in normal case */
David Woodhoused4b709f2014-03-09 16:07:40 -07002950 info = dev->archdata.iommu;
David Woodhouse147202a2009-07-07 19:43:20 +01002951 if (likely(info))
2952 return info->domain;
2953
2954 return __get_valid_domain_for_dev(dev);
2955}
2956
David Woodhouse3d891942014-03-06 15:59:26 +00002957static int iommu_dummy(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002958{
David Woodhouse3d891942014-03-06 15:59:26 +00002959 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002960}
2961
/* Check if the dev needs to go through non-identity map and unmap process.*/
static int iommu_no_mapping(struct device *dev)
{
	int found;

	/* Quirked-out devices get no translation at all. */
	if (iommu_dummy(dev))
		return 1;

	/* Without identity mapping enabled, everything is translated. */
	if (!iommu_identity_mapping)
		return 0;

	found = identity_mapping(dev);
	if (found) {
		/* Device currently sits in si_domain (identity map). */
		if (iommu_should_identity_map(dev, 0))
			return 1;
		else {
			/*
			 * 32 bit DMA is removed from si_domain and fall back
			 * to non-identity mapping.
			 */
			domain_remove_one_dev_info(si_domain, dev);
			printk(KERN_INFO "32bit %s uses non-identity mapping\n",
			       dev_name(dev));
			return 0;
		}
	} else {
		/*
		 * In case of a detached 64 bit DMA device from vm, the device
		 * is put into si_domain for identity mapping.
		 */
		if (iommu_should_identity_map(dev, 0)) {
			int ret;
			ret = domain_add_dev_info(si_domain, dev,
						  hw_pass_through ?
						  CONTEXT_TT_PASS_THROUGH :
						  CONTEXT_TT_MULTI_LEVEL);
			if (!ret) {
				printk(KERN_INFO "64bit %s uses identity mapping\n",
				       dev_name(dev));
				return 1;
			}
		}
	}

	/* Default: device goes through the normal map/unmap path. */
	return 0;
}
3008
/*
 * Map a single physically-contiguous region for DMA.
 * Returns the bus (IOVA) address on success, @paddr itself for devices that
 * bypass translation, or 0 on failure.
 */
static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;
	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;

	BUG_ON(dir == DMA_NONE);

	/* Identity-mapped / dummy devices: bus address == physical address. */
	if (iommu_no_mapping(dev))
		return paddr;

	domain = get_valid_domain_for_dev(dev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	/* size becomes a page count from here on, not a byte count. */
	size = aligned_nrpages(paddr, size);

	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
	if (!iova)
		goto error;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr - (paddr + size) might be partial page, we should map the whole
	 * page.  Note: if two part of one page are separately mapped, we
	 * might have two guest_addr mapping to the same host paddr, but this
	 * is not a big problem
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
				 mm_to_dma_pfn(paddr_pfn), size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
	else
		iommu_flush_write_buffer(iommu);

	/* Bus address = IOVA page base plus the sub-page offset of paddr. */
	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
		dev_name(dev), size, (unsigned long long)paddr, dir);
	return 0;
}
3073
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003074static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3075 unsigned long offset, size_t size,
3076 enum dma_data_direction dir,
3077 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003078{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003079 return __intel_map_single(dev, page_to_phys(page) + offset, size,
David Woodhouse46333e32014-03-10 20:01:21 -07003080 dir, *dev->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003081}
3082
/*
 * Drain every per-IOMMU deferred-unmap queue: invalidate the IOTLB for the
 * queued ranges, free their IOVAs and any queued page-table freelists.
 * Caller must hold async_umap_flush_lock (see flush_unmaps_timeout/add_unmap).
 */
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		/* In caching mode, global flushes turn emulation expensive */
		if (!cap_caching_mode(iommu->cap))
			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];
			struct dmar_domain *domain = deferred_flush[i].domain[j];

			/* On real hardware multiple invalidations are expensive */
			if (cap_caching_mode(iommu->cap))
				iommu_flush_iotlb_psi(iommu, domain->id,
					iova->pfn_lo, iova_size(iova),
					!deferred_flush[i].freelist[j], 0);
			else {
				mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
				iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
						(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
			}
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
			/* Page tables freed only after the IOTLB no longer
			 * references them. */
			if (deferred_flush[i].freelist[j])
				dma_free_pagelist(deferred_flush[i].freelist[j]);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}
3126
3127static void flush_unmaps_timeout(unsigned long data)
3128{
mark gross80b20dd2008-04-18 13:53:58 -07003129 unsigned long flags;
3130
3131 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003132 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07003133 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003134}
3135
/*
 * Queue an (iova, freelist) pair for deferred IOTLB invalidation on the
 * IOMMU serving @dom.  Flushes synchronously first if the queue hit its
 * high-water mark; otherwise arms a 10ms timer to batch invalidations.
 */
static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	/* Drain first so the slot write below never overruns the array. */
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].freelist[next] = freelist;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
3162
/*
 * Common unmap path: tear down the translation covering @dev_addr, then
 * either flush the IOTLB immediately (intel_iommu_strict) or queue the
 * invalidation for batched, deferred processing.
 */
static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
{
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;
	struct page *freelist;

	/* Bypassed devices were never mapped; nothing to undo. */
	if (iommu_no_mapping(dev))
		return;

	domain = find_domain(dev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
		      (unsigned long long)dev_addr))
		return;

	/* Unmap the full IOVA allocation, not just the caller's range. */
	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 dev_name(dev), start_pfn, last_pfn);

	/* freelist holds page-table pages that may only be freed after the
	 * IOTLB has been invalidated. */
	freelist = domain_unmap(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1, !freelist, 0);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		dma_free_pagelist(freelist);
	} else {
		add_unmap(domain, iova, freelist);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}
3206
Jiang Liud41a4ad2014-07-11 14:19:34 +08003207static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3208 size_t size, enum dma_data_direction dir,
3209 struct dma_attrs *attrs)
3210{
3211 intel_unmap(dev, dev_addr);
3212}
3213
/*
 * dma_map_ops::alloc — allocate and zero a coherent buffer, trying CMA
 * first when the caller may sleep, and map it DMA_BIDIRECTIONAL.
 */
static void *intel_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	struct page *page = NULL;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/* With translation, the IOMMU satisfies addressing limits, so the
	 * zone-restriction flags are unnecessary.  Without it, pick a DMA
	 * zone matching the coherent mask. */
	if (!iommu_no_mapping(dev))
		flags &= ~(GFP_DMA | GFP_DMA32);
	else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
			flags |= GFP_DMA;
		else
			flags |= GFP_DMA32;
	}

	if (flags & __GFP_WAIT) {
		unsigned int count = size >> PAGE_SHIFT;

		page = dma_alloc_from_contiguous(dev, count, order);
		/* CMA ignores the mask; reject pages an untranslated device
		 * could not address and fall back to alloc_pages(). */
		if (page && iommu_no_mapping(dev) &&
		    page_to_phys(page) + size > dev->coherent_dma_mask) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}

	if (!page)
		page = alloc_pages(flags, order);
	if (!page)
		return NULL;
	memset(page_address(page), 0, size);

	*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
					 DMA_BIDIRECTIONAL,
					 dev->coherent_dma_mask);
	if (*dma_handle)
		return page_address(page);
	/* Mapping failed: release via CMA if it came from there. */
	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
		__free_pages(page, order);

	return NULL;
}
3260
David Woodhouse5040a912014-03-09 16:14:00 -07003261static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003262 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003263{
3264 int order;
Akinobu Mita36746432014-06-04 16:06:51 -07003265 struct page *page = virt_to_page(vaddr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003266
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003267 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003268 order = get_order(size);
3269
Jiang Liud41a4ad2014-07-11 14:19:34 +08003270 intel_unmap(dev, dma_handle);
Akinobu Mita36746432014-06-04 16:06:51 -07003271 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3272 __free_pages(page, order);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003273}
3274
David Woodhouse5040a912014-03-09 16:14:00 -07003275static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003276 int nelems, enum dma_data_direction dir,
3277 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003278{
Jiang Liud41a4ad2014-07-11 14:19:34 +08003279 intel_unmap(dev, sglist[0].dma_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003280}
3281
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003282static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003283 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003284{
3285 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003286 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003287
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003288 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003289 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003290 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003291 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003292 }
3293 return nelems;
3294}
3295
/*
 * dma_map_ops::map_sg — map an entire scatterlist into one contiguous IOVA
 * range.  Returns the number of entries mapped, or 0 on failure.
 */
static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_vpfn;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(dev))
		return intel_nontranslate_map_sg(dev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(dev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	/* Total page count needed to back every segment. */
	for_each_sg(sglist, sg, nelems, i)
		size += aligned_nrpages(sg->offset, sg->length);

	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
				*dev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);

	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
	if (unlikely(ret)) {
		/* Undo any page-table pages allocated for the partial map. */
		dma_pte_free_pagetable(domain, start_vpfn,
				       start_vpfn + size - 1);
		__free_iova(&domain->iovad, iova);
		return 0;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}
3357
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003358static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3359{
3360 return !dma_addr;
3361}
3362
/* DMA operations installed for devices behind a VT-d IOMMU.
 * Note: no .map_single/.unmap_single — callers go through map_page. */
struct dma_map_ops intel_dma_ops = {
	.alloc = intel_alloc_coherent,
	.free = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};
3372
3373static inline int iommu_domain_cache_init(void)
3374{
3375 int ret = 0;
3376
3377 iommu_domain_cache = kmem_cache_create("iommu_domain",
3378 sizeof(struct dmar_domain),
3379 0,
3380 SLAB_HWCACHE_ALIGN,
3381
3382 NULL);
3383 if (!iommu_domain_cache) {
3384 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3385 ret = -ENOMEM;
3386 }
3387
3388 return ret;
3389}
3390
3391static inline int iommu_devinfo_cache_init(void)
3392{
3393 int ret = 0;
3394
3395 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3396 sizeof(struct device_domain_info),
3397 0,
3398 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003399 NULL);
3400 if (!iommu_devinfo_cache) {
3401 printk(KERN_ERR "Couldn't create devinfo cache\n");
3402 ret = -ENOMEM;
3403 }
3404
3405 return ret;
3406}
3407
3408static inline int iommu_iova_cache_init(void)
3409{
3410 int ret = 0;
3411
3412 iommu_iova_cache = kmem_cache_create("iommu_iova",
3413 sizeof(struct iova),
3414 0,
3415 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003416 NULL);
3417 if (!iommu_iova_cache) {
3418 printk(KERN_ERR "Couldn't create iova cache\n");
3419 ret = -ENOMEM;
3420 }
3421
3422 return ret;
3423}
3424
3425static int __init iommu_init_mempool(void)
3426{
3427 int ret;
3428 ret = iommu_iova_cache_init();
3429 if (ret)
3430 return ret;
3431
3432 ret = iommu_domain_cache_init();
3433 if (ret)
3434 goto domain_error;
3435
3436 ret = iommu_devinfo_cache_init();
3437 if (!ret)
3438 return ret;
3439
3440 kmem_cache_destroy(iommu_domain_cache);
3441domain_error:
3442 kmem_cache_destroy(iommu_iova_cache);
3443
3444 return -ENOMEM;
3445}
3446
3447static void __init iommu_exit_mempool(void)
3448{
3449 kmem_cache_destroy(iommu_devinfo_cache);
3450 kmem_cache_destroy(iommu_domain_cache);
3451 kmem_cache_destroy(iommu_iova_cache);
3452
3453}
3454
/*
 * Quirk for Intel IOAT (SNB QuickData) devices: if the BIOS reports them
 * under the wrong DRHD unit, taint and bypass translation for the device
 * rather than program a foreign IOMMU.
 */
static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
{
	struct dmar_drhd_unit *drhd;
	u32 vtbar;
	int rc;

	/* We know that this device on this chipset has its own IOMMU.
	 * If we find it under a different IOMMU, then the BIOS is lying
	 * to us. Hope that the IOMMU for this device is actually
	 * disabled, and it needs no translation...
	 */
	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
	if (rc) {
		/* "can't" happen */
		dev_info(&pdev->dev, "failed to run vt-d quirk\n");
		return;
	}
	/* Keep only the base-address bits of the VTBAR register. */
	vtbar &= 0xffff0000;

	/* we know that the this iommu should be at offset 0xa000 from vtbar */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
			    TAINT_FIRMWARE_WORKAROUND,
			    "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
		pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3482
/*
 * Mark DRHD units that should be ignored: units with an empty device scope,
 * and (when dmar_map_gfx is clear) units that cover only graphics devices.
 * Relies on the dev-scope iterator leaving @i at devices_cnt when no entry
 * matched the break condition.
 */
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;
	struct device *dev;
	int i;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			for_each_active_dev_scope(drhd->devices,
						  drhd->devices_cnt, i, dev)
				break;
			/* ignore DMAR unit if no devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	for_each_active_drhd_unit(drhd) {
		if (drhd->include_all)
			continue;

		/* Break on the first non-PCI or non-graphics device. */
		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, dev)
			if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
				break;
		if (i < drhd->devices_cnt)
			continue;

		/* This IOMMU has *only* gfx devices. Either bypass it or
		   set the gfx_mapped flag, as appropriate */
		if (dmar_map_gfx) {
			intel_iommu_gfx_mapped = 1;
		} else {
			drhd->ignored = 1;
			for_each_active_dev_scope(drhd->devices,
						  drhd->devices_cnt, i, dev)
				dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}
3523
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003524#ifdef CONFIG_SUSPEND
/*
 * Reprogram all IOMMU hardware after a resume: re-enable queued
 * invalidation, reload root entries, flush stale context/IOTLB state
 * and re-enable translation.  Always returns 0.
 */
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/* queued invalidation must be working before any flushes below */
	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		/* global context + IOTLB flush before turning translation on */
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
		iommu_enable_translation(iommu);
		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;
}
3558
/*
 * Globally invalidate the context cache and IOTLB on every active
 * IOMMU; used before saving state for suspend.
 */
static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}
3571
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003572static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003573{
3574 struct dmar_drhd_unit *drhd;
3575 struct intel_iommu *iommu = NULL;
3576 unsigned long flag;
3577
3578 for_each_active_iommu(iommu, drhd) {
3579 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3580 GFP_ATOMIC);
3581 if (!iommu->iommu_state)
3582 goto nomem;
3583 }
3584
3585 iommu_flush_all();
3586
3587 for_each_active_iommu(iommu, drhd) {
3588 iommu_disable_translation(iommu);
3589
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003590 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003591
3592 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3593 readl(iommu->reg + DMAR_FECTL_REG);
3594 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3595 readl(iommu->reg + DMAR_FEDATA_REG);
3596 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3597 readl(iommu->reg + DMAR_FEADDR_REG);
3598 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3599 readl(iommu->reg + DMAR_FEUADDR_REG);
3600
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003601 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003602 }
3603 return 0;
3604
3605nomem:
3606 for_each_active_iommu(iommu, drhd)
3607 kfree(iommu->iommu_state);
3608
3609 return -ENOMEM;
3610}
3611
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003612static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003613{
3614 struct dmar_drhd_unit *drhd;
3615 struct intel_iommu *iommu = NULL;
3616 unsigned long flag;
3617
3618 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003619 if (force_on)
3620 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3621 else
3622 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003623 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003624 }
3625
3626 for_each_active_iommu(iommu, drhd) {
3627
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003628 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003629
3630 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3631 iommu->reg + DMAR_FECTL_REG);
3632 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3633 iommu->reg + DMAR_FEDATA_REG);
3634 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3635 iommu->reg + DMAR_FEADDR_REG);
3636 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3637 iommu->reg + DMAR_FEUADDR_REG);
3638
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003639 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003640 }
3641
3642 for_each_active_iommu(iommu, drhd)
3643 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003644}
3645
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003646static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003647 .resume = iommu_resume,
3648 .suspend = iommu_suspend,
3649};
3650
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003651static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003652{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003653 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003654}
3655
3656#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003657static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003658#endif /* CONFIG_PM */
3659
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003660
3661int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3662{
3663 struct acpi_dmar_reserved_memory *rmrr;
3664 struct dmar_rmrr_unit *rmrru;
3665
3666 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3667 if (!rmrru)
3668 return -ENOMEM;
3669
3670 rmrru->hdr = header;
3671 rmrr = (struct acpi_dmar_reserved_memory *)header;
3672 rmrru->base_address = rmrr->base_address;
3673 rmrru->end_address = rmrr->end_address;
Jiang Liu2e455282014-02-19 14:07:36 +08003674 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3675 ((void *)rmrr) + rmrr->header.length,
3676 &rmrru->devices_cnt);
3677 if (rmrru->devices_cnt && rmrru->devices == NULL) {
3678 kfree(rmrru);
3679 return -ENOMEM;
3680 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003681
Jiang Liu2e455282014-02-19 14:07:36 +08003682 list_add(&rmrru->list, &dmar_rmrr_units);
3683
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003684 return 0;
3685}
3686
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003687int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3688{
3689 struct acpi_dmar_atsr *atsr;
3690 struct dmar_atsr_unit *atsru;
3691
3692 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3693 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3694 if (!atsru)
3695 return -ENOMEM;
3696
3697 atsru->hdr = hdr;
3698 atsru->include_all = atsr->flags & 0x1;
Jiang Liu2e455282014-02-19 14:07:36 +08003699 if (!atsru->include_all) {
3700 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
3701 (void *)atsr + atsr->header.length,
3702 &atsru->devices_cnt);
3703 if (atsru->devices_cnt && atsru->devices == NULL) {
3704 kfree(atsru);
3705 return -ENOMEM;
3706 }
3707 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003708
Jiang Liu0e242612014-02-19 14:07:34 +08003709 list_add_rcu(&atsru->list, &dmar_atsr_units);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003710
3711 return 0;
3712}
3713
Jiang Liu9bdc5312014-01-06 14:18:27 +08003714static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
3715{
3716 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
3717 kfree(atsru);
3718}
3719
3720static void intel_iommu_free_dmars(void)
3721{
3722 struct dmar_rmrr_unit *rmrru, *rmrr_n;
3723 struct dmar_atsr_unit *atsru, *atsr_n;
3724
3725 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
3726 list_del(&rmrru->list);
3727 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
3728 kfree(rmrru);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003729 }
3730
Jiang Liu9bdc5312014-01-06 14:18:27 +08003731 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
3732 list_del(&atsru->list);
3733 intel_iommu_free_atsr(atsru);
3734 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003735}
3736
/*
 * Decide whether ATS may be used for @dev: walk up to the PCIe root port
 * above the device and look it up in the parsed ATSR units for the same
 * PCI segment.  Returns 1 if an ATSR covers the root port (explicitly or
 * via INCLUDE_ALL), 0 otherwise.
 */
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i, ret = 1;
	struct pci_bus *bus;
	struct pci_dev *bridge = NULL;
	struct device *tmp;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	/* for a VF, the check applies to the physical function */
	dev = pci_physfn(dev);
	for (bus = dev->bus; bus; bus = bus->parent) {
		bridge = bus->self;
		/* a conventional-PCI bridge in the path rules out ATS */
		if (!bridge || !pci_is_pcie(bridge) ||
		    pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
			break;
	}
	if (!bridge)
		return 0;

	/* the ATSR list is modified under RCU protection */
	rcu_read_lock();
	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment != pci_domain_nr(dev->bus))
			continue;

		for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
			if (tmp == &bridge->dev)
				goto out;		/* ret stays 1 */

		if (atsru->include_all)
			goto out;			/* ret stays 1 */
	}
	ret = 0;
out:
	rcu_read_unlock();

	return ret;
}
3777
Jiang Liu59ce0512014-02-19 14:07:35 +08003778int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
3779{
3780 int ret = 0;
3781 struct dmar_rmrr_unit *rmrru;
3782 struct dmar_atsr_unit *atsru;
3783 struct acpi_dmar_atsr *atsr;
3784 struct acpi_dmar_reserved_memory *rmrr;
3785
3786 if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
3787 return 0;
3788
3789 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
3790 rmrr = container_of(rmrru->hdr,
3791 struct acpi_dmar_reserved_memory, header);
3792 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3793 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
3794 ((void *)rmrr) + rmrr->header.length,
3795 rmrr->segment, rmrru->devices,
3796 rmrru->devices_cnt);
Jiang Liu27e24952014-06-20 15:08:06 +08003797 if(ret < 0)
Jiang Liu59ce0512014-02-19 14:07:35 +08003798 return ret;
3799 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
Jiang Liu27e24952014-06-20 15:08:06 +08003800 dmar_remove_dev_scope(info, rmrr->segment,
3801 rmrru->devices, rmrru->devices_cnt);
Jiang Liu59ce0512014-02-19 14:07:35 +08003802 }
3803 }
3804
3805 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3806 if (atsru->include_all)
3807 continue;
3808
3809 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3810 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3811 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
3812 (void *)atsr + atsr->header.length,
3813 atsr->segment, atsru->devices,
3814 atsru->devices_cnt);
3815 if (ret > 0)
3816 break;
3817 else if(ret < 0)
3818 return ret;
3819 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3820 if (dmar_remove_dev_scope(info, atsr->segment,
3821 atsru->devices, atsru->devices_cnt))
3822 break;
3823 }
3824 }
3825
3826 return 0;
3827}
3828
Fenghua Yu99dcade2009-11-11 07:23:06 -08003829/*
3830 * Here we only respond to action of unbound device from driver.
3831 *
3832 * Added device is not attached to its DMAR domain here yet. That will happen
3833 * when mapping the device to iova.
3834 */
3835static int device_notifier(struct notifier_block *nb,
3836 unsigned long action, void *data)
3837{
3838 struct device *dev = data;
Fenghua Yu99dcade2009-11-11 07:23:06 -08003839 struct dmar_domain *domain;
3840
David Woodhouse3d891942014-03-06 15:59:26 +00003841 if (iommu_dummy(dev))
David Woodhouse44cd6132009-12-02 10:18:30 +00003842 return 0;
3843
Jiang Liu7e7dfab2014-02-19 14:07:23 +08003844 if (action != BUS_NOTIFY_UNBOUND_DRIVER &&
3845 action != BUS_NOTIFY_DEL_DEVICE)
3846 return 0;
3847
David Woodhouse1525a292014-03-06 16:19:30 +00003848 domain = find_domain(dev);
Fenghua Yu99dcade2009-11-11 07:23:06 -08003849 if (!domain)
3850 return 0;
3851
Jiang Liu3a5670e2014-02-19 14:07:33 +08003852 down_read(&dmar_global_lock);
David Woodhousebf9c9ed2014-03-09 16:19:13 -07003853 domain_remove_one_dev_info(domain, dev);
Jiang Liuab8dfe22014-07-11 14:19:27 +08003854 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
Jiang Liu7e7dfab2014-02-19 14:07:23 +08003855 domain_exit(domain);
Jiang Liu3a5670e2014-02-19 14:07:33 +08003856 up_read(&dmar_global_lock);
Alex Williamsona97590e2011-03-04 14:52:16 -07003857
Fenghua Yu99dcade2009-11-11 07:23:06 -08003858 return 0;
3859}
3860
3861static struct notifier_block device_nb = {
3862 .notifier_call = device_notifier,
3863};
3864
Jiang Liu75f05562014-02-19 14:07:37 +08003865static int intel_iommu_memory_notifier(struct notifier_block *nb,
3866 unsigned long val, void *v)
3867{
3868 struct memory_notify *mhp = v;
3869 unsigned long long start, end;
3870 unsigned long start_vpfn, last_vpfn;
3871
3872 switch (val) {
3873 case MEM_GOING_ONLINE:
3874 start = mhp->start_pfn << PAGE_SHIFT;
3875 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
3876 if (iommu_domain_identity_map(si_domain, start, end)) {
3877 pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
3878 start, end);
3879 return NOTIFY_BAD;
3880 }
3881 break;
3882
3883 case MEM_OFFLINE:
3884 case MEM_CANCEL_ONLINE:
3885 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
3886 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
3887 while (start_vpfn <= last_vpfn) {
3888 struct iova *iova;
3889 struct dmar_drhd_unit *drhd;
3890 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003891 struct page *freelist;
Jiang Liu75f05562014-02-19 14:07:37 +08003892
3893 iova = find_iova(&si_domain->iovad, start_vpfn);
3894 if (iova == NULL) {
3895 pr_debug("dmar: failed get IOVA for PFN %lx\n",
3896 start_vpfn);
3897 break;
3898 }
3899
3900 iova = split_and_remove_iova(&si_domain->iovad, iova,
3901 start_vpfn, last_vpfn);
3902 if (iova == NULL) {
3903 pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
3904 start_vpfn, last_vpfn);
3905 return NOTIFY_BAD;
3906 }
3907
David Woodhouseea8ea462014-03-05 17:09:32 +00003908 freelist = domain_unmap(si_domain, iova->pfn_lo,
3909 iova->pfn_hi);
3910
Jiang Liu75f05562014-02-19 14:07:37 +08003911 rcu_read_lock();
3912 for_each_active_iommu(iommu, drhd)
3913 iommu_flush_iotlb_psi(iommu, si_domain->id,
Jiang Liua156ef92014-07-11 14:19:36 +08003914 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00003915 !freelist, 0);
Jiang Liu75f05562014-02-19 14:07:37 +08003916 rcu_read_unlock();
David Woodhouseea8ea462014-03-05 17:09:32 +00003917 dma_free_pagelist(freelist);
Jiang Liu75f05562014-02-19 14:07:37 +08003918
3919 start_vpfn = iova->pfn_hi + 1;
3920 free_iova_mem(iova);
3921 }
3922 break;
3923 }
3924
3925 return NOTIFY_OK;
3926}
3927
3928static struct notifier_block intel_iommu_memory_nb = {
3929 .notifier_call = intel_iommu_memory_notifier,
3930 .priority = 0
3931};
3932
Alex Williamsona5459cf2014-06-12 16:12:31 -06003933
/* sysfs: VT-d architecture version of this IOMMU, "major:minor". */
static ssize_t intel_iommu_show_version(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	u32 ver = readl(iommu->reg + DMAR_VER_REG);
	return sprintf(buf, "%d:%d\n",
		       DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
}
static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);

/* sysfs: physical base address of this IOMMU's register set. */
static ssize_t intel_iommu_show_address(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->reg_phys);
}
static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);

/* sysfs: raw capability register contents. */
static ssize_t intel_iommu_show_cap(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);

/* sysfs: raw extended-capability register contents. */
static ssize_t intel_iommu_show_ecap(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->ecap);
}
static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);

static struct attribute *intel_iommu_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_address.attr,
	&dev_attr_cap.attr,
	&dev_attr_ecap.attr,
	NULL,
};

/* attribute group registered per IOMMU device under "intel-iommu" */
static struct attribute_group intel_iommu_group = {
	.name = "intel-iommu",
	.attrs = intel_iommu_attrs,
};

const struct attribute_group *intel_iommu_groups[] = {
	&intel_iommu_group,
	NULL,
};
3989
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003990int __init intel_iommu_init(void)
3991{
Jiang Liu9bdc5312014-01-06 14:18:27 +08003992 int ret = -ENODEV;
Takao Indoh3a93c842013-04-23 17:35:03 +09003993 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08003994 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003995
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003996 /* VT-d is required for a TXT/tboot launch, so enforce that */
3997 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003998
Jiang Liu3a5670e2014-02-19 14:07:33 +08003999 if (iommu_init_mempool()) {
4000 if (force_on)
4001 panic("tboot: Failed to initialize iommu memory\n");
4002 return -ENOMEM;
4003 }
4004
4005 down_write(&dmar_global_lock);
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004006 if (dmar_table_init()) {
4007 if (force_on)
4008 panic("tboot: Failed to initialize DMAR table\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004009 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004010 }
4011
Takao Indoh3a93c842013-04-23 17:35:03 +09004012 /*
4013 * Disable translation if already enabled prior to OS handover.
4014 */
Jiang Liu7c919772014-01-06 14:18:18 +08004015 for_each_active_iommu(iommu, drhd)
Takao Indoh3a93c842013-04-23 17:35:03 +09004016 if (iommu->gcmd & DMA_GCMD_TE)
4017 iommu_disable_translation(iommu);
Takao Indoh3a93c842013-04-23 17:35:03 +09004018
Suresh Siddhac2c72862011-08-23 17:05:19 -07004019 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004020 if (force_on)
4021 panic("tboot: Failed to initialize DMAR device scope\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004022 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004023 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07004024
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004025 if (no_iommu || dmar_disabled)
Jiang Liu9bdc5312014-01-06 14:18:27 +08004026 goto out_free_dmar;
Suresh Siddha2ae21012008-07-10 11:16:43 -07004027
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004028 if (list_empty(&dmar_rmrr_units))
4029 printk(KERN_INFO "DMAR: No RMRR found\n");
4030
4031 if (list_empty(&dmar_atsr_units))
4032 printk(KERN_INFO "DMAR: No ATSR found\n");
4033
Joseph Cihula51a63e62011-03-21 11:04:24 -07004034 if (dmar_init_reserved_ranges()) {
4035 if (force_on)
4036 panic("tboot: Failed to reserve iommu ranges\n");
Jiang Liu3a5670e2014-02-19 14:07:33 +08004037 goto out_free_reserved_range;
Joseph Cihula51a63e62011-03-21 11:04:24 -07004038 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004039
4040 init_no_remapping_devices();
4041
Joseph Cihulab7792602011-05-03 00:08:37 -07004042 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004043 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004044 if (force_on)
4045 panic("tboot: Failed to initialize DMARs\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004046 printk(KERN_ERR "IOMMU: dmar init failed\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004047 goto out_free_reserved_range;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004048 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08004049 up_write(&dmar_global_lock);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004050 printk(KERN_INFO
4051 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
4052
mark gross5e0d2a62008-03-04 15:22:08 -08004053 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004054#ifdef CONFIG_SWIOTLB
4055 swiotlb = 0;
4056#endif
David Woodhouse19943b02009-08-04 16:19:20 +01004057 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07004058
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004059 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004060
Alex Williamsona5459cf2014-06-12 16:12:31 -06004061 for_each_active_iommu(iommu, drhd)
4062 iommu->iommu_dev = iommu_device_create(NULL, iommu,
4063 intel_iommu_groups,
4064 iommu->name);
4065
Joerg Roedel4236d97d2011-09-06 17:56:07 +02004066 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004067 bus_register_notifier(&pci_bus_type, &device_nb);
Jiang Liu75f05562014-02-19 14:07:37 +08004068 if (si_domain && !hw_pass_through)
4069 register_memory_notifier(&intel_iommu_memory_nb);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004070
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02004071 intel_iommu_enabled = 1;
4072
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004073 return 0;
Jiang Liu9bdc5312014-01-06 14:18:27 +08004074
4075out_free_reserved_range:
4076 put_iova_domain(&reserved_iova_list);
Jiang Liu9bdc5312014-01-06 14:18:27 +08004077out_free_dmar:
4078 intel_iommu_free_dmars();
Jiang Liu3a5670e2014-02-19 14:07:33 +08004079 up_write(&dmar_global_lock);
4080 iommu_exit_mempool();
Jiang Liu9bdc5312014-01-06 14:18:27 +08004081 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004082}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07004083
Alex Williamson579305f2014-07-03 09:51:43 -06004084static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4085{
4086 struct intel_iommu *iommu = opaque;
4087
4088 iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4089 return 0;
4090}
4091
4092/*
4093 * NB - intel-iommu lacks any sort of reference counting for the users of
4094 * dependent devices. If multiple endpoints have intersecting dependent
4095 * devices, unbinding the driver from any one of them will possibly leave
4096 * the others unable to operate.
4097 */
Han, Weidong3199aa62009-02-26 17:31:12 +08004098static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004099 struct device *dev)
Han, Weidong3199aa62009-02-26 17:31:12 +08004100{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004101 if (!iommu || !dev || !dev_is_pci(dev))
Han, Weidong3199aa62009-02-26 17:31:12 +08004102 return;
4103
Alex Williamson579305f2014-07-03 09:51:43 -06004104 pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
Han, Weidong3199aa62009-02-26 17:31:12 +08004105}
4106
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004107static void domain_remove_one_dev_info(struct dmar_domain *domain,
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004108 struct device *dev)
Weidong Hanc7151a82008-12-08 22:51:37 +08004109{
Yijing Wangbca2b912013-10-31 17:26:04 +08004110 struct device_domain_info *info, *tmp;
Weidong Hanc7151a82008-12-08 22:51:37 +08004111 struct intel_iommu *iommu;
4112 unsigned long flags;
4113 int found = 0;
David Woodhouse156baca2014-03-09 14:00:57 -07004114 u8 bus, devfn;
Weidong Hanc7151a82008-12-08 22:51:37 +08004115
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004116 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08004117 if (!iommu)
4118 return;
4119
4120 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wangbca2b912013-10-31 17:26:04 +08004121 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004122 if (info->iommu == iommu && info->bus == bus &&
4123 info->devfn == devfn) {
David Woodhouse109b9b02012-05-25 17:43:02 +01004124 unlink_domain_info(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004125 spin_unlock_irqrestore(&device_domain_lock, flags);
4126
Yu Zhao93a23a72009-05-18 13:51:37 +08004127 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004128 iommu_detach_dev(iommu, info->bus, info->devfn);
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004129 iommu_detach_dependent_devices(iommu, dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08004130 free_devinfo_mem(info);
4131
4132 spin_lock_irqsave(&device_domain_lock, flags);
4133
4134 if (found)
4135 break;
4136 else
4137 continue;
4138 }
4139
4140 /* if there is no other devices under the same iommu
4141 * owned by this domain, clear this iommu in iommu_bmp
4142 * update iommu count and coherency
4143 */
David Woodhouse8bbc4412014-03-09 13:52:37 -07004144 if (info->iommu == iommu)
Weidong Hanc7151a82008-12-08 22:51:37 +08004145 found = 1;
4146 }
4147
Roland Dreier3e7abe22011-07-20 06:22:21 -07004148 spin_unlock_irqrestore(&device_domain_lock, flags);
4149
Weidong Hanc7151a82008-12-08 22:51:37 +08004150 if (found == 0) {
Jiang Liufb170fb2014-07-11 14:19:28 +08004151 domain_detach_iommu(domain, iommu);
4152 if (!domain_type_is_vm_or_si(domain))
4153 iommu_detach_domain(domain, iommu);
Weidong Hanc7151a82008-12-08 22:51:37 +08004154 }
Weidong Hanc7151a82008-12-08 22:51:37 +08004155}
4156
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004157static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08004158{
4159 int adjust_width;
4160
4161 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004162 domain_reserve_special_ranges(domain);
4163
4164 /* calculate AGAW */
4165 domain->gaw = guest_width;
4166 adjust_width = guestwidth_to_adjustwidth(guest_width);
4167 domain->agaw = width_to_agaw(adjust_width);
4168
Weidong Han5e98c4b2008-12-08 23:03:27 +08004169 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08004170 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01004171 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004172 domain->max_addr = 0;
Weidong Han5e98c4b2008-12-08 23:03:27 +08004173
4174 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07004175 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004176 if (!domain->pgd)
4177 return -ENOMEM;
4178 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4179 return 0;
4180}
4181
Joerg Roedel5d450802008-12-03 14:52:32 +01004182static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004183{
Joerg Roedel5d450802008-12-03 14:52:32 +01004184 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03004185
Jiang Liuab8dfe22014-07-11 14:19:27 +08004186 dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
Joerg Roedel5d450802008-12-03 14:52:32 +01004187 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03004188 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01004189 "intel_iommu_domain_init: dmar_domain == NULL\n");
4190 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03004191 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004192 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03004193 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01004194 "intel_iommu_domain_init() failed\n");
Jiang Liu92d03cc2014-02-19 14:07:28 +08004195 domain_exit(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01004196 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03004197 }
Allen Kay8140a952011-10-14 12:32:17 -07004198 domain_update_iommu_cap(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01004199 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004200
Joerg Roedel8a0e7152012-01-26 19:40:54 +01004201 domain->geometry.aperture_start = 0;
4202 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4203 domain->geometry.force_aperture = true;
4204
Joerg Roedel5d450802008-12-03 14:52:32 +01004205 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004206}
Kay, Allen M38717942008-09-09 18:37:29 +03004207
Joerg Roedel5d450802008-12-03 14:52:32 +01004208static void intel_iommu_domain_destroy(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004209{
Joerg Roedel5d450802008-12-03 14:52:32 +01004210 struct dmar_domain *dmar_domain = domain->priv;
4211
4212 domain->priv = NULL;
Jiang Liu92d03cc2014-02-19 14:07:28 +08004213 domain_exit(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03004214}
Kay, Allen M38717942008-09-09 18:37:29 +03004215
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004216static int intel_iommu_attach_device(struct iommu_domain *domain,
4217 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004218{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004219 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004220 struct intel_iommu *iommu;
4221 int addr_width;
David Woodhouse156baca2014-03-09 14:00:57 -07004222 u8 bus, devfn;
Kay, Allen M38717942008-09-09 18:37:29 +03004223
David Woodhouse7207d8f2014-03-09 16:31:06 -07004224 /* normally dev is not mapped */
4225 if (unlikely(domain_context_mapped(dev))) {
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004226 struct dmar_domain *old_domain;
4227
David Woodhouse1525a292014-03-06 16:19:30 +00004228 old_domain = find_domain(dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004229 if (old_domain) {
Jiang Liuab8dfe22014-07-11 14:19:27 +08004230 if (domain_type_is_vm_or_si(dmar_domain))
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004231 domain_remove_one_dev_info(old_domain, dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004232 else
4233 domain_remove_dev_info(old_domain);
4234 }
4235 }
4236
David Woodhouse156baca2014-03-09 14:00:57 -07004237 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004238 if (!iommu)
4239 return -ENODEV;
4240
4241 /* check if this iommu agaw is sufficient for max mapped address */
4242 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01004243 if (addr_width > cap_mgaw(iommu->cap))
4244 addr_width = cap_mgaw(iommu->cap);
4245
4246 if (dmar_domain->max_addr > (1LL << addr_width)) {
4247 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004248 "sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01004249 __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004250 return -EFAULT;
4251 }
Tom Lyona99c47a2010-05-17 08:20:45 +01004252 dmar_domain->gaw = addr_width;
4253
4254 /*
4255 * Knock out extra levels of page tables if necessary
4256 */
4257 while (iommu->agaw < dmar_domain->agaw) {
4258 struct dma_pte *pte;
4259
4260 pte = dmar_domain->pgd;
4261 if (dma_pte_present(pte)) {
Sheng Yang25cbff12010-06-12 19:21:42 +08004262 dmar_domain->pgd = (struct dma_pte *)
4263 phys_to_virt(dma_pte_addr(pte));
Jan Kiszka7a661012010-11-02 08:05:51 +01004264 free_pgtable_page(pte);
Tom Lyona99c47a2010-05-17 08:20:45 +01004265 }
4266 dmar_domain->agaw--;
4267 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004268
David Woodhouse5913c9b2014-03-09 16:27:31 -07004269 return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004270}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004271
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004272static void intel_iommu_detach_device(struct iommu_domain *domain,
4273 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004274{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004275 struct dmar_domain *dmar_domain = domain->priv;
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004276
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004277 domain_remove_one_dev_info(dmar_domain, dev);
Kay, Allen M38717942008-09-09 18:37:29 +03004278}
Kay, Allen M38717942008-09-09 18:37:29 +03004279
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004280static int intel_iommu_map(struct iommu_domain *domain,
4281 unsigned long iova, phys_addr_t hpa,
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004282 size_t size, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03004283{
Joerg Roedeldde57a22008-12-03 15:04:09 +01004284 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004285 u64 max_addr;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004286 int prot = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004287 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004288
Joerg Roedeldde57a22008-12-03 15:04:09 +01004289 if (iommu_prot & IOMMU_READ)
4290 prot |= DMA_PTE_READ;
4291 if (iommu_prot & IOMMU_WRITE)
4292 prot |= DMA_PTE_WRITE;
Sheng Yang9cf066972009-03-18 15:33:07 +08004293 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4294 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004295
David Woodhouse163cc522009-06-28 00:51:17 +01004296 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004297 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004298 u64 end;
4299
4300 /* check if minimum agaw is sufficient for mapped address */
Tom Lyon8954da12010-05-17 08:19:52 +01004301 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004302 if (end < max_addr) {
Tom Lyon8954da12010-05-17 08:19:52 +01004303 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004304 "sufficient for the mapped address (%llx)\n",
Tom Lyon8954da12010-05-17 08:19:52 +01004305 __func__, dmar_domain->gaw, max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004306 return -EFAULT;
4307 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01004308 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004309 }
David Woodhousead051222009-06-28 14:22:28 +01004310 /* Round up size to next multiple of PAGE_SIZE, if it and
4311 the low bits of hpa would take us onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01004312 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01004313 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4314 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004315 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03004316}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004317
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004318static size_t intel_iommu_unmap(struct iommu_domain *domain,
David Woodhouseea8ea462014-03-05 17:09:32 +00004319 unsigned long iova, size_t size)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004320{
Joerg Roedeldde57a22008-12-03 15:04:09 +01004321 struct dmar_domain *dmar_domain = domain->priv;
David Woodhouseea8ea462014-03-05 17:09:32 +00004322 struct page *freelist = NULL;
4323 struct intel_iommu *iommu;
4324 unsigned long start_pfn, last_pfn;
4325 unsigned int npages;
4326 int iommu_id, num, ndomains, level = 0;
Sheng Yang4b99d352009-07-08 11:52:52 +01004327
David Woodhouse5cf0a762014-03-19 16:07:49 +00004328 /* Cope with horrid API which requires us to unmap more than the
4329 size argument if it happens to be a large-page mapping. */
4330 if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
4331 BUG();
4332
4333 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
4334 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
4335
David Woodhouseea8ea462014-03-05 17:09:32 +00004336 start_pfn = iova >> VTD_PAGE_SHIFT;
4337 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
4338
4339 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
4340
4341 npages = last_pfn - start_pfn + 1;
4342
4343 for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
4344 iommu = g_iommus[iommu_id];
4345
4346 /*
4347 * find bit position of dmar_domain
4348 */
4349 ndomains = cap_ndoms(iommu->cap);
4350 for_each_set_bit(num, iommu->domain_ids, ndomains) {
4351 if (iommu->domains[num] == dmar_domain)
4352 iommu_flush_iotlb_psi(iommu, num, start_pfn,
4353 npages, !freelist, 0);
4354 }
4355
4356 }
4357
4358 dma_free_pagelist(freelist);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004359
David Woodhouse163cc522009-06-28 00:51:17 +01004360 if (dmar_domain->max_addr == iova + size)
4361 dmar_domain->max_addr = iova;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004362
David Woodhouse5cf0a762014-03-19 16:07:49 +00004363 return size;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004364}
Kay, Allen M38717942008-09-09 18:37:29 +03004365
Joerg Roedeld14d6572008-12-03 15:06:57 +01004366static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
Varun Sethibb5547ac2013-03-29 01:23:58 +05304367 dma_addr_t iova)
Kay, Allen M38717942008-09-09 18:37:29 +03004368{
Joerg Roedeld14d6572008-12-03 15:06:57 +01004369 struct dmar_domain *dmar_domain = domain->priv;
Kay, Allen M38717942008-09-09 18:37:29 +03004370 struct dma_pte *pte;
David Woodhouse5cf0a762014-03-19 16:07:49 +00004371 int level = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004372 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004373
David Woodhouse5cf0a762014-03-19 16:07:49 +00004374 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
Kay, Allen M38717942008-09-09 18:37:29 +03004375 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004376 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03004377
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004378 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03004379}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004380
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004381static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
4382 unsigned long cap)
4383{
4384 struct dmar_domain *dmar_domain = domain->priv;
4385
4386 if (cap == IOMMU_CAP_CACHE_COHERENCY)
4387 return dmar_domain->iommu_snooping;
Tom Lyon323f99c2010-07-02 16:56:14 -04004388 if (cap == IOMMU_CAP_INTR_REMAP)
Suresh Siddha95a02e92012-03-30 11:47:07 -07004389 return irq_remapping_enabled;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004390
4391 return 0;
4392}
4393
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004394static int intel_iommu_add_device(struct device *dev)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004395{
Alex Williamsona5459cf2014-06-12 16:12:31 -06004396 struct intel_iommu *iommu;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004397 struct iommu_group *group;
David Woodhouse156baca2014-03-09 14:00:57 -07004398 u8 bus, devfn;
Alex Williamson70ae6f02011-10-21 15:56:11 -04004399
Alex Williamsona5459cf2014-06-12 16:12:31 -06004400 iommu = device_to_iommu(dev, &bus, &devfn);
4401 if (!iommu)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004402 return -ENODEV;
4403
Alex Williamsona5459cf2014-06-12 16:12:31 -06004404 iommu_device_link(iommu->iommu_dev, dev);
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004405
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004406 group = iommu_group_get_for_dev(dev);
Alex Williamson783f1572012-05-30 14:19:43 -06004407
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004408 if (IS_ERR(group))
4409 return PTR_ERR(group);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004410
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004411 iommu_group_put(group);
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004412 return 0;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004413}
4414
4415static void intel_iommu_remove_device(struct device *dev)
4416{
Alex Williamsona5459cf2014-06-12 16:12:31 -06004417 struct intel_iommu *iommu;
4418 u8 bus, devfn;
4419
4420 iommu = device_to_iommu(dev, &bus, &devfn);
4421 if (!iommu)
4422 return;
4423
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004424 iommu_group_remove_device(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004425
4426 iommu_device_unlink(iommu->iommu_dev, dev);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004427}
4428
Thierry Redingb22f6432014-06-27 09:03:12 +02004429static const struct iommu_ops intel_iommu_ops = {
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004430 .domain_init = intel_iommu_domain_init,
4431 .domain_destroy = intel_iommu_domain_destroy,
4432 .attach_dev = intel_iommu_attach_device,
4433 .detach_dev = intel_iommu_detach_device,
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004434 .map = intel_iommu_map,
4435 .unmap = intel_iommu_unmap,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004436 .iova_to_phys = intel_iommu_iova_to_phys,
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004437 .domain_has_cap = intel_iommu_domain_has_cap,
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004438 .add_device = intel_iommu_add_device,
4439 .remove_device = intel_iommu_remove_device,
Ohad Ben-Cohen6d1c56a2011-11-10 11:32:30 +02004440 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004441};
David Woodhouse9af88142009-02-13 23:18:03 +00004442
Daniel Vetter94526182013-01-20 23:50:13 +01004443static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4444{
4445 /* G4x/GM45 integrated gfx dmar support is totally busted. */
4446 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4447 dmar_map_gfx = 0;
4448}
4449
4450DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4451DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4452DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4453DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4454DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4455DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4456DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4457
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004458static void quirk_iommu_rwbf(struct pci_dev *dev)
David Woodhouse9af88142009-02-13 23:18:03 +00004459{
4460 /*
4461 * Mobile 4 Series Chipset neglects to set RWBF capability,
Daniel Vetter210561f2013-01-21 19:48:59 +01004462 * but needs it. Same seems to hold for the desktop versions.
David Woodhouse9af88142009-02-13 23:18:03 +00004463 */
4464 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4465 rwbf_quirk = 1;
4466}
4467
4468DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
Daniel Vetter210561f2013-01-21 19:48:59 +01004469DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4470DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4471DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4472DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4473DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4474DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
David Woodhousee0fc7e02009-09-30 09:12:17 -07004475
Adam Jacksoneecfd572010-08-25 21:17:34 +01004476#define GGC 0x52
4477#define GGC_MEMORY_SIZE_MASK (0xf << 8)
4478#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4479#define GGC_MEMORY_SIZE_1M (0x1 << 8)
4480#define GGC_MEMORY_SIZE_2M (0x3 << 8)
4481#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4482#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4483#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4484#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
4485
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004486static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
David Woodhouse9eecabc2010-09-21 22:28:23 +01004487{
4488 unsigned short ggc;
4489
Adam Jacksoneecfd572010-08-25 21:17:34 +01004490 if (pci_read_config_word(dev, GGC, &ggc))
David Woodhouse9eecabc2010-09-21 22:28:23 +01004491 return;
4492
Adam Jacksoneecfd572010-08-25 21:17:34 +01004493 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
David Woodhouse9eecabc2010-09-21 22:28:23 +01004494 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4495 dmar_map_gfx = 0;
David Woodhouse6fbcfb32011-09-25 19:11:14 -07004496 } else if (dmar_map_gfx) {
4497 /* we have to ensure the gfx device is idle before we flush */
4498 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
4499 intel_iommu_strict = 1;
4500 }
David Woodhouse9eecabc2010-09-21 22:28:23 +01004501}
4502DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4503DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4504DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4505DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4506
David Woodhousee0fc7e02009-09-30 09:12:17 -07004507/* On Tylersburg chipsets, some BIOSes have been known to enable the
4508 ISOCH DMAR unit for the Azalia sound device, but not give it any
4509 TLB entries, which causes it to deadlock. Check for that. We do
4510 this in a function called from init_dmars(), instead of in a PCI
4511 quirk, because we don't want to print the obnoxious "BIOS broken"
4512 message if VT-d is actually disabled.
4513*/
4514static void __init check_tylersburg_isoch(void)
4515{
4516 struct pci_dev *pdev;
4517 uint32_t vtisochctrl;
4518
4519 /* If there's no Azalia in the system anyway, forget it. */
4520 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4521 if (!pdev)
4522 return;
4523 pci_dev_put(pdev);
4524
4525 /* System Management Registers. Might be hidden, in which case
4526 we can't do the sanity check. But that's OK, because the
4527 known-broken BIOSes _don't_ actually hide it, so far. */
4528 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4529 if (!pdev)
4530 return;
4531
4532 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4533 pci_dev_put(pdev);
4534 return;
4535 }
4536
4537 pci_dev_put(pdev);
4538
4539 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4540 if (vtisochctrl & 1)
4541 return;
4542
4543 /* Drop all bits other than the number of TLB entries */
4544 vtisochctrl &= 0x1c;
4545
4546 /* If we have the recommended number of TLB entries (16), fine. */
4547 if (vtisochctrl == 0x10)
4548 return;
4549
4550 /* Zero TLB entries? You get to ride the short bus to school. */
4551 if (!vtisochctrl) {
4552 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4553 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4554 dmi_get_system_info(DMI_BIOS_VENDOR),
4555 dmi_get_system_info(DMI_BIOS_VERSION),
4556 dmi_get_system_info(DMI_PRODUCT_VERSION));
4557 iommu_identity_mapping |= IDENTMAP_AZALIA;
4558 return;
4559 }
4560
4561 printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
4562 vtisochctrl);
4563}