/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"

#define ROOT_SIZE       VTD_PAGE_SIZE
#define CONTEXT_SIZE    VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START      (0xfee00000)
#define IOAPIC_RANGE_END        (0xfeefffff)
#define IOVA_START_ADDR         (0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH      (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)     ((unsigned long) min_t(uint64_t, \
                                __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)    (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
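/*
 * For example, with gaw == 48 a domain spans 2^48 bytes, i.e. 2^36 4KiB
 * pages, so __DOMAIN_MAX_PFN(48) == 2^36 - 1. On 64-bit builds
 * DOMAIN_MAX_PFN(48) is that same value; on 32-bit it saturates at
 * (unsigned long)-1 so PFN arithmetic stays within 'unsigned long'.
 */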

/* IO virtual address start page frame number */
#define IOVA_START_PFN          (1)

#define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN           IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN           IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE            (9)
#define LEVEL_MASK              (((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is a power-of-two multiple of 4KiB and that
 * the mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are a power-of-two multiple of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES     (~0xFFFUL)

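/*
 * AGAW (adjusted guest address width) encodes the page-table depth:
 * agaw 0 == 30-bit/2-level, 1 == 39-bit/3-level, 2 == 48-bit/4-level.
 * Each extra level contributes LEVEL_STRIDE (9) bits of address, so
 * width == 30 + 9 * agaw and the walk uses agaw + 2 levels.
 */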
static inline int agaw_to_level(int agaw)
{
        return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
        return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
        return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

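/*
 * Level helpers: at page-table level N, the index into that level's table
 * is taken from pfn bits [(N-1)*9, (N-1)*9 + 8]. E.g. at level 2 the
 * offset comes from pfn bits 9-17, and level_size(2) == 2^9 pages, i.e.
 * 2MiB of address space per entry.
 */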
static inline unsigned int level_to_offset_bits(int level)
{
        return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
        return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
        return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
        return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
        return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
        return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
        return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
        return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
        return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
        return page_to_dma_pfn(virt_to_page(p));
}

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
        u64     lo;
        u64     hi;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
        u64 lo;
        u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
        return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
        context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
        context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
                                                unsigned long value)
{
        context->lo &= (((u64)-1) << 4) | 3;
        context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
                                            unsigned long value)
{
        context->lo &= ~VTD_PAGE_MASK;
        context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
                                             unsigned long value)
{
        context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
                                         unsigned long value)
{
        context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
        context->lo = 0;
        context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
        u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
        pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
        return pte->val & VTD_PAGE_MASK;
#else
        /* Must have a full atomic 64-bit read */
        return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

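/* Bits 0 and 1 of a PTE are the read and write permission bits; the PTE
   counts as present if either of them is set. */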
static inline bool dma_pte_present(struct dma_pte *pte)
{
        return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
        return (pte->val & DMA_PTE_LARGE_PAGE);
}

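/* PTE walks never cross a 4KiB table page; this detects when an
   incremented PTE pointer has wrapped to the start of the next page. */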
static inline int first_pte_in_page(struct dma_pte *pte)
{
        return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

/*
 * This domain is a statically identity mapping domain.
 *      1. This domain creates a static 1:1 mapping to all usable memory.
 *      2. It maps to each iommu if successful.
 *      3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* A domain of this type represents a virtual machine; one such domain
 * may own devices across multiple iommus, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE     (1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY     (1 << 1)

struct dmar_domain {
        int     id;                     /* domain id */
        int     nid;                    /* node id */
        DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
                                        /* bitmap of iommus this domain uses */

        struct list_head devices;       /* all devices' list */
        struct iova_domain iovad;       /* iova's that belong to this domain */

        struct dma_pte  *pgd;           /* virtual address */
        int             gaw;            /* max guest address width */

        /* adjusted guest address width, 0 is level 2 30-bit */
        int             agaw;

        int             flags;          /* flags to find out type of domain */

        int             iommu_coherency;/* indicate coherency of iommu access */
        int             iommu_snooping; /* indicate snooping control feature */
        int             iommu_count;    /* reference count of iommu */
        int             iommu_superpage;/* Level of superpages supported:
                                           0 == 4KiB (no superpages), 1 == 2MiB,
                                           2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
        spinlock_t      iommu_lock;     /* protect iommu set in domain */
        u64             max_addr;       /* maximum mapped address */

        struct iommu_domain domain;     /* generic domain data structure for
                                           iommu core */
};

/* PCI domain-device relationship */
struct device_domain_info {
        struct list_head link;  /* link to domain siblings */
        struct list_head global; /* link to global list */
        u8 bus;                 /* PCI bus number */
        u8 devfn;               /* PCI devfn number */
        struct device *dev;     /* it's NULL for PCIe-to-PCI bridge */
        struct intel_iommu *iommu; /* IOMMU used by this device */
        struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
        struct list_head list;          /* list of rmrr units */
        struct acpi_dmar_header *hdr;   /* ACPI header */
        u64     base_address;           /* reserved base address */
        u64     end_address;            /* reserved end address */
        struct dmar_dev_scope *devices; /* target devices */
        int     devices_cnt;            /* target device count */
};

struct dmar_atsr_unit {
        struct list_head list;          /* list of ATSR units */
        struct acpi_dmar_header *hdr;   /* ACPI header */
        struct dmar_dev_scope *devices; /* target devices */
        int devices_cnt;                /* target device count */
        u8 include_all:1;               /* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
        list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
        int next;
        struct iova *iova[HIGH_WATER_MARK];
        struct dmar_domain *domain[HIGH_WATER_MARK];
        struct page *freelist[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void domain_remove_one_dev_info(struct dmar_domain *domain,
                                       struct device *dev);
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
                                           struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
                               struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static const struct iommu_ops intel_iommu_ops;

/* Convert a generic 'struct iommu_domain' to the private 'struct dmar_domain' */
static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct dmar_domain, domain);
}

static int __init intel_iommu_setup(char *str)
{
        if (!str)
                return -EINVAL;
        while (*str) {
                if (!strncmp(str, "on", 2)) {
                        dmar_disabled = 0;
                        printk(KERN_INFO "Intel-IOMMU: enabled\n");
                } else if (!strncmp(str, "off", 3)) {
                        dmar_disabled = 1;
                        printk(KERN_INFO "Intel-IOMMU: disabled\n");
                } else if (!strncmp(str, "igfx_off", 8)) {
                        dmar_map_gfx = 0;
                        printk(KERN_INFO
                                "Intel-IOMMU: disable GFX device mapping\n");
                } else if (!strncmp(str, "forcedac", 8)) {
                        printk(KERN_INFO
                                "Intel-IOMMU: Forcing DAC for PCI devices\n");
                        dmar_forcedac = 1;
                } else if (!strncmp(str, "strict", 6)) {
                        printk(KERN_INFO
                                "Intel-IOMMU: disable batched IOTLB flush\n");
                        intel_iommu_strict = 1;
                } else if (!strncmp(str, "sp_off", 6)) {
                        printk(KERN_INFO
                                "Intel-IOMMU: disable supported super page\n");
                        intel_iommu_superpage = 0;
                }

                str += strcspn(str, ",");
                while (*str == ',')
                        str++;
        }
        return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;

static inline void *alloc_pgtable_page(int node)
{
        struct page *page;
        void *vaddr = NULL;

        page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
        if (page)
                vaddr = page_address(page);
        return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
        free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
        return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
        kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
        return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
        kmem_cache_free(iommu_devinfo_cache, vaddr);
}

static inline int domain_type_is_vm(struct dmar_domain *domain)
{
        return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}

static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
        return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
                                DOMAIN_FLAG_STATIC_IDENTITY);
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
                                       unsigned long pfn)
{
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

        return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}

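/*
 * Find the widest AGAW supported by this IOMMU that does not exceed
 * max_gaw, scanning the SAGAW capability bits from the widest candidate
 * downwards. Returns -1 if none of the candidates is supported.
 */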
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
        unsigned long sagaw;
        int agaw = -1;

        sagaw = cap_sagaw(iommu->cap);
        for (agaw = width_to_agaw(max_gaw);
             agaw >= 0; agaw--) {
                if (test_bit(agaw, &sagaw))
                        break;
        }

        return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
        return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate agaw for each iommu.
 * "SAGAW" may be different across iommus: start from the default agaw,
 * and fall back to a smaller supported agaw for iommus that don't
 * support the default one.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
        return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
        int iommu_id;

        /* si_domain and vm domain should not get here. */
        BUG_ON(domain_type_is_vm_or_si(domain));
        iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
        if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
                return NULL;

        return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        bool found = false;
        int i;

        domain->iommu_coherency = 1;

        for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
                found = true;
                if (!ecap_coherent(g_iommus[i]->ecap)) {
                        domain->iommu_coherency = 0;
                        break;
                }
        }
        if (found)
                return;

        /* No hardware attached; use lowest common denominator */
        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
                if (!ecap_coherent(iommu->ecap)) {
                        domain->iommu_coherency = 0;
                        break;
                }
        }
        rcu_read_unlock();
}

static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        int ret = 1;

        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
                if (iommu != skip) {
                        if (!ecap_sc_support(iommu->ecap)) {
                                ret = 0;
                                break;
                        }
                }
        }
        rcu_read_unlock();

        return ret;
}

static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        int mask = 0xf;

        if (!intel_iommu_superpage)
                return 0;

        /* set iommu_superpage to the smallest common denominator */
        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
                if (iommu != skip) {
                        mask &= cap_super_page_val(iommu->cap);
                        if (!mask)
                                break;
                }
        }
        rcu_read_unlock();

        return fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
        domain_update_iommu_coherency(domain);
        domain->iommu_snooping = domain_update_iommu_snooping(NULL);
        domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}

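/*
 * Return the context entry for source-id (bus, devfn), allocating a new
 * context table if 'alloc' is set and none exists yet. With extended
 * context support (ECS), each root entry holds two table pointers:
 * root->lo covers devfn 0x00-0x7f and root->hi covers devfn 0x80-0xff,
 * with two 128-bit entries per function (hence devfn *= 2).
 */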
static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
                                                       u8 bus, u8 devfn, int alloc)
{
        struct root_entry *root = &iommu->root_entry[bus];
        struct context_entry *context;
        u64 *entry;

        entry = &root->lo;
        if (ecap_ecs(iommu->ecap)) {
                if (devfn >= 0x80) {
                        devfn -= 0x80;
                        entry = &root->hi;
                }
                devfn *= 2;
        }
        if (*entry & 1)
                context = phys_to_virt(*entry & VTD_PAGE_MASK);
        else {
                unsigned long phy_addr;

                if (!alloc)
                        return NULL;

                context = alloc_pgtable_page(iommu->node);
                if (!context)
                        return NULL;

                __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
                phy_addr = virt_to_phys((void *)context);
                *entry = phy_addr | 1;
                __iommu_flush_cache(iommu, entry, sizeof(*entry));
        }
        return &context[devfn];
}

static int iommu_dummy(struct device *dev)
{
        return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

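/*
 * Map a device to the IOMMU that translates it: match the device (or, for
 * PCI, a bridge above it) in each DRHD's device scope, falling back to an
 * include_all unit. The source-id is returned through *bus and *devfn.
 */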
static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
        struct dmar_drhd_unit *drhd = NULL;
        struct intel_iommu *iommu;
        struct device *tmp;
        struct pci_dev *ptmp, *pdev = NULL;
        u16 segment = 0;
        int i;

        if (iommu_dummy(dev))
                return NULL;

        if (dev_is_pci(dev)) {
                pdev = to_pci_dev(dev);
                segment = pci_domain_nr(pdev->bus);
        } else if (has_acpi_companion(dev))
                dev = &ACPI_COMPANION(dev)->dev;

        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
                if (pdev && segment != drhd->segment)
                        continue;

                for_each_active_dev_scope(drhd->devices,
                                          drhd->devices_cnt, i, tmp) {
                        if (tmp == dev) {
                                *bus = drhd->devices[i].bus;
                                *devfn = drhd->devices[i].devfn;
                                goto out;
                        }

                        if (!pdev || !dev_is_pci(tmp))
                                continue;

                        ptmp = to_pci_dev(tmp);
                        if (ptmp->subordinate &&
                            ptmp->subordinate->number <= pdev->bus->number &&
                            ptmp->subordinate->busn_res.end >= pdev->bus->number)
                                goto got_pdev;
                }

                if (pdev && drhd->include_all) {
                got_pdev:
                        *bus = pdev->bus->number;
                        *devfn = pdev->devfn;
                        goto out;
                }
        }
        iommu = NULL;
out:
        rcu_read_unlock();

        return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
                               void *addr, int size)
{
        if (!domain->iommu_coherency)
                clflush_cache_range(addr, size);
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
        struct context_entry *context;
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        context = iommu_context_addr(iommu, bus, devfn, 0);
        if (context)
                ret = context_present(context);
        spin_unlock_irqrestore(&iommu->lock, flags);
        return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
        struct context_entry *context;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        context = iommu_context_addr(iommu, bus, devfn, 0);
        if (context) {
                context_clear_entry(context);
                __iommu_flush_cache(iommu, context, sizeof(*context));
        }
        spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
        int i;
        unsigned long flags;
        struct context_entry *context;

        spin_lock_irqsave(&iommu->lock, flags);
        if (!iommu->root_entry)
                goto out;

        for (i = 0; i < ROOT_ENTRY_NR; i++) {
                context = iommu_context_addr(iommu, i, 0, 0);
                if (context)
                        free_pgtable_page(context);

                if (!ecap_ecs(iommu->ecap))
                        continue;

                context = iommu_context_addr(iommu, i, 0x80, 0);
                if (context)
                        free_pgtable_page(context);
        }
        free_pgtable_page(iommu->root_entry);
        iommu->root_entry = NULL;
out:
        spin_unlock_irqrestore(&iommu->lock, flags);
}

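/*
 * Walk the page table down to the PTE covering 'pfn'. If *target_level is
 * nonzero, missing intermediate tables are allocated on the way down;
 * a *target_level of 0 means "find the deepest existing entry" and never
 * allocates, and in that case *target_level is updated to the level at
 * which the walk stopped. E.g. calling with *target_level == 1 yields the
 * 4KiB leaf PTE, populating intermediate levels as needed.
 */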
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                                      unsigned long pfn, int *target_level)
{
        struct dma_pte *parent, *pte = NULL;
        int level = agaw_to_level(domain->agaw);
        int offset;

        BUG_ON(!domain->pgd);

        if (!domain_pfn_supported(domain, pfn))
                /* Address beyond IOMMU's addressing capabilities. */
                return NULL;

        parent = domain->pgd;

        while (1) {
                void *tmp_page;

                offset = pfn_level_offset(pfn, level);
                pte = &parent[offset];
                if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
                        break;
                if (level == *target_level)
                        break;

                if (!dma_pte_present(pte)) {
                        uint64_t pteval;

                        tmp_page = alloc_pgtable_page(domain->nid);

                        if (!tmp_page)
                                return NULL;

                        domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
                        pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
                        if (cmpxchg64(&pte->val, 0ULL, pteval))
                                /* Someone else set it while we were thinking; use theirs. */
                                free_pgtable_page(tmp_page);
                        else
                                domain_flush_cache(domain, pte, sizeof(*pte));
                }
                if (level == 1)
                        break;

                parent = phys_to_virt(dma_pte_addr(pte));
                level--;
        }

        if (!*target_level)
                *target_level = level;

        return pte;
}

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
                                         unsigned long pfn,
                                         int level, int *large_page)
{
        struct dma_pte *parent, *pte = NULL;
        int total = agaw_to_level(domain->agaw);
        int offset;

        parent = domain->pgd;
        while (level <= total) {
                offset = pfn_level_offset(pfn, total);
                pte = &parent[offset];
                if (level == total)
                        return pte;

                if (!dma_pte_present(pte)) {
                        *large_page = total;
                        break;
                }

                if (dma_pte_superpage(pte)) {
                        *large_page = total;
                        return pte;
                }

                parent = phys_to_virt(dma_pte_addr(pte));
                total--;
        }
        return NULL;
}

/* clear last level pte; a tlb flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
                                unsigned long start_pfn,
                                unsigned long last_pfn)
{
        unsigned int large_page = 1;
        struct dma_pte *first_pte, *pte;

        BUG_ON(!domain_pfn_supported(domain, start_pfn));
        BUG_ON(!domain_pfn_supported(domain, last_pfn));
        BUG_ON(start_pfn > last_pfn);

        /* we don't need lock here; nobody else touches the iova range */
        do {
                large_page = 1;
                first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
                if (!pte) {
                        start_pfn = align_to_level(start_pfn + 1, large_page + 1);
                        continue;
                }
                do {
                        dma_clear_pte(pte);
                        start_pfn += lvl_to_nr_pages(large_page);
                        pte++;
                } while (start_pfn <= last_pfn && !first_pte_in_page(pte));

                domain_flush_cache(domain, first_pte,
                                   (void *)pte - (void *)first_pte);

        } while (start_pfn && start_pfn <= last_pfn);
}

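/*
 * Recursively free page-table pages mapping [start_pfn, last_pfn],
 * clearing the parent PTE of every table that falls entirely within the
 * range. Leaf PTEs must already have been cleared.
 */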
static void dma_pte_free_level(struct dmar_domain *domain, int level,
                               struct dma_pte *pte, unsigned long pfn,
                               unsigned long start_pfn, unsigned long last_pfn)
{
        pfn = max(start_pfn, pfn);
        pte = &pte[pfn_level_offset(pfn, level)];

        do {
                unsigned long level_pfn;
                struct dma_pte *level_pte;

                if (!dma_pte_present(pte) || dma_pte_superpage(pte))
                        goto next;

                level_pfn = pfn & level_mask(level - 1);
                level_pte = phys_to_virt(dma_pte_addr(pte));

                if (level > 2)
                        dma_pte_free_level(domain, level - 1, level_pte,
                                           level_pfn, start_pfn, last_pfn);

                /* If range covers entire pagetable, free it */
                if (!(start_pfn > level_pfn ||
                      last_pfn < level_pfn + level_size(level) - 1)) {
                        dma_clear_pte(pte);
                        domain_flush_cache(domain, pte, sizeof(*pte));
                        free_pgtable_page(level_pte);
                }
next:
                pfn += level_size(level);
        } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
                                   unsigned long start_pfn,
                                   unsigned long last_pfn)
{
        BUG_ON(!domain_pfn_supported(domain, start_pfn));
        BUG_ON(!domain_pfn_supported(domain, last_pfn));
        BUG_ON(start_pfn > last_pfn);

        dma_pte_clear_range(domain, start_pfn, last_pfn);

        /* We don't need lock here; nobody else touches the iova range */
        dma_pte_free_level(domain, agaw_to_level(domain->agaw),
                           domain->pgd, 0, start_pfn, last_pfn);

        /* free pgd */
        if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
                free_pgtable_page(domain->pgd);
                domain->pgd = NULL;
        }
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
                                            int level, struct dma_pte *pte,
                                            struct page *freelist)
{
        struct page *pg;

        pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
        pg->freelist = freelist;
        freelist = pg;

        if (level == 1)
                return freelist;

        pte = page_address(pg);
        do {
                if (dma_pte_present(pte) && !dma_pte_superpage(pte))
                        freelist = dma_pte_list_pagetables(domain, level - 1,
                                                           pte, freelist);
                pte++;
        } while (!first_pte_in_page(pte));

        return freelist;
}

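/*
 * Clear PTEs covering [start_pfn, last_pfn] at this level and below,
 * chaining any page-table pages that become entirely unused onto
 * 'freelist' so they can be freed once the IOTLB has been flushed.
 */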
static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
                                        struct dma_pte *pte, unsigned long pfn,
                                        unsigned long start_pfn,
                                        unsigned long last_pfn,
                                        struct page *freelist)
{
        struct dma_pte *first_pte = NULL, *last_pte = NULL;

        pfn = max(start_pfn, pfn);
        pte = &pte[pfn_level_offset(pfn, level)];

        do {
                unsigned long level_pfn;

                if (!dma_pte_present(pte))
                        goto next;

                level_pfn = pfn & level_mask(level);

                /* If range covers entire pagetable, free it */
                if (start_pfn <= level_pfn &&
                    last_pfn >= level_pfn + level_size(level) - 1) {
                        /* These subordinate page tables are going away entirely. Don't
                           bother to clear them; we're just going to *free* them. */
                        if (level > 1 && !dma_pte_superpage(pte))
                                freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

                        dma_clear_pte(pte);
                        if (!first_pte)
                                first_pte = pte;
                        last_pte = pte;
                } else if (level > 1) {
                        /* Recurse down into a level that isn't *entirely* obsolete */
                        freelist = dma_pte_clear_level(domain, level - 1,
                                                       phys_to_virt(dma_pte_addr(pte)),
                                                       level_pfn, start_pfn, last_pfn,
                                                       freelist);
                }
next:
                pfn += level_size(level);
        } while (!first_pte_in_page(++pte) && pfn <= last_pfn);

        if (first_pte)
                domain_flush_cache(domain, first_pte,
                                   (void *)++last_pte - (void *)first_pte);

        return freelist;
}

/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
struct page *domain_unmap(struct dmar_domain *domain,
                          unsigned long start_pfn,
                          unsigned long last_pfn)
{
        struct page *freelist = NULL;

        BUG_ON(!domain_pfn_supported(domain, start_pfn));
        BUG_ON(!domain_pfn_supported(domain, last_pfn));
        BUG_ON(start_pfn > last_pfn);

        /* we don't need lock here; nobody else touches the iova range */
        freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
                                       domain->pgd, 0, start_pfn, last_pfn, NULL);

        /* free pgd */
        if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
                struct page *pgd_page = virt_to_page(domain->pgd);
                pgd_page->freelist = freelist;
                freelist = pgd_page;

                domain->pgd = NULL;
        }

        return freelist;
}

void dma_free_pagelist(struct page *freelist)
{
        struct page *pg;

        while ((pg = freelist)) {
                freelist = pg->freelist;
                free_pgtable_page(page_address(pg));
        }
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
        struct root_entry *root;
        unsigned long flags;

        root = (struct root_entry *)alloc_pgtable_page(iommu->node);
        if (!root) {
                pr_err("IOMMU: allocating root entry for %s failed\n",
                       iommu->name);
                return -ENOMEM;
        }

        __iommu_flush_cache(iommu, root, ROOT_SIZE);

        spin_lock_irqsave(&iommu->lock, flags);
        iommu->root_entry = root;
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
        u64 addr;
        u32 sts;
        unsigned long flag;

        addr = virt_to_phys(iommu->root_entry);
        if (ecap_ecs(iommu->ecap))
                addr |= DMA_RTADDR_RTT;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);

        writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_RTPS), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
        u32 val;
        unsigned long flag;

        if (!rwbf_quirk && !cap_rwbf(iommu->cap))
                return;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (!(val & DMA_GSTS_WBFS)), val);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
                                  u16 did, u16 source_id, u8 function_mask,
                                  u64 type)
{
        u64 val = 0;
        unsigned long flag;

        switch (type) {
        case DMA_CCMD_GLOBAL_INVL:
                val = DMA_CCMD_GLOBAL_INVL;
                break;
        case DMA_CCMD_DOMAIN_INVL:
                val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
                break;
        case DMA_CCMD_DEVICE_INVL:
                val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
                        | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
                break;
        default:
                BUG();
        }
        val |= DMA_CCMD_ICC;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
                      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
                                u64 addr, unsigned int size_order, u64 type)
{
        int tlb_offset = ecap_iotlb_offset(iommu->ecap);
        u64 val = 0, val_iva = 0;
        unsigned long flag;

        switch (type) {
        case DMA_TLB_GLOBAL_FLUSH:
                /* global flush doesn't need to set IVA_REG */
                val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
                break;
        case DMA_TLB_DSI_FLUSH:
                val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
                break;
        case DMA_TLB_PSI_FLUSH:
                val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
                /* IH bit is passed in as part of address */
                val_iva = size_order | addr;
                break;
        default:
                BUG();
        }
        /* Note: set drain read/write */
#if 0
        /*
         * This is probably meant to be extra-safe. Looks like we can
         * ignore it without any impact.
         */
        if (cap_read_drain(iommu->cap))
                val |= DMA_TLB_READ_DRAIN;
#endif
        if (cap_write_drain(iommu->cap))
                val |= DMA_TLB_WRITE_DRAIN;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        /* Note: Only uses first TLB reg currently */
        if (val_iva)
                dmar_writeq(iommu->reg + tlb_offset, val_iva);
        dmar_writeq(iommu->reg + tlb_offset + 8, val);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, tlb_offset + 8,
                      dmar_readq, (!(val & DMA_TLB_IVT)), val);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

        /* check IOTLB invalidation granularity */
        if (DMA_TLB_IAIG(val) == 0)
                printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
        if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
                pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
                         (unsigned long long)DMA_TLB_IIRG(type),
                         (unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *
iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
                        u8 bus, u8 devfn)
{
        bool found = false;
        unsigned long flags;
        struct device_domain_info *info;
        struct pci_dev *pdev;

        if (!ecap_dev_iotlb_support(iommu->ecap))
                return NULL;

        if (!iommu->qi)
                return NULL;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry(info, &domain->devices, link)
                if (info->iommu == iommu && info->bus == bus &&
                    info->devfn == devfn) {
                        found = true;
                        break;
                }
        spin_unlock_irqrestore(&device_domain_lock, flags);

        if (!found || !info->dev || !dev_is_pci(info->dev))
                return NULL;

        pdev = to_pci_dev(info->dev);

        if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
                return NULL;

        if (!dmar_find_matched_atsr_unit(pdev))
                return NULL;

        return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
        if (!info || !dev_is_pci(info->dev))
                return;

        pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
        if (!info->dev || !dev_is_pci(info->dev) ||
            !pci_ats_enabled(to_pci_dev(info->dev)))
                return;

        pci_disable_ats(to_pci_dev(info->dev));
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
                                  u64 addr, unsigned mask)
{
        u16 sid, qdep;
        unsigned long flags;
        struct device_domain_info *info;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry(info, &domain->devices, link) {
                struct pci_dev *pdev;
                if (!info->dev || !dev_is_pci(info->dev))
                        continue;

                pdev = to_pci_dev(info->dev);
                if (!pci_ats_enabled(pdev))
                        continue;

                sid = info->bus << 8 | info->devfn;
                qdep = pci_ats_queue_depth(pdev);
                qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
        }
        spin_unlock_irqrestore(&device_domain_lock, flags);
}

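/*
 * Note (illustrative, not in the original source): the source-id passed to
 * qi_flush_dev_iotlb() above is the PCI requester id, with the bus number
 * in the high byte and devfn in the low byte, so bus 0x12 and devfn 0x34
 * yield sid 0x1234.
 */
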
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
                                  unsigned long pfn, unsigned int pages,
                                  int ih, int map)
{
        unsigned int mask = ilog2(__roundup_pow_of_two(pages));
        uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

        BUG_ON(pages == 0);

        if (ih)
                ih = 1 << 6;
        /*
         * Fall back to domain-selective flush if there is no PSI support or
         * the size is too big.
         * PSI requires the page size to be 2 ^ x, and the base address to be
         * naturally aligned to the size.
         */
        if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
                iommu->flush.flush_iotlb(iommu, did, 0, 0,
                                         DMA_TLB_DSI_FLUSH);
        else
                iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
                                         DMA_TLB_PSI_FLUSH);

        /*
         * In caching mode, changes of pages from non-present to present
         * require a flush. However, the device IOTLB doesn't need to be
         * flushed in this case.
         */
        if (!cap_caching_mode(iommu->cap) || !map)
                iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

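/*
 * Worked example (illustrative): for pages = 5, __roundup_pow_of_two(5)
 * is 8, so mask = ilog2(8) = 3 and an aligned 8-page range is flushed.
 * PSI can only invalidate power-of-two, naturally aligned ranges, so
 * over-invalidation is the price of odd sizes.
 */
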
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
        u32 pmen;
        unsigned long flags;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);
        pmen = readl(iommu->reg + DMAR_PMEN_REG);
        pmen &= ~DMA_PMEN_EPM;
        writel(pmen, iommu->reg + DMAR_PMEN_REG);

        /* wait for the protected region status bit to clear */
        IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
                      readl, !(pmen & DMA_PMEN_PRS), pmen);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_enable_translation(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flags;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);
        iommu->gcmd |= DMA_GCMD_TE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_TES), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_disable_translation(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flag;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        iommu->gcmd &= ~DMA_GCMD_TE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (!(sts & DMA_GSTS_TES)), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int iommu_init_domains(struct intel_iommu *iommu)
{
        unsigned long ndomains;
        unsigned long nlongs;

        ndomains = cap_ndoms(iommu->cap);
        pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
                 iommu->seq_id, ndomains);
        nlongs = BITS_TO_LONGS(ndomains);

        spin_lock_init(&iommu->lock);

        /* TBD: there might be 64K domains,
         * consider other allocation for future chip
         */
        iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
        if (!iommu->domain_ids) {
                pr_err("IOMMU%d: allocating domain id array failed\n",
                       iommu->seq_id);
                return -ENOMEM;
        }
        iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
                                 GFP_KERNEL);
        if (!iommu->domains) {
                pr_err("IOMMU%d: allocating domain array failed\n",
                       iommu->seq_id);
                kfree(iommu->domain_ids);
                iommu->domain_ids = NULL;
                return -ENOMEM;
        }

        /*
         * If caching mode is set, then invalid translations are tagged
         * with domain id 0. Hence we need to pre-allocate it.
         */
        if (cap_caching_mode(iommu->cap))
                set_bit(0, iommu->domain_ids);
        return 0;
}

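/*
 * Worked example (illustrative): with cap_ndoms() == 256,
 * BITS_TO_LONGS(256) is 4 on a 64-bit kernel, so the domain-id bitmap
 * occupies four unsigned longs and iommu->domains holds 256 pointers.
 */
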
static void disable_dmar_iommu(struct intel_iommu *iommu)
{
        struct dmar_domain *domain;
        int i;

        if ((iommu->domains) && (iommu->domain_ids)) {
                for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
                        /*
                         * Domain id 0 is reserved for invalid translation
                         * if hardware supports caching mode.
                         */
                        if (cap_caching_mode(iommu->cap) && i == 0)
                                continue;

                        domain = iommu->domains[i];
                        clear_bit(i, iommu->domain_ids);
                        if (domain_detach_iommu(domain, iommu) == 0 &&
                            !domain_type_is_vm(domain))
                                domain_exit(domain);
                }
        }

        if (iommu->gcmd & DMA_GCMD_TE)
                iommu_disable_translation(iommu);
}

static void free_dmar_iommu(struct intel_iommu *iommu)
{
        if ((iommu->domains) && (iommu->domain_ids)) {
                kfree(iommu->domains);
                kfree(iommu->domain_ids);
                iommu->domains = NULL;
                iommu->domain_ids = NULL;
        }

        g_iommus[iommu->seq_id] = NULL;

        /* free context mapping */
        free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(int flags)
{
        /* domain id for virtual machine, it won't be set in context */
        static atomic_t vm_domid = ATOMIC_INIT(0);
        struct dmar_domain *domain;

        domain = alloc_domain_mem();
        if (!domain)
                return NULL;

        memset(domain, 0, sizeof(*domain));
        domain->nid = -1;
        domain->flags = flags;
        spin_lock_init(&domain->iommu_lock);
        INIT_LIST_HEAD(&domain->devices);
        if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
                domain->id = atomic_inc_return(&vm_domid);

        return domain;
}

static int __iommu_attach_domain(struct dmar_domain *domain,
                                 struct intel_iommu *iommu)
{
        int num;
        unsigned long ndomains;

        ndomains = cap_ndoms(iommu->cap);
        num = find_first_zero_bit(iommu->domain_ids, ndomains);
        if (num < ndomains) {
                set_bit(num, iommu->domain_ids);
                iommu->domains[num] = domain;
        } else {
                num = -ENOSPC;
        }

        return num;
}

static int iommu_attach_domain(struct dmar_domain *domain,
                               struct intel_iommu *iommu)
{
        int num;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        num = __iommu_attach_domain(domain, iommu);
        spin_unlock_irqrestore(&iommu->lock, flags);
        if (num < 0)
                pr_err("IOMMU: no free domain ids\n");

        return num;
}

static int iommu_attach_vm_domain(struct dmar_domain *domain,
                                  struct intel_iommu *iommu)
{
        int num;
        unsigned long ndomains;

        ndomains = cap_ndoms(iommu->cap);
        for_each_set_bit(num, iommu->domain_ids, ndomains)
                if (iommu->domains[num] == domain)
                        return num;

        return __iommu_attach_domain(domain, iommu);
}

static void iommu_detach_domain(struct dmar_domain *domain,
                                struct intel_iommu *iommu)
{
        unsigned long flags;
        int num, ndomains;

        spin_lock_irqsave(&iommu->lock, flags);
        if (domain_type_is_vm_or_si(domain)) {
                ndomains = cap_ndoms(iommu->cap);
                for_each_set_bit(num, iommu->domain_ids, ndomains) {
                        if (iommu->domains[num] == domain) {
                                clear_bit(num, iommu->domain_ids);
                                iommu->domains[num] = NULL;
                                break;
                        }
                }
        } else {
                clear_bit(domain->id, iommu->domain_ids);
                iommu->domains[domain->id] = NULL;
        }
        spin_unlock_irqrestore(&iommu->lock, flags);
}

static void domain_attach_iommu(struct dmar_domain *domain,
                                struct intel_iommu *iommu)
{
        unsigned long flags;

        spin_lock_irqsave(&domain->iommu_lock, flags);
        if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
                domain->iommu_count++;
                if (domain->iommu_count == 1)
                        domain->nid = iommu->node;
                domain_update_iommu_cap(domain);
        }
        spin_unlock_irqrestore(&domain->iommu_lock, flags);
}

static int domain_detach_iommu(struct dmar_domain *domain,
                               struct intel_iommu *iommu)
{
        unsigned long flags;
        int count = INT_MAX;

        spin_lock_irqsave(&domain->iommu_lock, flags);
        if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
                count = --domain->iommu_count;
                domain_update_iommu_cap(domain);
        }
        spin_unlock_irqrestore(&domain->iommu_lock, flags);

        return count;
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
        struct pci_dev *pdev = NULL;
        struct iova *iova;
        int i;

        init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
                         DMA_32BIT_PFN);

        lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
                          &reserved_rbtree_key);

        /* IOAPIC ranges shouldn't be accessed by DMA */
        iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
                            IOVA_PFN(IOAPIC_RANGE_END));
        if (!iova) {
                printk(KERN_ERR "Reserve IOAPIC range failed\n");
                return -ENODEV;
        }

        /* Reserve all PCI MMIO to avoid peer-to-peer access */
        for_each_pci_dev(pdev) {
                struct resource *r;

                for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                        r = &pdev->resource[i];
                        if (!r->flags || !(r->flags & IORESOURCE_MEM))
                                continue;
                        iova = reserve_iova(&reserved_iova_list,
                                            IOVA_PFN(r->start),
                                            IOVA_PFN(r->end));
                        if (!iova) {
                                printk(KERN_ERR "Reserve iova failed\n");
                                return -ENODEV;
                        }
                }
        }
        return 0;
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
        copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
        int agaw;
        int r = (gaw - 12) % 9;

        if (r == 0)
                agaw = gaw;
        else
                agaw = gaw + 9 - r;
        if (agaw > 64)
                agaw = 64;
        return agaw;
}

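/*
 * Worked example (illustrative): the adjusted width must land on a
 * page-table level boundary, i.e. 12 + 9 * n bits. For gaw = 39 or 48,
 * r is 0 and the width is kept; for gaw = 40, r = (40 - 12) % 9 = 1,
 * so agaw = 40 + 9 - 1 = 48.
 */
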
static int domain_init(struct dmar_domain *domain, int guest_width)
{
        struct intel_iommu *iommu;
        int adjust_width, agaw;
        unsigned long sagaw;

        init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
                         DMA_32BIT_PFN);
        domain_reserve_special_ranges(domain);

        /* calculate AGAW */
        iommu = domain_get_iommu(domain);
        if (guest_width > cap_mgaw(iommu->cap))
                guest_width = cap_mgaw(iommu->cap);
        domain->gaw = guest_width;
        adjust_width = guestwidth_to_adjustwidth(guest_width);
        agaw = width_to_agaw(adjust_width);
        sagaw = cap_sagaw(iommu->cap);
        if (!test_bit(agaw, &sagaw)) {
                /* hardware doesn't support it, choose a bigger one */
                pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
                agaw = find_next_bit(&sagaw, 5, agaw);
                if (agaw >= 5)
                        return -ENODEV;
        }
        domain->agaw = agaw;

        if (ecap_coherent(iommu->ecap))
                domain->iommu_coherency = 1;
        else
                domain->iommu_coherency = 0;

        if (ecap_sc_support(iommu->ecap))
                domain->iommu_snooping = 1;
        else
                domain->iommu_snooping = 0;

        if (intel_iommu_superpage)
                domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
        else
                domain->iommu_superpage = 0;

        domain->nid = iommu->node;

        /* always allocate the top pgd */
        domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
        if (!domain->pgd)
                return -ENOMEM;
        __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
        return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
        struct page *freelist = NULL;
        int i;

        /* Domain 0 is reserved, so don't process it */
        if (!domain)
                return;

        /* Flush any lazy unmaps that may reference this domain */
        if (!intel_iommu_strict)
                flush_unmaps_timeout(0);

        /* remove associated devices */
        domain_remove_dev_info(domain);

        /* destroy iovas */
        put_iova_domain(&domain->iovad);

        freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

        /* clear attached or cached domains */
        rcu_read_lock();
        for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
                iommu_detach_domain(domain, g_iommus[i]);
        rcu_read_unlock();

        dma_free_pagelist(freelist);

        free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain,
                                      struct intel_iommu *iommu,
                                      u8 bus, u8 devfn, int translation)
{
        struct context_entry *context;
        unsigned long flags;
        struct dma_pte *pgd;
        int id;
        int agaw;
        struct device_domain_info *info = NULL;

        pr_debug("Set context mapping for %02x:%02x.%d\n",
                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

        BUG_ON(!domain->pgd);
        BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
               translation != CONTEXT_TT_MULTI_LEVEL);

        spin_lock_irqsave(&iommu->lock, flags);
        context = iommu_context_addr(iommu, bus, devfn, 1);
        spin_unlock_irqrestore(&iommu->lock, flags);
        if (!context)
                return -ENOMEM;
        spin_lock_irqsave(&iommu->lock, flags);
        if (context_present(context)) {
                spin_unlock_irqrestore(&iommu->lock, flags);
                return 0;
        }

        id = domain->id;
        pgd = domain->pgd;

        if (domain_type_is_vm_or_si(domain)) {
                if (domain_type_is_vm(domain)) {
                        id = iommu_attach_vm_domain(domain, iommu);
                        if (id < 0) {
                                spin_unlock_irqrestore(&iommu->lock, flags);
                                pr_err("IOMMU: no free domain ids\n");
                                return -EFAULT;
                        }
                }

                /* Skip top levels of page tables for an
                 * iommu which has a smaller agaw than the default.
                 * Unnecessary for PT mode.
                 */
                if (translation != CONTEXT_TT_PASS_THROUGH) {
                        for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
                                pgd = phys_to_virt(dma_pte_addr(pgd));
                                if (!dma_pte_present(pgd)) {
                                        spin_unlock_irqrestore(&iommu->lock, flags);
                                        return -ENOMEM;
                                }
                        }
                }
        }

        context_set_domain_id(context, id);

        if (translation != CONTEXT_TT_PASS_THROUGH) {
                info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
                translation = info ? CONTEXT_TT_DEV_IOTLB :
                                     CONTEXT_TT_MULTI_LEVEL;
        }
        /*
         * In pass through mode, AW must be programmed to indicate the largest
         * AGAW value supported by hardware. And ASR is ignored by hardware.
         */
        if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
                context_set_address_width(context, iommu->msagaw);
        else {
                context_set_address_root(context, virt_to_phys(pgd));
                context_set_address_width(context, iommu->agaw);
        }

        context_set_translation_type(context, translation);
        context_set_fault_enable(context);
        context_set_present(context);
        domain_flush_cache(domain, context, sizeof(*context));

        /*
         * It's a non-present to present mapping. If hardware doesn't cache
         * non-present entries, we only need to flush the write-buffer. If it
         * _does_ cache non-present entries, then it does so in the special
         * domain #0, which we have to flush:
         */
        if (cap_caching_mode(iommu->cap)) {
                iommu->flush.flush_context(iommu, 0,
                                           (((u16)bus) << 8) | devfn,
                                           DMA_CCMD_MASK_NOBIT,
                                           DMA_CCMD_DEVICE_INVL);
                iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
        } else {
                iommu_flush_write_buffer(iommu);
        }
        iommu_enable_dev_iotlb(info);
        spin_unlock_irqrestore(&iommu->lock, flags);

        domain_attach_iommu(domain, iommu);

        return 0;
}

struct domain_context_mapping_data {
        struct dmar_domain *domain;
        struct intel_iommu *iommu;
        int translation;
};

static int domain_context_mapping_cb(struct pci_dev *pdev,
                                     u16 alias, void *opaque)
{
        struct domain_context_mapping_data *data = opaque;

        return domain_context_mapping_one(data->domain, data->iommu,
                                          PCI_BUS_NUM(alias), alias & 0xff,
                                          data->translation);
}

static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev,
                       int translation)
{
        struct intel_iommu *iommu;
        u8 bus, devfn;
        struct domain_context_mapping_data data;

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return -ENODEV;

        if (!dev_is_pci(dev))
                return domain_context_mapping_one(domain, iommu, bus, devfn,
                                                  translation);

        data.domain = domain;
        data.iommu = iommu;
        data.translation = translation;

        return pci_for_each_dma_alias(to_pci_dev(dev),
                                      &domain_context_mapping_cb, &data);
}

static int domain_context_mapped_cb(struct pci_dev *pdev,
                                    u16 alias, void *opaque)
{
        struct intel_iommu *iommu = opaque;

        return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
}

static int domain_context_mapped(struct device *dev)
{
        struct intel_iommu *iommu;
        u8 bus, devfn;

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return -ENODEV;

        if (!dev_is_pci(dev))
                return device_context_mapped(iommu, bus, devfn);

        return !pci_for_each_dma_alias(to_pci_dev(dev),
                                       domain_context_mapped_cb, iommu);
}

/* Returns the number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
                                            size_t size)
{
        host_addr &= ~PAGE_MASK;
        return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

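/*
 * Worked example (illustrative, 4KiB pages): host_addr = 0x1fff and
 * size = 0x2 give a page offset of 0xfff; PAGE_ALIGN(0xfff + 0x2) is
 * 0x2000, so the buffer spans two VT-d pages even though it is only two
 * bytes long.
 */
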
/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
                                          unsigned long iov_pfn,
                                          unsigned long phy_pfn,
                                          unsigned long pages)
{
        int support, level = 1;
        unsigned long pfnmerge;

        support = domain->iommu_superpage;

        /* To use a large page, the virtual *and* physical addresses
           must be aligned to 2MiB/1GiB/etc. Lower bits set in either
           of them will mean we have to use smaller pages. So just
           merge them and check both at once. */
        pfnmerge = iov_pfn | phy_pfn;

        while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
                pages >>= VTD_STRIDE_SHIFT;
                if (!pages)
                        break;
                pfnmerge >>= VTD_STRIDE_SHIFT;
                level++;
                support--;
        }
        return level;
}

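/*
 * Worked example (illustrative): with 2MiB superpage support, iov_pfn =
 * 0x10000 and phy_pfn = 0x80000 are both 512-page aligned (the low nine
 * bits of the merged pfn are clear), so a request covering at least 512
 * pages returns level 2 and a single 2MiB PTE can be used.
 */
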
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                            struct scatterlist *sg, unsigned long phys_pfn,
                            unsigned long nr_pages, int prot)
{
        struct dma_pte *first_pte = NULL, *pte = NULL;
        phys_addr_t uninitialized_var(pteval);
        unsigned long sg_res = 0;
        unsigned int largepage_lvl = 0;
        unsigned long lvl_pages = 0;

        BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));

        if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
                return -EINVAL;

        prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

        if (!sg) {
                sg_res = nr_pages;
                pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
        }

        while (nr_pages > 0) {
                uint64_t tmp;

                if (!sg_res) {
                        sg_res = aligned_nrpages(sg->offset, sg->length);
                        sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
                        sg->dma_length = sg->length;
                        pteval = page_to_phys(sg_page(sg)) | prot;
                        phys_pfn = pteval >> VTD_PAGE_SHIFT;
                }

                if (!pte) {
                        largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

                        first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
                        if (!pte)
                                return -ENOMEM;
                        /* It is a large page */
                        if (largepage_lvl > 1) {
                                pteval |= DMA_PTE_LARGE_PAGE;
                                lvl_pages = lvl_to_nr_pages(largepage_lvl);
                                /*
                                 * Ensure that old small page tables are
                                 * removed to make room for superpage,
                                 * if they exist.
                                 */
                                dma_pte_free_pagetable(domain, iov_pfn,
                                                       iov_pfn + lvl_pages - 1);
                        } else {
                                pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
                        }

                }
                /* We don't need a lock here, nobody else
                 * touches the iova range
                 */
                tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
                if (tmp) {
                        static int dumps = 5;
                        printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
                               iov_pfn, tmp, (unsigned long long)pteval);
                        if (dumps) {
                                dumps--;
                                debug_dma_dump_mappings(NULL);
                        }
                        WARN_ON(1);
                }

                lvl_pages = lvl_to_nr_pages(largepage_lvl);

                BUG_ON(nr_pages < lvl_pages);
                BUG_ON(sg_res < lvl_pages);

                nr_pages -= lvl_pages;
                iov_pfn += lvl_pages;
                phys_pfn += lvl_pages;
                pteval += lvl_pages * VTD_PAGE_SIZE;
                sg_res -= lvl_pages;

                /* If the next PTE would be the first in a new page, then we
                   need to flush the cache on the entries we've just written.
                   And then we'll need to recalculate 'pte', so clear it and
                   let it get set again in the if (!pte) block above.

                   If we're done (!nr_pages) we need to flush the cache too.

                   Also if we've been setting superpages, we may need to
                   recalculate 'pte' and switch back to smaller pages for the
                   end of the mapping, if the trailing size is not enough to
                   use another superpage (i.e. sg_res < lvl_pages). */
                pte++;
                if (!nr_pages || first_pte_in_page(pte) ||
                    (largepage_lvl > 1 && sg_res < lvl_pages)) {
                        domain_flush_cache(domain, first_pte,
                                           (void *)pte - (void *)first_pte);
                        pte = NULL;
                }

                if (!sg_res && nr_pages)
                        sg = sg_next(sg);
        }
        return 0;
}

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                                    struct scatterlist *sg, unsigned long nr_pages,
                                    int prot)
{
        return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                                     unsigned long phys_pfn, unsigned long nr_pages,
                                     int prot)
{
        return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}

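/*
 * Illustrative sketch, not part of the original driver: mapping 512
 * contiguous 4KiB pages at IOVA pfn 0x10000 onto physical pfn 0x80000,
 * read/write, would look roughly like
 *
 *      domain_pfn_mapping(domain, 0x10000, 0x80000, 512,
 *                         DMA_PTE_READ | DMA_PTE_WRITE);
 *
 * and, given 2MiB superpage support and this alignment, __domain_mapping()
 * could satisfy it with a single large PTE.
 */
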
static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
        if (!iommu)
                return;

        clear_context_table(iommu, bus, devfn);
        iommu->flush.flush_context(iommu, 0, 0, 0,
                                   DMA_CCMD_GLOBAL_INVL);
        iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static inline void unlink_domain_info(struct device_domain_info *info)
{
        assert_spin_locked(&device_domain_lock);
        list_del(&info->link);
        list_del(&info->global);
        if (info->dev)
                info->dev->archdata.iommu = NULL;
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
        struct device_domain_info *info, *tmp;
        unsigned long flags;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry_safe(info, tmp, &domain->devices, link) {
                unlink_domain_info(info);
                spin_unlock_irqrestore(&device_domain_lock, flags);

                iommu_disable_dev_iotlb(info);
                iommu_detach_dev(info->iommu, info->bus, info->devfn);

                if (domain_type_is_vm(domain)) {
                        iommu_detach_dependent_devices(info->iommu, info->dev);
                        domain_detach_iommu(domain, info->iommu);
                }

                free_devinfo_mem(info);
                spin_lock_irqsave(&device_domain_lock, flags);
        }
        spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: struct device->archdata.iommu stores the domain info.
 */
static struct dmar_domain *find_domain(struct device *dev)
{
        struct device_domain_info *info;

        /* No lock here, assumes no domain exit in normal case */
        info = dev->archdata.iommu;
        if (info)
                return info->domain;
        return NULL;
}

static inline struct device_domain_info *
dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
{
        struct device_domain_info *info;

        list_for_each_entry(info, &device_domain_list, global)
                if (info->iommu->segment == segment && info->bus == bus &&
                    info->devfn == devfn)
                        return info;

        return NULL;
}

static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
                                                int bus, int devfn,
                                                struct device *dev,
                                                struct dmar_domain *domain)
{
        struct dmar_domain *found = NULL;
        struct device_domain_info *info;
        unsigned long flags;

        info = alloc_devinfo_mem();
        if (!info)
                return NULL;

        info->bus = bus;
        info->devfn = devfn;
        info->dev = dev;
        info->domain = domain;
        info->iommu = iommu;

        spin_lock_irqsave(&device_domain_lock, flags);
        if (dev)
                found = find_domain(dev);
        else {
                struct device_domain_info *info2;
                info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
                if (info2)
                        found = info2->domain;
        }
        if (found) {
                spin_unlock_irqrestore(&device_domain_lock, flags);
                free_devinfo_mem(info);
                /* Caller must free the original domain */
                return found;
        }

        list_add(&info->link, &domain->devices);
        list_add(&info->global, &device_domain_list);
        if (dev)
                dev->archdata.iommu = info;
        spin_unlock_irqrestore(&device_domain_lock, flags);

        return domain;
}

static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
{
        *(u16 *)opaque = alias;
        return 0;
}

/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
{
        struct dmar_domain *domain, *tmp;
        struct intel_iommu *iommu;
        struct device_domain_info *info;
        u16 dma_alias;
        unsigned long flags;
        u8 bus, devfn;

        domain = find_domain(dev);
        if (domain)
                return domain;

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return NULL;

        if (dev_is_pci(dev)) {
                struct pci_dev *pdev = to_pci_dev(dev);

                pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);

                spin_lock_irqsave(&device_domain_lock, flags);
                info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
                                                      PCI_BUS_NUM(dma_alias),
                                                      dma_alias & 0xff);
                if (info) {
                        iommu = info->iommu;
                        domain = info->domain;
                }
                spin_unlock_irqrestore(&device_domain_lock, flags);

                /* DMA alias already has a domain, use it */
                if (info)
                        goto found_domain;
        }

        /* Allocate and initialize new domain for the device */
        domain = alloc_domain(0);
        if (!domain)
                return NULL;
        domain->id = iommu_attach_domain(domain, iommu);
        if (domain->id < 0) {
                free_domain_mem(domain);
                return NULL;
        }
        domain_attach_iommu(domain, iommu);
        if (domain_init(domain, gaw)) {
                domain_exit(domain);
                return NULL;
        }

        /* register PCI DMA alias device */
        if (dev_is_pci(dev)) {
                tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
                                           dma_alias & 0xff, NULL, domain);

                if (!tmp || tmp != domain) {
                        domain_exit(domain);
                        domain = tmp;
                }

                if (!domain)
                        return NULL;
        }

found_domain:
        tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);

        if (!tmp || tmp != domain) {
                domain_exit(domain);
                domain = tmp;
        }

        return domain;
}

static int iommu_identity_mapping;
#define IDENTMAP_ALL            1
#define IDENTMAP_GFX            2
#define IDENTMAP_AZALIA         4

static int iommu_domain_identity_map(struct dmar_domain *domain,
                                     unsigned long long start,
                                     unsigned long long end)
{
        unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
        unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

        if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
                          dma_to_mm_pfn(last_vpfn))) {
                printk(KERN_ERR "IOMMU: reserve iova failed\n");
                return -ENOMEM;
        }

        pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
                 start, end, domain->id);
        /*
         * RMRR range might have overlap with physical memory range,
         * clear it first
         */
        dma_pte_clear_range(domain, first_vpfn, last_vpfn);

        return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
                                  last_vpfn - first_vpfn + 1,
                                  DMA_PTE_READ|DMA_PTE_WRITE);
}

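/*
 * Worked example (illustrative): an RMRR spanning 0xe0000-0xeffff reserves
 * IOVA pfns 0xe0-0xef and maps them 1:1 (first_vpfn is used as both the
 * IOVA and the physical pfn), so the device keeps seeing the region at its
 * physical address.
 */
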
static int iommu_prepare_identity_map(struct device *dev,
                                      unsigned long long start,
                                      unsigned long long end)
{
        struct dmar_domain *domain;
        int ret;

        domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
        if (!domain)
                return -ENOMEM;

        /* For _hardware_ passthrough, don't bother. But for software
           passthrough, we do it anyway -- it may indicate a memory
           range which is reserved in E820, so which didn't get set
           up to start with in si_domain */
        if (domain == si_domain && hw_pass_through) {
                printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
                       dev_name(dev), start, end);
                return 0;
        }

        printk(KERN_INFO
               "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
               dev_name(dev), start, end);

        if (end < start) {
                WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
                        "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                     dmi_get_system_info(DMI_BIOS_VENDOR),
                     dmi_get_system_info(DMI_BIOS_VERSION),
                     dmi_get_system_info(DMI_PRODUCT_VERSION));
                ret = -EIO;
                goto error;
        }

        if (end >> agaw_to_width(domain->agaw)) {
                WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
                        "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                     agaw_to_width(domain->agaw),
                     dmi_get_system_info(DMI_BIOS_VENDOR),
                     dmi_get_system_info(DMI_BIOS_VERSION),
                     dmi_get_system_info(DMI_PRODUCT_VERSION));
                ret = -EIO;
                goto error;
        }

        ret = iommu_domain_identity_map(domain, start, end);
        if (ret)
                goto error;

        /* context entry init */
        ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
        if (ret)
                goto error;

        return 0;

 error:
        domain_exit(domain);
        return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
                                         struct device *dev)
{
        if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
                return 0;
        return iommu_prepare_identity_map(dev, rmrr->base_address,
                                          rmrr->end_address);
}

#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
        struct pci_dev *pdev;
        int ret;

        pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
        if (!pdev)
                return;

        printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
        ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);

        if (ret)
                printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
                       "floppy might not work\n");

        pci_dev_put(pdev);
}
#else
static inline void iommu_prepare_isa(void)
{
        return;
}
#endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */

static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_init(int hw)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        int nid, ret = 0;
        bool first = true;

        si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
        if (!si_domain)
                return -EFAULT;

        for_each_active_iommu(iommu, drhd) {
                ret = iommu_attach_domain(si_domain, iommu);
                if (ret < 0) {
                        domain_exit(si_domain);
                        return -EFAULT;
                } else if (first) {
                        si_domain->id = ret;
                        first = false;
                } else if (si_domain->id != ret) {
                        domain_exit(si_domain);
                        return -EFAULT;
                }
                domain_attach_iommu(si_domain, iommu);
        }

        if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
                domain_exit(si_domain);
                return -EFAULT;
        }

        pr_debug("IOMMU: identity mapping domain is domain %d\n",
                 si_domain->id);

        if (hw)
                return 0;

        for_each_online_node(nid) {
                unsigned long start_pfn, end_pfn;
                int i;

                for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
                        ret = iommu_domain_identity_map(si_domain,
                                        PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

static int identity_mapping(struct device *dev)
{
        struct device_domain_info *info;

        if (likely(!iommu_identity_mapping))
                return 0;

        info = dev->archdata.iommu;
        if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
                return (info->domain == si_domain);

        return 0;
}

static int domain_add_dev_info(struct dmar_domain *domain,
                               struct device *dev, int translation)
{
        struct dmar_domain *ndomain;
        struct intel_iommu *iommu;
        u8 bus, devfn;
        int ret;

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return -ENODEV;

        ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
        if (ndomain != domain)
                return -EBUSY;

        ret = domain_context_mapping(domain, dev, translation);
        if (ret) {
                domain_remove_one_dev_info(domain, dev);
                return ret;
        }

        return 0;
}

David Woodhouse0b9d9752014-03-09 15:48:15 -07002521static bool device_has_rmrr(struct device *dev)
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002522{
2523 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002524 struct device *tmp;
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002525 int i;
2526
Jiang Liu0e242612014-02-19 14:07:34 +08002527 rcu_read_lock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002528 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002529 /*
2530 * Return TRUE if this RMRR contains the device that
2531 * is passed in.
2532 */
2533 for_each_active_dev_scope(rmrr->devices,
2534 rmrr->devices_cnt, i, tmp)
David Woodhouse0b9d9752014-03-09 15:48:15 -07002535 if (tmp == dev) {
Jiang Liu0e242612014-02-19 14:07:34 +08002536 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002537 return true;
Jiang Liub683b232014-02-19 14:07:32 +08002538 }
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002539 }
Jiang Liu0e242612014-02-19 14:07:34 +08002540 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002541 return false;
2542}
2543
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002544/*
2545 * There are a couple of cases where we need to restrict the functionality of
2546 * devices associated with RMRRs. The first is when evaluating a device for
2547 * identity mapping because problems exist when devices are moved in and out
2548 * of domains and their respective RMRR information is lost. This means that
2549 * a device with associated RMRRs will never be in a "passthrough" domain.
2550 * The second is use of the device through the IOMMU API. This interface
2551 * expects to have full control of the IOVA space for the device. We cannot
2552 * satisfy both the requirement that RMRR access is maintained and have an
2553 * unencumbered IOVA space. We also have no ability to quiesce the device's
2554 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2555 * We therefore prevent devices associated with an RMRR from participating in
2556 * the IOMMU API, which eliminates them from device assignment.
2557 *
2558 * In both cases we assume that PCI USB devices with RMRRs have them largely
2559 * for historical reasons and that the RMRR space is not actively used post
2560 * boot. This exclusion may change if vendors begin to abuse it.
David Woodhouse18436af2015-03-25 15:05:47 +00002561 *
2562 * The same exception is made for graphics devices, with the requirement that
2563 * any use of the RMRR regions will be torn down before assigning the device
2564 * to a guest.
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002565 */
2566static bool device_is_rmrr_locked(struct device *dev)
2567{
2568 if (!device_has_rmrr(dev))
2569 return false;
2570
2571 if (dev_is_pci(dev)) {
2572 struct pci_dev *pdev = to_pci_dev(dev);
2573
David Woodhouse18436af2015-03-25 15:05:47 +00002574 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002575 return false;
2576 }
2577
2578 return true;
2579}
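/*
 * Sketch of how this policy surfaces through the IOMMU API: the
 * domain attach path elsewhere in this file performs roughly the
 * check below (treat the exact diagnostic text as illustrative):
 *
 *	if (device_is_rmrr_locked(dev)) {
 *		dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement.  Contact your platform vendor.\n");
 *		return -EPERM;
 *	}
 */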
2580
David Woodhouse3bdb2592014-03-09 16:03:08 -07002581static int iommu_should_identity_map(struct device *dev, int startup)
David Woodhouse6941af22009-07-04 18:24:27 +01002582{
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002583
David Woodhouse3bdb2592014-03-09 16:03:08 -07002584 if (dev_is_pci(dev)) {
2585 struct pci_dev *pdev = to_pci_dev(dev);
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002586
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002587 if (device_is_rmrr_locked(dev))
David Woodhouse3bdb2592014-03-09 16:03:08 -07002588 return 0;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002589
David Woodhouse3bdb2592014-03-09 16:03:08 -07002590 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2591 return 1;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002592
David Woodhouse3bdb2592014-03-09 16:03:08 -07002593 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2594 return 1;
2595
2596 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2597 return 0;
2598
2599 /*
2600 * We want to start off with all devices in the 1:1 domain, and
2601 * take them out later if we find they can't access all of memory.
2602 *
2603 * However, we can't do this for PCI devices behind bridges,
2604 * because all PCI devices behind the same bridge will end up
2605 * with the same source-id on their transactions.
2606 *
2607 * Practically speaking, we can't change things around for these
2608 * devices at run-time, because we can't be sure there'll be no
2609 * DMA transactions in flight for any of their siblings.
2610 *
2611 * So PCI devices (unless they're on the root bus) as well as
2612 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2613 * the 1:1 domain, just in _case_ one of their siblings turns out
2614 * not to be able to map all of memory.
2615 */
2616 if (!pci_is_pcie(pdev)) {
2617 if (!pci_is_root_bus(pdev->bus))
2618 return 0;
2619 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2620 return 0;
2621 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2622 return 0;
2623 } else {
2624 if (device_has_rmrr(dev))
2625 return 0;
2626 }
David Woodhouse6941af22009-07-04 18:24:27 +01002627
David Woodhouse3dfc8132009-07-04 19:11:08 +01002628 /*
David Woodhouse3dfc8132009-07-04 19:11:08 +01002629 * At boot time, we don't yet know if devices will be 64-bit capable.
David Woodhouse3bdb2592014-03-09 16:03:08 -07002630 * Assume that they will — if they turn out not to be, then we can
David Woodhouse3dfc8132009-07-04 19:11:08 +01002631 * take them out of the 1:1 domain later.
2632 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002633 if (!startup) {
2634 /*
2635 * If the device's dma_mask is less than the system's memory
2636 * size then this is not a candidate for identity mapping.
2637 */
David Woodhouse3bdb2592014-03-09 16:03:08 -07002638 u64 dma_mask = *dev->dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002639
David Woodhouse3bdb2592014-03-09 16:03:08 -07002640 if (dev->coherent_dma_mask &&
2641 dev->coherent_dma_mask < dma_mask)
2642 dma_mask = dev->coherent_dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002643
David Woodhouse3bdb2592014-03-09 16:03:08 -07002644 return dma_mask >= dma_get_required_mask(dev);
Chris Wright8fcc5372011-05-28 13:15:02 -05002645 }
David Woodhouse6941af22009-07-04 18:24:27 +01002646
2647 return 1;
2648}
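/*
 * Worked example for the runtime check above (hypothetical machine):
 * with 8GiB of RAM the highest DMA-able address is 0x1ffffffff, so
 * dma_get_required_mask() reports DMA_BIT_MASK(33). A device whose
 * dma_mask is DMA_BIT_MASK(32) then fails the comparison
 * (0xffffffff < 0x1ffffffff) and is left out of the identity domain,
 * while a 64-bit capable device passes and stays 1:1 mapped.
 */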
2649
David Woodhousecf04eee2014-03-21 16:49:04 +00002650static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2651{
2652 int ret;
2653
2654 if (!iommu_should_identity_map(dev, 1))
2655 return 0;
2656
2657 ret = domain_add_dev_info(si_domain, dev,
2658 hw ? CONTEXT_TT_PASS_THROUGH :
2659 CONTEXT_TT_MULTI_LEVEL);
2660 if (!ret)
2661 pr_info("IOMMU: %s identity mapping for device %s\n",
2662 hw ? "hardware" : "software", dev_name(dev));
2663 else if (ret == -ENODEV)
2664 /* device not associated with an iommu */
2665 ret = 0;
2666
2667 return ret;
2668}
2669
2670
Matt Kraai071e1372009-08-23 22:30:22 -07002671static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002672{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002673 struct pci_dev *pdev = NULL;
David Woodhousecf04eee2014-03-21 16:49:04 +00002674 struct dmar_drhd_unit *drhd;
2675 struct intel_iommu *iommu;
2676 struct device *dev;
2677 int i;
2678 int ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002679
David Woodhouse19943b02009-08-04 16:19:20 +01002680 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002681 if (ret)
2682 return -EFAULT;
2683
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002684 for_each_pci_dev(pdev) {
David Woodhousecf04eee2014-03-21 16:49:04 +00002685 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2686 if (ret)
2687 return ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002688 }
2689
David Woodhousecf04eee2014-03-21 16:49:04 +00002690 for_each_active_iommu(iommu, drhd)
2691 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2692 struct acpi_device_physical_node *pn;
2693 struct acpi_device *adev;
2694
2695 if (dev->bus != &acpi_bus_type)
2696 continue;
2697
2698		adev = to_acpi_device(dev);
2699 mutex_lock(&adev->physical_node_lock);
2700 list_for_each_entry(pn, &adev->physical_node_list, node) {
2701 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2702 if (ret)
2703 break;
2704 }
2705 mutex_unlock(&adev->physical_node_lock);
2706 if (ret)
2707 return ret;
2708 }
2709
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002710 return 0;
2711}
2712
Jiang Liuffebeb42014-11-09 22:48:02 +08002713static void intel_iommu_init_qi(struct intel_iommu *iommu)
2714{
2715 /*
2716	 * Start from a sane IOMMU hardware state.
2717	 * If queued invalidation has already been initialized by us
2718	 * (for example, while enabling interrupt remapping), then
2719	 * things are already rolling from a sane state.
2720 */
2721 if (!iommu->qi) {
2722 /*
2723 * Clear any previous faults.
2724 */
2725 dmar_fault(-1, iommu);
2726 /*
2727 * Disable queued invalidation if supported and already enabled
2728 * before OS handover.
2729 */
2730 dmar_disable_qi(iommu);
2731 }
2732
2733 if (dmar_enable_qi(iommu)) {
2734 /*
2735		 * Queued invalidation not enabled; use register-based invalidation
2736 */
2737 iommu->flush.flush_context = __iommu_flush_context;
2738 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2739 pr_info("IOMMU: %s using Register based invalidation\n",
2740 iommu->name);
2741 } else {
2742 iommu->flush.flush_context = qi_flush_context;
2743 iommu->flush.flush_iotlb = qi_flush_iotlb;
2744 pr_info("IOMMU: %s using Queued invalidation\n", iommu->name);
2745 }
2746}
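/*
 * Whichever backend wins, callers flush through the same indirection
 * and never need to know which one was selected. A minimal sketch of
 * a domain-selective IOTLB flush ("did" is an assumed, valid domain
 * id):
 *
 *	iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
 */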
2747
Joseph Cihulab7792602011-05-03 00:08:37 -07002748static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002749{
2750 struct dmar_drhd_unit *drhd;
2751 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002752 struct device *dev;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002753 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002754 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002755
2756 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002757 * for each drhd
2758 * allocate root
2759 * initialize and program root entry to not present
2760 * endfor
2761 */
2762 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002763 /*
2764		 * lock not needed as this is only incremented in the single-
2765		 * threaded kernel __init code path; all other accesses are
2766		 * read-only
2767 */
Jiang Liu78d8e702014-11-09 22:47:57 +08002768 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
Mike Travis1b198bb2012-03-05 15:05:16 -08002769 g_num_of_iommus++;
2770 continue;
2771 }
2772 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
Jiang Liu78d8e702014-11-09 22:47:57 +08002773 DMAR_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08002774 }
2775
Jiang Liuffebeb42014-11-09 22:48:02 +08002776 /* Preallocate enough resources for IOMMU hot-addition */
2777 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
2778 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
2779
Weidong Hand9630fe2008-12-08 11:06:32 +08002780 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2781 GFP_KERNEL);
2782 if (!g_iommus) {
2783 printk(KERN_ERR "Allocating global iommu array failed\n");
2784 ret = -ENOMEM;
2785 goto error;
2786 }
2787
mark gross80b20dd2008-04-18 13:53:58 -07002788 deferred_flush = kzalloc(g_num_of_iommus *
2789 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2790 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002791 ret = -ENOMEM;
Jiang Liu989d51f2014-02-19 14:07:21 +08002792 goto free_g_iommus;
mark gross5e0d2a62008-03-04 15:22:08 -08002793 }
2794
Jiang Liu7c919772014-01-06 14:18:18 +08002795 for_each_active_iommu(iommu, drhd) {
Weidong Hand9630fe2008-12-08 11:06:32 +08002796 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002797
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002798 ret = iommu_init_domains(iommu);
2799 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002800 goto free_iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002801
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002802 /*
2803 * TBD:
2804 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002805	 * among all IOMMUs. Need to split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002806 */
2807 ret = iommu_alloc_root_entry(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08002808 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002809 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002810 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002811 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002812 }
2813
Jiang Liuffebeb42014-11-09 22:48:02 +08002814 for_each_active_iommu(iommu, drhd)
2815 intel_iommu_init_qi(iommu);
Youquan Songa77b67d2008-10-16 16:31:56 -07002816
David Woodhouse19943b02009-08-04 16:19:20 +01002817 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07002818 iommu_identity_mapping |= IDENTMAP_ALL;
2819
Suresh Siddhad3f13812011-08-23 17:05:25 -07002820#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07002821 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002822#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07002823
2824 check_tylersburg_isoch();
2825
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002826 /*
2827 * If pass through is not set or not enabled, setup context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002828 * identity mappings for rmrr, gfx, and isa and may fall back to static
2829 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002830 */
David Woodhouse19943b02009-08-04 16:19:20 +01002831 if (iommu_identity_mapping) {
2832 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2833 if (ret) {
2834 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08002835 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002836 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002837 }
David Woodhouse19943b02009-08-04 16:19:20 +01002838 /*
2839 * For each rmrr
2840 * for each dev attached to rmrr
2841 * do
2842 * locate drhd for dev, alloc domain for dev
2843 * allocate free domain
2844 * allocate page table entries for rmrr
2845 * if context not allocated for bus
2846 * allocate and init context
2847 * set present in root table for this bus
2848 * init context with domain, translation etc
2849 * endfor
2850 * endfor
2851 */
2852 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2853 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002854		/* some BIOSes list non-existent devices in the DMAR table. */
2855 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
David Woodhouse832bd852014-03-07 15:08:36 +00002856 i, dev) {
David Woodhouse0b9d9752014-03-09 15:48:15 -07002857 ret = iommu_prepare_rmrr_dev(rmrr, dev);
David Woodhouse19943b02009-08-04 16:19:20 +01002858 if (ret)
2859 printk(KERN_ERR
2860 "IOMMU: mapping reserved region failed\n");
2861 }
2862 }
2863
2864 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002865
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002866 /*
2867 * for each drhd
2868 * enable fault log
2869 * global invalidate context cache
2870 * global invalidate iotlb
2871 * enable translation
2872 */
Jiang Liu7c919772014-01-06 14:18:18 +08002873 for_each_iommu(iommu, drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07002874 if (drhd->ignored) {
2875 /*
2876 * we always have to disable PMRs or DMA may fail on
2877 * this device
2878 */
2879 if (force_on)
Jiang Liu7c919772014-01-06 14:18:18 +08002880 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002881 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07002882 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002883
2884 iommu_flush_write_buffer(iommu);
2885
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002886 ret = dmar_set_interrupt(iommu);
2887 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002888 goto free_iommu;
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002889
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002890 iommu_set_root_entry(iommu);
2891
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002892 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002893 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Jiang Liu2a41cce2014-07-11 14:19:33 +08002894 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07002895 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002896 }
2897
2898 return 0;
Jiang Liu989d51f2014-02-19 14:07:21 +08002899
2900free_iommu:
Jiang Liuffebeb42014-11-09 22:48:02 +08002901 for_each_active_iommu(iommu, drhd) {
2902 disable_dmar_iommu(iommu);
Jiang Liua868e6b2014-01-06 14:18:20 +08002903 free_dmar_iommu(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08002904 }
Jiang Liu9bdc5312014-01-06 14:18:27 +08002905 kfree(deferred_flush);
Jiang Liu989d51f2014-02-19 14:07:21 +08002906free_g_iommus:
Weidong Hand9630fe2008-12-08 11:06:32 +08002907 kfree(g_iommus);
Jiang Liu989d51f2014-02-19 14:07:21 +08002908error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002909 return ret;
2910}
2911
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002912/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002913static struct iova *intel_alloc_iova(struct device *dev,
2914 struct dmar_domain *domain,
2915 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002916{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002917 struct iova *iova = NULL;
2918
David Woodhouse875764d2009-06-28 21:20:51 +01002919 /* Restrict dma_mask to the width that the iommu can handle */
2920 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2921
2922 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002923 /*
2924 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002925		 * DMA_BIT_MASK(32) and, if that fails, then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002926		 * from the higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002927 */
David Woodhouse875764d2009-06-28 21:20:51 +01002928 iova = alloc_iova(&domain->iovad, nrpages,
2929 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2930 if (iova)
2931 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002932 }
David Woodhouse875764d2009-06-28 21:20:51 +01002933 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2934 if (unlikely(!iova)) {
2935		printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
David Woodhouse207e3592014-03-09 16:12:32 -07002936 nrpages, dev_name(dev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002937 return NULL;
2938 }
2939
2940 return iova;
2941}
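/*
 * Usage sketch (hypothetical request): mapping four MM pages for a
 * device with a 48-bit DMA mask first tries the window below 4GiB
 * and falls back to the full mask only if that window is exhausted:
 *
 *	iova = intel_alloc_iova(dev, domain, 4, DMA_BIT_MASK(48));
 *	if (iova)
 *		dma_addr = (dma_addr_t)iova->pfn_lo << PAGE_SHIFT;
 */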
2942
David Woodhoused4b709f2014-03-09 16:07:40 -07002943static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002944{
2945 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002946 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002947
David Woodhoused4b709f2014-03-09 16:07:40 -07002948 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002949 if (!domain) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002950		printk(KERN_ERR "Allocating domain for %s failed\n",
2951 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002952 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002953 }
2954
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002955 /* make sure context mapping is ok */
David Woodhoused4b709f2014-03-09 16:07:40 -07002956 if (unlikely(!domain_context_mapped(dev))) {
2957 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002958 if (ret) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002959			printk(KERN_ERR "Domain context map for %s failed\n",
2960 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002961 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002962 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002963 }
2964
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002965 return domain;
2966}
2967
David Woodhoused4b709f2014-03-09 16:07:40 -07002968static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
David Woodhouse147202a2009-07-07 19:43:20 +01002969{
2970 struct device_domain_info *info;
2971
2972 /* No lock here, assumes no domain exit in normal case */
David Woodhoused4b709f2014-03-09 16:07:40 -07002973 info = dev->archdata.iommu;
David Woodhouse147202a2009-07-07 19:43:20 +01002974 if (likely(info))
2975 return info->domain;
2976
2977 return __get_valid_domain_for_dev(dev);
2978}
2979
David Woodhouseecb509e2014-03-09 16:29:55 -07002980/* Check if the dev needs to go through the non-identity map and unmap process. */
David Woodhouse73676832009-07-04 14:08:36 +01002981static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002982{
2983 int found;
2984
David Woodhouse3d891942014-03-06 15:59:26 +00002985 if (iommu_dummy(dev))
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002986 return 1;
2987
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002988 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002989 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002990
David Woodhouse9b226622014-03-09 14:03:28 -07002991 found = identity_mapping(dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002992 if (found) {
David Woodhouseecb509e2014-03-09 16:29:55 -07002993 if (iommu_should_identity_map(dev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002994 return 1;
2995 else {
2996 /*
2997			 * A 32-bit DMA device is removed from si_domain and falls
2998			 * back to non-identity mapping.
2999 */
David Woodhousebf9c9ed2014-03-09 16:19:13 -07003000 domain_remove_one_dev_info(si_domain, dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003001 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
David Woodhouseecb509e2014-03-09 16:29:55 -07003002 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003003 return 0;
3004 }
3005 } else {
3006 /*
3007		 * If a 64-bit DMA device is detached from a VM, the device
3008		 * is put back into si_domain for identity mapping.
3009 */
David Woodhouseecb509e2014-03-09 16:29:55 -07003010 if (iommu_should_identity_map(dev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003011 int ret;
David Woodhouse5913c9b2014-03-09 16:27:31 -07003012 ret = domain_add_dev_info(si_domain, dev,
David Woodhouse5fe60f42009-08-09 10:53:41 +01003013 hw_pass_through ?
3014 CONTEXT_TT_PASS_THROUGH :
3015 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003016 if (!ret) {
3017 printk(KERN_INFO "64bit %s uses identity mapping\n",
David Woodhouseecb509e2014-03-09 16:29:55 -07003018 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003019 return 1;
3020 }
3021 }
3022 }
3023
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003024 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003025}
3026
David Woodhouse5040a912014-03-09 16:14:00 -07003027static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003028 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003029{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003030 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003031 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003032 struct iova *iova;
3033 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003034 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08003035 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07003036 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003037
3038 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003039
David Woodhouse5040a912014-03-09 16:14:00 -07003040 if (iommu_no_mapping(dev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003041 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003042
David Woodhouse5040a912014-03-09 16:14:00 -07003043 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003044 if (!domain)
3045 return 0;
3046
Weidong Han8c11e792008-12-08 15:29:22 +08003047 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01003048 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003049
David Woodhouse5040a912014-03-09 16:14:00 -07003050 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003051 if (!iova)
3052 goto error;
3053
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003054 /*
3055 * Check if DMAR supports zero-length reads on write only
3056	 * mappings.
3057 */
3058 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003059 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003060 prot |= DMA_PTE_READ;
3061 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3062 prot |= DMA_PTE_WRITE;
3063 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003064	 * paddr..(paddr + size) might span a partial page; we should map the whole
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003065	 * page.  Note: if two parts of one page are mapped separately, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003066	 * might have two guest_addr mappings to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003067	 * is not a big problem
3068 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01003069 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07003070 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003071 if (ret)
3072 goto error;
3073
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003074 /* it's a non-present to present mapping. Only flush if caching mode */
3075 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003076 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003077 else
Weidong Han8c11e792008-12-08 15:29:22 +08003078 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003079
David Woodhouse03d6a242009-06-28 15:33:46 +01003080 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3081 start_paddr += paddr & ~PAGE_MASK;
3082 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003083
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003084error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003085 if (iova)
3086 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00003087	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
David Woodhouse5040a912014-03-09 16:14:00 -07003088 dev_name(dev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003089 return 0;
3090}
3091
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003092static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3093 unsigned long offset, size_t size,
3094 enum dma_data_direction dir,
3095 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003096{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003097 return __intel_map_single(dev, page_to_phys(page) + offset, size,
David Woodhouse46333e32014-03-10 20:01:21 -07003098 dir, *dev->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003099}
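/*
 * Driver-side usage sketch: nothing calls intel_map_page() directly;
 * drivers go through the generic DMA API, which dispatches here via
 * intel_dma_ops (the device and page are assumed to exist):
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	...device performs DMA against "handle"...
 *	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
 */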
3100
mark gross5e0d2a62008-03-04 15:22:08 -08003101static void flush_unmaps(void)
3102{
mark gross80b20dd2008-04-18 13:53:58 -07003103 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08003104
mark gross5e0d2a62008-03-04 15:22:08 -08003105 timer_on = 0;
3106
3107 /* just flush them all */
3108 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08003109 struct intel_iommu *iommu = g_iommus[i];
3110 if (!iommu)
3111 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003112
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003113 if (!deferred_flush[i].next)
3114 continue;
3115
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003116		/* In caching mode, global flushes make emulation expensive */
3117 if (!cap_caching_mode(iommu->cap))
3118 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08003119 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003120 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08003121 unsigned long mask;
3122 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003123 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08003124
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003125 /* On real hardware multiple invalidations are expensive */
3126 if (cap_caching_mode(iommu->cap))
3127 iommu_flush_iotlb_psi(iommu, domain->id,
Jiang Liua156ef92014-07-11 14:19:36 +08003128 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00003129 !deferred_flush[i].freelist[j], 0);
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003130 else {
Jiang Liua156ef92014-07-11 14:19:36 +08003131 mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003132 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3133 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3134 }
Yu Zhao93a23a72009-05-18 13:51:37 +08003135 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003136 if (deferred_flush[i].freelist[j])
3137 dma_free_pagelist(deferred_flush[i].freelist[j]);
mark gross80b20dd2008-04-18 13:53:58 -07003138 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003139 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003140 }
3141
mark gross5e0d2a62008-03-04 15:22:08 -08003142 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003143}
3144
3145static void flush_unmaps_timeout(unsigned long data)
3146{
mark gross80b20dd2008-04-18 13:53:58 -07003147 unsigned long flags;
3148
3149 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003150 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07003151 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003152}
3153
David Woodhouseea8ea462014-03-05 17:09:32 +00003154static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
mark gross5e0d2a62008-03-04 15:22:08 -08003155{
3156 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07003157 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08003158 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08003159
3160 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07003161 if (list_size == HIGH_WATER_MARK)
3162 flush_unmaps();
3163
Weidong Han8c11e792008-12-08 15:29:22 +08003164 iommu = domain_get_iommu(dom);
3165 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003166
mark gross80b20dd2008-04-18 13:53:58 -07003167 next = deferred_flush[iommu_id].next;
3168 deferred_flush[iommu_id].domain[next] = dom;
3169 deferred_flush[iommu_id].iova[next] = iova;
David Woodhouseea8ea462014-03-05 17:09:32 +00003170 deferred_flush[iommu_id].freelist[next] = freelist;
mark gross80b20dd2008-04-18 13:53:58 -07003171 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08003172
3173 if (!timer_on) {
3174 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3175 timer_on = 1;
3176 }
3177 list_size++;
3178 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3179}
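/*
 * Design note on the deferral above: batching trades a short window
 * in which a stale IOTLB entry may still match for far fewer
 * invalidation operations. A batch drains when a per-IOMMU list
 * reaches HIGH_WATER_MARK entries or when the 10ms unmap_timer
 * fires, whichever comes first; booting with intel_iommu=strict
 * takes the synchronous flush path in intel_unmap() instead.
 */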
3180
Jiang Liud41a4ad2014-07-11 14:19:34 +08003181static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003182{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003183 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003184 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003185 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003186 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003187 struct page *freelist;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003188
David Woodhouse73676832009-07-04 14:08:36 +01003189 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003190 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003191
David Woodhouse1525a292014-03-06 16:19:30 +00003192 domain = find_domain(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003193 BUG_ON(!domain);
3194
Weidong Han8c11e792008-12-08 15:29:22 +08003195 iommu = domain_get_iommu(domain);
3196
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003197 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01003198 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3199 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003200 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003201
David Woodhoused794dc92009-06-28 00:27:49 +01003202 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3203 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003204
David Woodhoused794dc92009-06-28 00:27:49 +01003205 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003206 dev_name(dev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003207
David Woodhouseea8ea462014-03-05 17:09:32 +00003208 freelist = domain_unmap(domain, start_pfn, last_pfn);
David Woodhoused794dc92009-06-28 00:27:49 +01003209
mark gross5e0d2a62008-03-04 15:22:08 -08003210 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01003211 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
David Woodhouseea8ea462014-03-05 17:09:32 +00003212 last_pfn - start_pfn + 1, !freelist, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08003213 /* free iova */
3214 __free_iova(&domain->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003215 dma_free_pagelist(freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003216 } else {
David Woodhouseea8ea462014-03-05 17:09:32 +00003217 add_unmap(domain, iova, freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003218 /*
3219		 * queue up the release of the unmap to save the roughly 1/6th
3220		 * of the CPU time used up by the iotlb flush operation...
3221 */
mark gross5e0d2a62008-03-04 15:22:08 -08003222 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003223}
3224
Jiang Liud41a4ad2014-07-11 14:19:34 +08003225static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3226 size_t size, enum dma_data_direction dir,
3227 struct dma_attrs *attrs)
3228{
3229 intel_unmap(dev, dev_addr);
3230}
3231
David Woodhouse5040a912014-03-09 16:14:00 -07003232static void *intel_alloc_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003233 dma_addr_t *dma_handle, gfp_t flags,
3234 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003235{
Akinobu Mita36746432014-06-04 16:06:51 -07003236 struct page *page = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003237 int order;
3238
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003239 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003240 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003241
David Woodhouse5040a912014-03-09 16:14:00 -07003242 if (!iommu_no_mapping(dev))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003243 flags &= ~(GFP_DMA | GFP_DMA32);
David Woodhouse5040a912014-03-09 16:14:00 -07003244 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3245 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003246 flags |= GFP_DMA;
3247 else
3248 flags |= GFP_DMA32;
3249 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003250
Akinobu Mita36746432014-06-04 16:06:51 -07003251 if (flags & __GFP_WAIT) {
3252 unsigned int count = size >> PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003253
Akinobu Mita36746432014-06-04 16:06:51 -07003254 page = dma_alloc_from_contiguous(dev, count, order);
3255 if (page && iommu_no_mapping(dev) &&
3256 page_to_phys(page) + size > dev->coherent_dma_mask) {
3257 dma_release_from_contiguous(dev, page, count);
3258 page = NULL;
3259 }
3260 }
3261
3262 if (!page)
3263 page = alloc_pages(flags, order);
3264 if (!page)
3265 return NULL;
3266 memset(page_address(page), 0, size);
3267
3268 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003269 DMA_BIDIRECTIONAL,
David Woodhouse5040a912014-03-09 16:14:00 -07003270 dev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003271 if (*dma_handle)
Akinobu Mita36746432014-06-04 16:06:51 -07003272 return page_address(page);
3273 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3274 __free_pages(page, order);
3275
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003276 return NULL;
3277}
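/*
 * Usage sketch (hypothetical ring buffer): consistent mappings come
 * in through the generic DMA API and land here via intel_dma_ops:
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(dev, 4096, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...program "ring_dma" into the device...
 *	dma_free_coherent(dev, 4096, ring, ring_dma);
 */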
3278
David Woodhouse5040a912014-03-09 16:14:00 -07003279static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003280 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003281{
3282 int order;
Akinobu Mita36746432014-06-04 16:06:51 -07003283 struct page *page = virt_to_page(vaddr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003284
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003285 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003286 order = get_order(size);
3287
Jiang Liud41a4ad2014-07-11 14:19:34 +08003288 intel_unmap(dev, dma_handle);
Akinobu Mita36746432014-06-04 16:06:51 -07003289 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3290 __free_pages(page, order);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003291}
3292
David Woodhouse5040a912014-03-09 16:14:00 -07003293static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003294 int nelems, enum dma_data_direction dir,
3295 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003296{
Jiang Liud41a4ad2014-07-11 14:19:34 +08003297 intel_unmap(dev, sglist[0].dma_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003298}
3299
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003300static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003301 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003302{
3303 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003304 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003305
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003306 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003307 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003308 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003309 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003310 }
3311 return nelems;
3312}
3313
David Woodhouse5040a912014-03-09 16:14:00 -07003314static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003315 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003316{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003317 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003318 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003319 size_t size = 0;
3320 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003321 struct iova *iova = NULL;
3322 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003323 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003324 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003325 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003326
3327 BUG_ON(dir == DMA_NONE);
David Woodhouse5040a912014-03-09 16:14:00 -07003328 if (iommu_no_mapping(dev))
3329 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003330
David Woodhouse5040a912014-03-09 16:14:00 -07003331 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003332 if (!domain)
3333 return 0;
3334
Weidong Han8c11e792008-12-08 15:29:22 +08003335 iommu = domain_get_iommu(domain);
3336
David Woodhouseb536d242009-06-28 14:49:31 +01003337 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003338 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003339
David Woodhouse5040a912014-03-09 16:14:00 -07003340 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3341 *dev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003342 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003343 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003344 return 0;
3345 }
3346
3347 /*
3348 * Check if DMAR supports zero-length reads on write only
3349	 * mappings.
3350 */
3351 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003352 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003353 prot |= DMA_PTE_READ;
3354 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3355 prot |= DMA_PTE_WRITE;
3356
David Woodhouseb536d242009-06-28 14:49:31 +01003357 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003358
Fenghua Yuf5329592009-08-04 15:09:37 -07003359 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003360 if (unlikely(ret)) {
David Woodhousee1605492009-06-29 11:17:38 +01003361 dma_pte_free_pagetable(domain, start_vpfn,
3362 start_vpfn + size - 1);
David Woodhousee1605492009-06-29 11:17:38 +01003363 __free_iova(&domain->iovad, iova);
3364 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003365 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003366
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003367 /* it's a non-present to present mapping. Only flush if caching mode */
3368 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003369 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003370 else
Weidong Han8c11e792008-12-08 15:29:22 +08003371 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003372
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003373 return nelems;
3374}
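/*
 * Usage sketch (hypothetical two-page scatterlist; page0/page1 are
 * assumed to exist): scatter-gather mappings also arrive through the
 * generic DMA API:
 *
 *	struct scatterlist sg[2];
 *	int n;
 *
 *	sg_init_table(sg, 2);
 *	sg_set_page(&sg[0], page0, PAGE_SIZE, 0);
 *	sg_set_page(&sg[1], page1, PAGE_SIZE, 0);
 *	n = dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);
 *	if (!n)
 *		return -EIO;
 *	...
 *	dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);
 */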
3375
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003376static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3377{
3378 return !dma_addr;
3379}
3380
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003381struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003382 .alloc = intel_alloc_coherent,
3383 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003384 .map_sg = intel_map_sg,
3385 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003386 .map_page = intel_map_page,
3387 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003388 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003389};
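/*
 * This table only takes effect once it is installed as the global
 * dma_ops, which intel_iommu_init() does after translation has been
 * enabled (a one-line sketch of that hand-off):
 *
 *	dma_ops = &intel_dma_ops;
 */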
3390
3391static inline int iommu_domain_cache_init(void)
3392{
3393 int ret = 0;
3394
3395 iommu_domain_cache = kmem_cache_create("iommu_domain",
3396 sizeof(struct dmar_domain),
3397 0,
3398 SLAB_HWCACHE_ALIGN,
3400						NULL);
3401 if (!iommu_domain_cache) {
3402 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3403 ret = -ENOMEM;
3404 }
3405
3406 return ret;
3407}
3408
3409static inline int iommu_devinfo_cache_init(void)
3410{
3411 int ret = 0;
3412
3413 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3414 sizeof(struct device_domain_info),
3415 0,
3416 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003417 NULL);
3418 if (!iommu_devinfo_cache) {
3419 printk(KERN_ERR "Couldn't create devinfo cache\n");
3420 ret = -ENOMEM;
3421 }
3422
3423 return ret;
3424}
3425
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003426static int __init iommu_init_mempool(void)
3427{
3428 int ret;
3429 ret = iommu_iova_cache_init();
3430 if (ret)
3431 return ret;
3432
3433 ret = iommu_domain_cache_init();
3434 if (ret)
3435 goto domain_error;
3436
3437 ret = iommu_devinfo_cache_init();
3438 if (!ret)
3439 return ret;
3440
3441 kmem_cache_destroy(iommu_domain_cache);
3442domain_error:
Robin Murphy85b45452015-01-12 17:51:14 +00003443 iommu_iova_cache_destroy();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003444
3445 return -ENOMEM;
3446}
3447
3448static void __init iommu_exit_mempool(void)
3449{
3450 kmem_cache_destroy(iommu_devinfo_cache);
3451 kmem_cache_destroy(iommu_domain_cache);
Robin Murphy85b45452015-01-12 17:51:14 +00003452 iommu_iova_cache_destroy();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003453}
3454
Dan Williams556ab452010-07-23 15:47:56 -07003455static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3456{
3457 struct dmar_drhd_unit *drhd;
3458 u32 vtbar;
3459 int rc;
3460
3461 /* We know that this device on this chipset has its own IOMMU.
3462 * If we find it under a different IOMMU, then the BIOS is lying
3463 * to us. Hope that the IOMMU for this device is actually
3464 * disabled, and it needs no translation...
3465 */
3466 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3467 if (rc) {
3468 /* "can't" happen */
3469 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3470 return;
3471 }
3472 vtbar &= 0xffff0000;
3473
3474	/* we know that this iommu should be at offset 0xa000 from vtbar */
3475 drhd = dmar_find_matched_drhd_unit(pdev);
3476 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3477 TAINT_FIRMWARE_WORKAROUND,
3478 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3479 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3480}
3481DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3482
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003483static void __init init_no_remapping_devices(void)
3484{
3485 struct dmar_drhd_unit *drhd;
David Woodhouse832bd852014-03-07 15:08:36 +00003486 struct device *dev;
Jiang Liub683b232014-02-19 14:07:32 +08003487 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003488
3489 for_each_drhd_unit(drhd) {
3490 if (!drhd->include_all) {
Jiang Liub683b232014-02-19 14:07:32 +08003491 for_each_active_dev_scope(drhd->devices,
3492 drhd->devices_cnt, i, dev)
3493 break;
David Woodhouse832bd852014-03-07 15:08:36 +00003494 /* ignore DMAR unit if no devices exist */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003495 if (i == drhd->devices_cnt)
3496 drhd->ignored = 1;
3497 }
3498 }
3499
Jiang Liu7c919772014-01-06 14:18:18 +08003500 for_each_active_drhd_unit(drhd) {
Jiang Liu7c919772014-01-06 14:18:18 +08003501 if (drhd->include_all)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003502 continue;
3503
Jiang Liub683b232014-02-19 14:07:32 +08003504 for_each_active_dev_scope(drhd->devices,
3505 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003506 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003507 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003508 if (i < drhd->devices_cnt)
3509 continue;
3510
David Woodhousec0771df2011-10-14 20:59:46 +01003511 /* This IOMMU has *only* gfx devices. Either bypass it or
3512 set the gfx_mapped flag, as appropriate */
3513 if (dmar_map_gfx) {
3514 intel_iommu_gfx_mapped = 1;
3515 } else {
3516 drhd->ignored = 1;
Jiang Liub683b232014-02-19 14:07:32 +08003517 for_each_active_dev_scope(drhd->devices,
3518 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003519 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003520 }
3521 }
3522}
3523
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003524#ifdef CONFIG_SUSPEND
3525static int init_iommu_hw(void)
3526{
3527 struct dmar_drhd_unit *drhd;
3528 struct intel_iommu *iommu = NULL;
3529
3530 for_each_active_iommu(iommu, drhd)
3531 if (iommu->qi)
3532 dmar_reenable_qi(iommu);
3533
Joseph Cihulab7792602011-05-03 00:08:37 -07003534 for_each_iommu(iommu, drhd) {
3535 if (drhd->ignored) {
3536 /*
3537 * we always have to disable PMRs or DMA may fail on
3538 * this device
3539 */
3540 if (force_on)
3541 iommu_disable_protect_mem_regions(iommu);
3542 continue;
3543 }
3544
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003545 iommu_flush_write_buffer(iommu);
3546
3547 iommu_set_root_entry(iommu);
3548
3549 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003550 DMA_CCMD_GLOBAL_INVL);
Jiang Liu2a41cce2014-07-11 14:19:33 +08003551 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3552 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07003553 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003554 }
3555
3556 return 0;
3557}
3558
3559static void iommu_flush_all(void)
3560{
3561 struct dmar_drhd_unit *drhd;
3562 struct intel_iommu *iommu;
3563
3564 for_each_active_iommu(iommu, drhd) {
3565 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003566 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003567 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003568 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003569 }
3570}
3571
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003572static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003573{
3574 struct dmar_drhd_unit *drhd;
3575 struct intel_iommu *iommu = NULL;
3576 unsigned long flag;
3577
3578 for_each_active_iommu(iommu, drhd) {
3579 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3580 GFP_ATOMIC);
3581 if (!iommu->iommu_state)
3582 goto nomem;
3583 }
3584
3585 iommu_flush_all();
3586
3587 for_each_active_iommu(iommu, drhd) {
3588 iommu_disable_translation(iommu);
3589
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003590 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003591
3592 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3593 readl(iommu->reg + DMAR_FECTL_REG);
3594 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3595 readl(iommu->reg + DMAR_FEDATA_REG);
3596 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3597 readl(iommu->reg + DMAR_FEADDR_REG);
3598 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3599 readl(iommu->reg + DMAR_FEUADDR_REG);
3600
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003601 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003602 }
3603 return 0;
3604
3605nomem:
3606 for_each_active_iommu(iommu, drhd)
3607 kfree(iommu->iommu_state);
3608
3609 return -ENOMEM;
3610}
3611
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003612static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003613{
3614 struct dmar_drhd_unit *drhd;
3615 struct intel_iommu *iommu = NULL;
3616 unsigned long flag;
3617
3618 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003619 if (force_on)
3620 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3621 else
3622 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003623 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003624 }
3625
3626 for_each_active_iommu(iommu, drhd) {
3627
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003628 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003629
3630 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3631 iommu->reg + DMAR_FECTL_REG);
3632 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3633 iommu->reg + DMAR_FEDATA_REG);
3634 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3635 iommu->reg + DMAR_FEADDR_REG);
3636 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3637 iommu->reg + DMAR_FEUADDR_REG);
3638
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003639 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003640 }
3641
3642 for_each_active_iommu(iommu, drhd)
3643 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003644}
3645
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003646static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003647 .resume = iommu_resume,
3648 .suspend = iommu_suspend,
3649};
3650
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003651static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003652{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003653 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003654}
3655
3656#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003657static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003658#endif /* CONFIG_PM */
3659
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003660
Jiang Liuc2a0b532014-11-09 22:47:56 +08003661int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003662{
3663 struct acpi_dmar_reserved_memory *rmrr;
3664 struct dmar_rmrr_unit *rmrru;
3665
3666 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3667 if (!rmrru)
3668 return -ENOMEM;
3669
3670 rmrru->hdr = header;
3671 rmrr = (struct acpi_dmar_reserved_memory *)header;
3672 rmrru->base_address = rmrr->base_address;
3673 rmrru->end_address = rmrr->end_address;
Jiang Liu2e455282014-02-19 14:07:36 +08003674 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3675 ((void *)rmrr) + rmrr->header.length,
3676 &rmrru->devices_cnt);
3677 if (rmrru->devices_cnt && rmrru->devices == NULL) {
3678 kfree(rmrru);
3679 return -ENOMEM;
3680 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003681
Jiang Liu2e455282014-02-19 14:07:36 +08003682 list_add(&rmrru->list, &dmar_rmrr_units);
3683
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003684 return 0;
3685}
3686
Jiang Liu6b197242014-11-09 22:47:58 +08003687static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
3688{
3689 struct dmar_atsr_unit *atsru;
3690 struct acpi_dmar_atsr *tmp;
3691
3692 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3693 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
3694 if (atsr->segment != tmp->segment)
3695 continue;
3696 if (atsr->header.length != tmp->header.length)
3697 continue;
3698 if (memcmp(atsr, tmp, atsr->header.length) == 0)
3699 return atsru;
3700 }
3701
3702 return NULL;
3703}
3704
3705int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003706{
3707 struct acpi_dmar_atsr *atsr;
3708 struct dmar_atsr_unit *atsru;
3709
Jiang Liu6b197242014-11-09 22:47:58 +08003710 if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
3711 return 0;
3712
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003713 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
Jiang Liu6b197242014-11-09 22:47:58 +08003714 atsru = dmar_find_atsr(atsr);
3715 if (atsru)
3716 return 0;
3717
3718 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003719 if (!atsru)
3720 return -ENOMEM;
3721
Jiang Liu6b197242014-11-09 22:47:58 +08003722 /*
3723	 * If memory was allocated from the slab by the ACPI _DSM method, we
3724	 * need to copy the content, because the buffer will be freed when the
3725	 * method returns.
3726 */
3727 atsru->hdr = (void *)(atsru + 1);
3728 memcpy(atsru->hdr, hdr, hdr->length);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003729 atsru->include_all = atsr->flags & 0x1;
Jiang Liu2e455282014-02-19 14:07:36 +08003730 if (!atsru->include_all) {
3731 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
3732 (void *)atsr + atsr->header.length,
3733 &atsru->devices_cnt);
3734 if (atsru->devices_cnt && atsru->devices == NULL) {
3735 kfree(atsru);
3736 return -ENOMEM;
3737 }
3738 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003739
Jiang Liu0e242612014-02-19 14:07:34 +08003740 list_add_rcu(&atsru->list, &dmar_atsr_units);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003741
3742 return 0;
3743}
3744
Jiang Liu9bdc5312014-01-06 14:18:27 +08003745static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
3746{
3747 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
3748 kfree(atsru);
3749}
3750
Jiang Liu6b197242014-11-09 22:47:58 +08003751int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3752{
3753 struct acpi_dmar_atsr *atsr;
3754 struct dmar_atsr_unit *atsru;
3755
3756 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3757 atsru = dmar_find_atsr(atsr);
3758 if (atsru) {
3759 list_del_rcu(&atsru->list);
3760 synchronize_rcu();
3761 intel_iommu_free_atsr(atsru);
3762 }
3763
3764 return 0;
3765}
3766
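/*
 * Check whether an ATSR unit can be released: returns -EBUSY if any
 * device in its scope is still active, 0 otherwise.
 */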
3767int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3768{
3769 int i;
3770 struct device *dev;
3771 struct acpi_dmar_atsr *atsr;
3772 struct dmar_atsr_unit *atsru;
3773
3774 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3775 atsru = dmar_find_atsr(atsr);
3776 if (!atsru)
3777 return 0;
3778
3779 if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
3780 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
3781 i, dev)
3782 return -EBUSY;
3783
3784 return 0;
3785}
3786
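/*
 * Bring up a hot-added DMAR unit. The new IOMMU must not weaken the
 * feature set the running system already depends on (pass-through,
 * snoop control, superpages), so those capabilities are checked against
 * the current global state before the unit is enabled.
 */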
Jiang Liuffebeb42014-11-09 22:48:02 +08003787static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
3788{
3789 int sp, ret = 0;
3790 struct intel_iommu *iommu = dmaru->iommu;
3791
3792 if (g_iommus[iommu->seq_id])
3793 return 0;
3794
3795 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
3796 pr_warn("IOMMU: %s doesn't support hardware pass through.\n",
3797 iommu->name);
3798 return -ENXIO;
3799 }
3800 if (!ecap_sc_support(iommu->ecap) &&
3801 domain_update_iommu_snooping(iommu)) {
3802 pr_warn("IOMMU: %s doesn't support snooping.\n",
3803 iommu->name);
3804 return -ENXIO;
3805 }
3806 sp = domain_update_iommu_superpage(iommu) - 1;
3807 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
3808 pr_warn("IOMMU: %s doesn't support large page.\n",
3809 iommu->name);
3810 return -ENXIO;
3811 }
3812
3813 /*
3814 * Disable translation if already enabled prior to OS handover.
3815 */
3816 if (iommu->gcmd & DMA_GCMD_TE)
3817 iommu_disable_translation(iommu);
3818
3819 g_iommus[iommu->seq_id] = iommu;
3820 ret = iommu_init_domains(iommu);
3821 if (ret == 0)
3822 ret = iommu_alloc_root_entry(iommu);
3823 if (ret)
3824 goto out;
3825
3826 if (dmaru->ignored) {
3827 /*
3828 * we always have to disable PMRs or DMA may fail on this device
3829 */
3830 if (force_on)
3831 iommu_disable_protect_mem_regions(iommu);
3832 return 0;
3833 }
3834
3835 intel_iommu_init_qi(iommu);
3836 iommu_flush_write_buffer(iommu);
3837 ret = dmar_set_interrupt(iommu);
3838 if (ret)
3839 goto disable_iommu;
3840
3841 iommu_set_root_entry(iommu);
3842 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3843 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3844 iommu_enable_translation(iommu);
3845
3846 if (si_domain) {
3847 ret = iommu_attach_domain(si_domain, iommu);
3848 if (ret < 0 || si_domain->id != ret)
3849 goto disable_iommu;
3850 domain_attach_iommu(si_domain, iommu);
3851 }
3852
3853 iommu_disable_protect_mem_regions(iommu);
3854 return 0;
3855
3856disable_iommu:
3857 disable_dmar_iommu(iommu);
3858out:
3859 free_dmar_iommu(iommu);
3860 return ret;
3861}
3862
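/*
 * Entry point for DMAR unit hotplug: add and enable the IOMMU on
 * insert, or tear it down and free its state on removal.
 */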
Jiang Liu6b197242014-11-09 22:47:58 +08003863int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
3864{
Jiang Liuffebeb42014-11-09 22:48:02 +08003865 int ret = 0;
3866 struct intel_iommu *iommu = dmaru->iommu;
3867
3868 if (!intel_iommu_enabled)
3869 return 0;
3870 if (iommu == NULL)
3871 return -EINVAL;
3872
3873 if (insert) {
3874 ret = intel_iommu_add(dmaru);
3875 } else {
3876 disable_dmar_iommu(iommu);
3877 free_dmar_iommu(iommu);
3878 }
3879
3880 return ret;
Jiang Liu6b197242014-11-09 22:47:58 +08003881}
3882
Jiang Liu9bdc5312014-01-06 14:18:27 +08003883static void intel_iommu_free_dmars(void)
3884{
3885 struct dmar_rmrr_unit *rmrru, *rmrr_n;
3886 struct dmar_atsr_unit *atsru, *atsr_n;
3887
3888 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
3889 list_del(&rmrru->list);
3890 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
3891 kfree(rmrru);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003892 }
3893
Jiang Liu9bdc5312014-01-06 14:18:27 +08003894 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
3895 list_del(&atsru->list);
3896 intel_iommu_free_atsr(atsru);
3897 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003898}
3899
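/*
 * Report whether an ATSR unit covers @dev: walk up the PCI hierarchy to
 * the root port and check whether any ATSR unit on the same segment
 * lists that bridge in its device scope (or is an INCLUDE_ALL entry).
 */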
3900int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3901{
Jiang Liub683b232014-02-19 14:07:32 +08003902 int i, ret = 1;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003903 struct pci_bus *bus;
David Woodhouse832bd852014-03-07 15:08:36 +00003904 struct pci_dev *bridge = NULL;
3905 struct device *tmp;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003906 struct acpi_dmar_atsr *atsr;
3907 struct dmar_atsr_unit *atsru;
3908
3909 dev = pci_physfn(dev);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003910 for (bus = dev->bus; bus; bus = bus->parent) {
Jiang Liub5f82dd2014-02-19 14:07:31 +08003911 bridge = bus->self;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003912 if (!bridge || !pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08003913 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003914 return 0;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003915 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003916 break;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003917 }
Jiang Liub5f82dd2014-02-19 14:07:31 +08003918 if (!bridge)
3919 return 0;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003920
Jiang Liu0e242612014-02-19 14:07:34 +08003921 rcu_read_lock();
Jiang Liub5f82dd2014-02-19 14:07:31 +08003922 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3923 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3924 if (atsr->segment != pci_domain_nr(dev->bus))
3925 continue;
3926
Jiang Liub683b232014-02-19 14:07:32 +08003927 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
David Woodhouse832bd852014-03-07 15:08:36 +00003928 if (tmp == &bridge->dev)
Jiang Liub683b232014-02-19 14:07:32 +08003929 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003930
3931 if (atsru->include_all)
Jiang Liub683b232014-02-19 14:07:32 +08003932 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003933 }
Jiang Liub683b232014-02-19 14:07:32 +08003934 ret = 0;
3935out:
Jiang Liu0e242612014-02-19 14:07:34 +08003936 rcu_read_unlock();
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003937
Jiang Liub683b232014-02-19 14:07:32 +08003938 return ret;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003939}
3940
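/*
 * Keep the cached RMRR and ATSR device scopes in sync with PCI device
 * hotplug: insert the device into matching scopes on add, drop it on
 * removal.
 */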
Jiang Liu59ce0512014-02-19 14:07:35 +08003941int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
3942{
3943 int ret = 0;
3944 struct dmar_rmrr_unit *rmrru;
3945 struct dmar_atsr_unit *atsru;
3946 struct acpi_dmar_atsr *atsr;
3947 struct acpi_dmar_reserved_memory *rmrr;
3948
3949 if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
3950 return 0;
3951
3952 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
3953 rmrr = container_of(rmrru->hdr,
3954 struct acpi_dmar_reserved_memory, header);
3955 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3956 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
3957 ((void *)rmrr) + rmrr->header.length,
3958 rmrr->segment, rmrru->devices,
3959 rmrru->devices_cnt);
Jiang Liu27e24952014-06-20 15:08:06 +08003960			if (ret < 0)
Jiang Liu59ce0512014-02-19 14:07:35 +08003961 return ret;
3962 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
Jiang Liu27e24952014-06-20 15:08:06 +08003963 dmar_remove_dev_scope(info, rmrr->segment,
3964 rmrru->devices, rmrru->devices_cnt);
Jiang Liu59ce0512014-02-19 14:07:35 +08003965 }
3966 }
3967
3968 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3969 if (atsru->include_all)
3970 continue;
3971
3972 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3973 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3974 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
3975 (void *)atsr + atsr->header.length,
3976 atsr->segment, atsru->devices,
3977 atsru->devices_cnt);
3978 if (ret > 0)
3979 break;
3980			else if (ret < 0)
3981 return ret;
3982 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3983 if (dmar_remove_dev_scope(info, atsr->segment,
3984 atsru->devices, atsru->devices_cnt))
3985 break;
3986 }
3987 }
3988
3989 return 0;
3990}
3991
Fenghua Yu99dcade2009-11-11 07:23:06 -08003992/*
3993 * Here we only respond to the removal of a device (BUS_NOTIFY_REMOVED_DEVICE).
3994 *
3995 * An added device is not attached to its DMAR domain here yet. That will
3996 * happen when the device is first mapped to an iova.
3997 */
3998static int device_notifier(struct notifier_block *nb,
3999 unsigned long action, void *data)
4000{
4001 struct device *dev = data;
Fenghua Yu99dcade2009-11-11 07:23:06 -08004002 struct dmar_domain *domain;
4003
David Woodhouse3d891942014-03-06 15:59:26 +00004004 if (iommu_dummy(dev))
David Woodhouse44cd6132009-12-02 10:18:30 +00004005 return 0;
4006
Joerg Roedel1196c2f2014-09-30 13:02:03 +02004007 if (action != BUS_NOTIFY_REMOVED_DEVICE)
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004008 return 0;
4009
David Woodhouse1525a292014-03-06 16:19:30 +00004010 domain = find_domain(dev);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004011 if (!domain)
4012 return 0;
4013
Jiang Liu3a5670e2014-02-19 14:07:33 +08004014 down_read(&dmar_global_lock);
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004015 domain_remove_one_dev_info(domain, dev);
Jiang Liuab8dfe22014-07-11 14:19:27 +08004016 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004017 domain_exit(domain);
Jiang Liu3a5670e2014-02-19 14:07:33 +08004018 up_read(&dmar_global_lock);
Alex Williamsona97590e2011-03-04 14:52:16 -07004019
Fenghua Yu99dcade2009-11-11 07:23:06 -08004020 return 0;
4021}
4022
4023static struct notifier_block device_nb = {
4024 .notifier_call = device_notifier,
4025};
4026
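/*
 * Memory hotplug support for the static identity (si) domain: extend
 * the identity map when memory goes online, and unmap and flush the
 * corresponding IOVA range when it goes offline again.
 */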
Jiang Liu75f05562014-02-19 14:07:37 +08004027static int intel_iommu_memory_notifier(struct notifier_block *nb,
4028 unsigned long val, void *v)
4029{
4030 struct memory_notify *mhp = v;
4031 unsigned long long start, end;
4032 unsigned long start_vpfn, last_vpfn;
4033
4034 switch (val) {
4035 case MEM_GOING_ONLINE:
4036 start = mhp->start_pfn << PAGE_SHIFT;
4037 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4038 if (iommu_domain_identity_map(si_domain, start, end)) {
4039 pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
4040 start, end);
4041 return NOTIFY_BAD;
4042 }
4043 break;
4044
4045 case MEM_OFFLINE:
4046 case MEM_CANCEL_ONLINE:
4047 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4048 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4049 while (start_vpfn <= last_vpfn) {
4050 struct iova *iova;
4051 struct dmar_drhd_unit *drhd;
4052 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00004053 struct page *freelist;
Jiang Liu75f05562014-02-19 14:07:37 +08004054
4055 iova = find_iova(&si_domain->iovad, start_vpfn);
4056 if (iova == NULL) {
4057				pr_debug("dmar: failed to get IOVA for PFN %lx\n",
4058 start_vpfn);
4059 break;
4060 }
4061
4062 iova = split_and_remove_iova(&si_domain->iovad, iova,
4063 start_vpfn, last_vpfn);
4064 if (iova == NULL) {
4065 pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
4066 start_vpfn, last_vpfn);
4067 return NOTIFY_BAD;
4068 }
4069
David Woodhouseea8ea462014-03-05 17:09:32 +00004070 freelist = domain_unmap(si_domain, iova->pfn_lo,
4071 iova->pfn_hi);
4072
Jiang Liu75f05562014-02-19 14:07:37 +08004073 rcu_read_lock();
4074 for_each_active_iommu(iommu, drhd)
4075 iommu_flush_iotlb_psi(iommu, si_domain->id,
Jiang Liua156ef92014-07-11 14:19:36 +08004076 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00004077 !freelist, 0);
Jiang Liu75f05562014-02-19 14:07:37 +08004078 rcu_read_unlock();
David Woodhouseea8ea462014-03-05 17:09:32 +00004079 dma_free_pagelist(freelist);
Jiang Liu75f05562014-02-19 14:07:37 +08004080
4081 start_vpfn = iova->pfn_hi + 1;
4082 free_iova_mem(iova);
4083 }
4084 break;
4085 }
4086
4087 return NOTIFY_OK;
4088}
4089
4090static struct notifier_block intel_iommu_memory_nb = {
4091 .notifier_call = intel_iommu_memory_notifier,
4092 .priority = 0
4093};
4094
Alex Williamsona5459cf2014-06-12 16:12:31 -06004095
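/* Expose per-IOMMU registers (version, base address, cap/ecap) via sysfs. */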
4096static ssize_t intel_iommu_show_version(struct device *dev,
4097 struct device_attribute *attr,
4098 char *buf)
4099{
4100 struct intel_iommu *iommu = dev_get_drvdata(dev);
4101 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4102 return sprintf(buf, "%d:%d\n",
4103 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4104}
4105static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4106
4107static ssize_t intel_iommu_show_address(struct device *dev,
4108 struct device_attribute *attr,
4109 char *buf)
4110{
4111 struct intel_iommu *iommu = dev_get_drvdata(dev);
4112 return sprintf(buf, "%llx\n", iommu->reg_phys);
4113}
4114static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4115
4116static ssize_t intel_iommu_show_cap(struct device *dev,
4117 struct device_attribute *attr,
4118 char *buf)
4119{
4120 struct intel_iommu *iommu = dev_get_drvdata(dev);
4121 return sprintf(buf, "%llx\n", iommu->cap);
4122}
4123static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4124
4125static ssize_t intel_iommu_show_ecap(struct device *dev,
4126 struct device_attribute *attr,
4127 char *buf)
4128{
4129 struct intel_iommu *iommu = dev_get_drvdata(dev);
4130 return sprintf(buf, "%llx\n", iommu->ecap);
4131}
4132static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4133
4134static struct attribute *intel_iommu_attrs[] = {
4135 &dev_attr_version.attr,
4136 &dev_attr_address.attr,
4137 &dev_attr_cap.attr,
4138 &dev_attr_ecap.attr,
4139 NULL,
4140};
4141
4142static struct attribute_group intel_iommu_group = {
4143 .name = "intel-iommu",
4144 .attrs = intel_iommu_attrs,
4145};
4146
4147const struct attribute_group *intel_iommu_groups[] = {
4148 &intel_iommu_group,
4149 NULL,
4150};
4151
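/*
 * Main VT-d initialization: parse the DMAR table, set up per-IOMMU state
 * and DMA remapping via init_dmars(), install the DMA ops, and register
 * the IOMMU API, PM, sysfs, bus-notifier and memory-hotplug hooks.
 * Failure paths unwind through the out_free_* labels below.
 */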
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004152int __init intel_iommu_init(void)
4153{
Jiang Liu9bdc5312014-01-06 14:18:27 +08004154 int ret = -ENODEV;
Takao Indoh3a93c842013-04-23 17:35:03 +09004155 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08004156 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004157
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004158 /* VT-d is required for a TXT/tboot launch, so enforce that */
4159 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004160
Jiang Liu3a5670e2014-02-19 14:07:33 +08004161 if (iommu_init_mempool()) {
4162 if (force_on)
4163 panic("tboot: Failed to initialize iommu memory\n");
4164 return -ENOMEM;
4165 }
4166
4167 down_write(&dmar_global_lock);
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004168 if (dmar_table_init()) {
4169 if (force_on)
4170 panic("tboot: Failed to initialize DMAR table\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004171 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004172 }
4173
Takao Indoh3a93c842013-04-23 17:35:03 +09004174 /*
4175 * Disable translation if already enabled prior to OS handover.
4176 */
Jiang Liu7c919772014-01-06 14:18:18 +08004177 for_each_active_iommu(iommu, drhd)
Takao Indoh3a93c842013-04-23 17:35:03 +09004178 if (iommu->gcmd & DMA_GCMD_TE)
4179 iommu_disable_translation(iommu);
Takao Indoh3a93c842013-04-23 17:35:03 +09004180
Suresh Siddhac2c72862011-08-23 17:05:19 -07004181 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004182 if (force_on)
4183 panic("tboot: Failed to initialize DMAR device scope\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004184 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004185 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07004186
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004187 if (no_iommu || dmar_disabled)
Jiang Liu9bdc5312014-01-06 14:18:27 +08004188 goto out_free_dmar;
Suresh Siddha2ae21012008-07-10 11:16:43 -07004189
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004190 if (list_empty(&dmar_rmrr_units))
4191 printk(KERN_INFO "DMAR: No RMRR found\n");
4192
4193 if (list_empty(&dmar_atsr_units))
4194 printk(KERN_INFO "DMAR: No ATSR found\n");
4195
Joseph Cihula51a63e62011-03-21 11:04:24 -07004196 if (dmar_init_reserved_ranges()) {
4197 if (force_on)
4198 panic("tboot: Failed to reserve iommu ranges\n");
Jiang Liu3a5670e2014-02-19 14:07:33 +08004199 goto out_free_reserved_range;
Joseph Cihula51a63e62011-03-21 11:04:24 -07004200 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004201
4202 init_no_remapping_devices();
4203
Joseph Cihulab7792602011-05-03 00:08:37 -07004204 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004205 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004206 if (force_on)
4207 panic("tboot: Failed to initialize DMARs\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004208 printk(KERN_ERR "IOMMU: dmar init failed\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004209 goto out_free_reserved_range;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004210 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08004211 up_write(&dmar_global_lock);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004212 printk(KERN_INFO
4213 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
4214
mark gross5e0d2a62008-03-04 15:22:08 -08004215 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004216#ifdef CONFIG_SWIOTLB
4217 swiotlb = 0;
4218#endif
David Woodhouse19943b02009-08-04 16:19:20 +01004219 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07004220
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004221 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004222
Alex Williamsona5459cf2014-06-12 16:12:31 -06004223 for_each_active_iommu(iommu, drhd)
4224 iommu->iommu_dev = iommu_device_create(NULL, iommu,
4225 intel_iommu_groups,
4226 iommu->name);
4227
Joerg Roedel4236d97d2011-09-06 17:56:07 +02004228 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004229 bus_register_notifier(&pci_bus_type, &device_nb);
Jiang Liu75f05562014-02-19 14:07:37 +08004230 if (si_domain && !hw_pass_through)
4231 register_memory_notifier(&intel_iommu_memory_nb);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004232
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02004233 intel_iommu_enabled = 1;
4234
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004235 return 0;
Jiang Liu9bdc5312014-01-06 14:18:27 +08004236
4237out_free_reserved_range:
4238 put_iova_domain(&reserved_iova_list);
Jiang Liu9bdc5312014-01-06 14:18:27 +08004239out_free_dmar:
4240 intel_iommu_free_dmars();
Jiang Liu3a5670e2014-02-19 14:07:33 +08004241 up_write(&dmar_global_lock);
4242 iommu_exit_mempool();
Jiang Liu9bdc5312014-01-06 14:18:27 +08004243 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004244}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07004245
Alex Williamson579305f2014-07-03 09:51:43 -06004246static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4247{
4248 struct intel_iommu *iommu = opaque;
4249
4250 iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4251 return 0;
4252}
4253
4254/*
4255 * NB - intel-iommu lacks any sort of reference counting for the users of
4256 * dependent devices. If multiple endpoints have intersecting dependent
4257 * devices, unbinding the driver from any one of them will possibly leave
4258 * the others unable to operate.
4259 */
Han, Weidong3199aa62009-02-26 17:31:12 +08004260static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004261 struct device *dev)
Han, Weidong3199aa62009-02-26 17:31:12 +08004262{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004263 if (!iommu || !dev || !dev_is_pci(dev))
Han, Weidong3199aa62009-02-26 17:31:12 +08004264 return;
4265
Alex Williamson579305f2014-07-03 09:51:43 -06004266 pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
Han, Weidong3199aa62009-02-26 17:31:12 +08004267}
4268
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004269static void domain_remove_one_dev_info(struct dmar_domain *domain,
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004270 struct device *dev)
Weidong Hanc7151a82008-12-08 22:51:37 +08004271{
Yijing Wangbca2b912013-10-31 17:26:04 +08004272 struct device_domain_info *info, *tmp;
Weidong Hanc7151a82008-12-08 22:51:37 +08004273 struct intel_iommu *iommu;
4274 unsigned long flags;
Quentin Lambert2f119c72015-02-06 10:59:53 +01004275 bool found = false;
David Woodhouse156baca2014-03-09 14:00:57 -07004276 u8 bus, devfn;
Weidong Hanc7151a82008-12-08 22:51:37 +08004277
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004278 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08004279 if (!iommu)
4280 return;
4281
4282 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wangbca2b912013-10-31 17:26:04 +08004283 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004284 if (info->iommu == iommu && info->bus == bus &&
4285 info->devfn == devfn) {
David Woodhouse109b9b02012-05-25 17:43:02 +01004286 unlink_domain_info(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004287 spin_unlock_irqrestore(&device_domain_lock, flags);
4288
Yu Zhao93a23a72009-05-18 13:51:37 +08004289 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004290 iommu_detach_dev(iommu, info->bus, info->devfn);
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004291 iommu_detach_dependent_devices(iommu, dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08004292 free_devinfo_mem(info);
4293
4294 spin_lock_irqsave(&device_domain_lock, flags);
4295
4296 if (found)
4297 break;
4298 else
4299 continue;
4300 }
4301
4302		/* If there are no other devices under the same iommu
4303		 * owned by this domain, clear this iommu in iommu_bmp,
4304		 * and update the iommu count and coherency.
4305		 */
David Woodhouse8bbc4412014-03-09 13:52:37 -07004306 if (info->iommu == iommu)
Quentin Lambert2f119c72015-02-06 10:59:53 +01004307 found = true;
Weidong Hanc7151a82008-12-08 22:51:37 +08004308 }
4309
Roland Dreier3e7abe22011-07-20 06:22:21 -07004310 spin_unlock_irqrestore(&device_domain_lock, flags);
4311
Weidong Hanc7151a82008-12-08 22:51:37 +08004312	if (!found) {
Jiang Liufb170fb2014-07-11 14:19:28 +08004313 domain_detach_iommu(domain, iommu);
4314 if (!domain_type_is_vm_or_si(domain))
4315 iommu_detach_domain(domain, iommu);
Weidong Hanc7151a82008-12-08 22:51:37 +08004316 }
Weidong Hanc7151a82008-12-08 22:51:37 +08004317}
4318
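/*
 * Initialize a domain created through the IOMMU API (as opposed to one
 * built for DMA API use): reserve the special IOVA ranges and allocate
 * the top-level page table for the requested guest address width.
 */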
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004319static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08004320{
4321 int adjust_width;
4322
Robin Murphy0fb5fe82015-01-12 17:51:16 +00004323 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
4324 DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004325 domain_reserve_special_ranges(domain);
4326
4327 /* calculate AGAW */
4328 domain->gaw = guest_width;
4329 adjust_width = guestwidth_to_adjustwidth(guest_width);
4330 domain->agaw = width_to_agaw(adjust_width);
4331
Weidong Han5e98c4b2008-12-08 23:03:27 +08004332 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08004333 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01004334 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004335 domain->max_addr = 0;
Weidong Han5e98c4b2008-12-08 23:03:27 +08004336
4337 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07004338 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004339 if (!domain->pgd)
4340 return -ENOMEM;
4341 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4342 return 0;
4343}
4344
Joerg Roedel00a77de2015-03-26 13:43:08 +01004345static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
Kay, Allen M38717942008-09-09 18:37:29 +03004346{
Joerg Roedel5d450802008-12-03 14:52:32 +01004347 struct dmar_domain *dmar_domain;
Joerg Roedel00a77de2015-03-26 13:43:08 +01004348 struct iommu_domain *domain;
4349
4350 if (type != IOMMU_DOMAIN_UNMANAGED)
4351 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004352
Jiang Liuab8dfe22014-07-11 14:19:27 +08004353 dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
Joerg Roedel5d450802008-12-03 14:52:32 +01004354 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03004355 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01004356		       "intel_iommu_domain_alloc: dmar_domain == NULL\n");
Joerg Roedel00a77de2015-03-26 13:43:08 +01004357 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004358 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004359 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03004360 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01004361		       "intel_iommu_domain_alloc: md_domain_init() failed\n");
Jiang Liu92d03cc2014-02-19 14:07:28 +08004362 domain_exit(dmar_domain);
Joerg Roedel00a77de2015-03-26 13:43:08 +01004363 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004364 }
Allen Kay8140a952011-10-14 12:32:17 -07004365 domain_update_iommu_cap(dmar_domain);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004366
Joerg Roedel00a77de2015-03-26 13:43:08 +01004367 domain = &dmar_domain->domain;
Joerg Roedel8a0e7152012-01-26 19:40:54 +01004368 domain->geometry.aperture_start = 0;
4369 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4370 domain->geometry.force_aperture = true;
4371
Joerg Roedel00a77de2015-03-26 13:43:08 +01004372 return domain;
Kay, Allen M38717942008-09-09 18:37:29 +03004373}
Kay, Allen M38717942008-09-09 18:37:29 +03004374
Joerg Roedel00a77de2015-03-26 13:43:08 +01004375static void intel_iommu_domain_free(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004376{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004377 domain_exit(to_dmar_domain(domain));
Kay, Allen M38717942008-09-09 18:37:29 +03004378}
Kay, Allen M38717942008-09-09 18:37:29 +03004379
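/*
 * Attach @dev to an IOMMU API domain: detach it from any previous
 * domain first, verify the hardware address width covers what is
 * already mapped, and trim extra page-table levels if the domain was
 * built wider than this IOMMU supports.
 */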
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004380static int intel_iommu_attach_device(struct iommu_domain *domain,
4381 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004382{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004383 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004384 struct intel_iommu *iommu;
4385 int addr_width;
David Woodhouse156baca2014-03-09 14:00:57 -07004386 u8 bus, devfn;
Kay, Allen M38717942008-09-09 18:37:29 +03004387
Alex Williamsonc875d2c2014-07-03 09:57:02 -06004388 if (device_is_rmrr_locked(dev)) {
4389 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
4390 return -EPERM;
4391 }
4392
David Woodhouse7207d8f2014-03-09 16:31:06 -07004393 /* normally dev is not mapped */
4394 if (unlikely(domain_context_mapped(dev))) {
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004395 struct dmar_domain *old_domain;
4396
David Woodhouse1525a292014-03-06 16:19:30 +00004397 old_domain = find_domain(dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004398 if (old_domain) {
Jiang Liuab8dfe22014-07-11 14:19:27 +08004399 if (domain_type_is_vm_or_si(dmar_domain))
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004400 domain_remove_one_dev_info(old_domain, dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004401 else
4402 domain_remove_dev_info(old_domain);
Joerg Roedel62c22162014-12-09 12:56:45 +01004403
4404 if (!domain_type_is_vm_or_si(old_domain) &&
4405 list_empty(&old_domain->devices))
4406 domain_exit(old_domain);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004407 }
4408 }
4409
David Woodhouse156baca2014-03-09 14:00:57 -07004410 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004411 if (!iommu)
4412 return -ENODEV;
4413
4414 /* check if this iommu agaw is sufficient for max mapped address */
4415 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01004416 if (addr_width > cap_mgaw(iommu->cap))
4417 addr_width = cap_mgaw(iommu->cap);
4418
4419 if (dmar_domain->max_addr > (1LL << addr_width)) {
4420 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004421 "sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01004422 __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004423 return -EFAULT;
4424 }
Tom Lyona99c47a2010-05-17 08:20:45 +01004425 dmar_domain->gaw = addr_width;
4426
4427 /*
4428 * Knock out extra levels of page tables if necessary
4429 */
4430 while (iommu->agaw < dmar_domain->agaw) {
4431 struct dma_pte *pte;
4432
4433 pte = dmar_domain->pgd;
4434 if (dma_pte_present(pte)) {
Sheng Yang25cbff12010-06-12 19:21:42 +08004435 dmar_domain->pgd = (struct dma_pte *)
4436 phys_to_virt(dma_pte_addr(pte));
Jan Kiszka7a661012010-11-02 08:05:51 +01004437 free_pgtable_page(pte);
Tom Lyona99c47a2010-05-17 08:20:45 +01004438 }
4439 dmar_domain->agaw--;
4440 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004441
David Woodhouse5913c9b2014-03-09 16:27:31 -07004442 return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004443}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004444
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004445static void intel_iommu_detach_device(struct iommu_domain *domain,
4446 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004447{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004448 domain_remove_one_dev_info(to_dmar_domain(domain), dev);
Kay, Allen M38717942008-09-09 18:37:29 +03004449}
Kay, Allen M38717942008-09-09 18:37:29 +03004450
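/*
 * IOMMU API map callback: translate IOMMU_READ/WRITE/CACHE flags into
 * DMA PTE bits, grow the domain's max_addr (bounded by its gaw), and
 * install the mapping at page granularity.
 */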
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004451static int intel_iommu_map(struct iommu_domain *domain,
4452 unsigned long iova, phys_addr_t hpa,
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004453 size_t size, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03004454{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004455 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004456 u64 max_addr;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004457 int prot = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004458 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004459
Joerg Roedeldde57a22008-12-03 15:04:09 +01004460 if (iommu_prot & IOMMU_READ)
4461 prot |= DMA_PTE_READ;
4462 if (iommu_prot & IOMMU_WRITE)
4463 prot |= DMA_PTE_WRITE;
Sheng Yang9cf066972009-03-18 15:33:07 +08004464 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4465 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004466
David Woodhouse163cc522009-06-28 00:51:17 +01004467 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004468 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004469 u64 end;
4470
4471 /* check if minimum agaw is sufficient for mapped address */
Tom Lyon8954da12010-05-17 08:19:52 +01004472 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004473 if (end < max_addr) {
Tom Lyon8954da12010-05-17 08:19:52 +01004474 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004475 "sufficient for the mapped address (%llx)\n",
Tom Lyon8954da12010-05-17 08:19:52 +01004476 __func__, dmar_domain->gaw, max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004477 return -EFAULT;
4478 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01004479 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004480 }
David Woodhousead051222009-06-28 14:22:28 +01004481 /* Round up size to next multiple of PAGE_SIZE, if it and
4482 the low bits of hpa would take us onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01004483 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01004484 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4485 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004486 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03004487}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004488
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004489static size_t intel_iommu_unmap(struct iommu_domain *domain,
David Woodhouseea8ea462014-03-05 17:09:32 +00004490 unsigned long iova, size_t size)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004491{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004492 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
David Woodhouseea8ea462014-03-05 17:09:32 +00004493 struct page *freelist = NULL;
4494 struct intel_iommu *iommu;
4495 unsigned long start_pfn, last_pfn;
4496 unsigned int npages;
4497 int iommu_id, num, ndomains, level = 0;
Sheng Yang4b99d352009-07-08 11:52:52 +01004498
David Woodhouse5cf0a762014-03-19 16:07:49 +00004499 /* Cope with horrid API which requires us to unmap more than the
4500 size argument if it happens to be a large-page mapping. */
4501	BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
4503
4504 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
4505 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
4506
David Woodhouseea8ea462014-03-05 17:09:32 +00004507 start_pfn = iova >> VTD_PAGE_SHIFT;
4508 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
4509
4510 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
4511
4512 npages = last_pfn - start_pfn + 1;
4513
4514 for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
4515 iommu = g_iommus[iommu_id];
4516
4517 /*
4518 * find bit position of dmar_domain
4519 */
4520 ndomains = cap_ndoms(iommu->cap);
4521 for_each_set_bit(num, iommu->domain_ids, ndomains) {
4522 if (iommu->domains[num] == dmar_domain)
4523 iommu_flush_iotlb_psi(iommu, num, start_pfn,
4524 npages, !freelist, 0);
4525 }
4526
4527 }
4528
4529 dma_free_pagelist(freelist);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004530
David Woodhouse163cc522009-06-28 00:51:17 +01004531 if (dmar_domain->max_addr == iova + size)
4532 dmar_domain->max_addr = iova;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004533
David Woodhouse5cf0a762014-03-19 16:07:49 +00004534 return size;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004535}
Kay, Allen M38717942008-09-09 18:37:29 +03004536
Joerg Roedeld14d6572008-12-03 15:06:57 +01004537static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
Varun Sethibb5547ac2013-03-29 01:23:58 +05304538 dma_addr_t iova)
Kay, Allen M38717942008-09-09 18:37:29 +03004539{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004540 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Kay, Allen M38717942008-09-09 18:37:29 +03004541 struct dma_pte *pte;
David Woodhouse5cf0a762014-03-19 16:07:49 +00004542 int level = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004543 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004544
David Woodhouse5cf0a762014-03-19 16:07:49 +00004545 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
Kay, Allen M38717942008-09-09 18:37:29 +03004546 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004547 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03004548
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004549 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03004550}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004551
Joerg Roedel5d587b82014-09-05 10:50:45 +02004552static bool intel_iommu_capable(enum iommu_cap cap)
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004553{
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004554 if (cap == IOMMU_CAP_CACHE_COHERENCY)
Joerg Roedel5d587b82014-09-05 10:50:45 +02004555 return domain_update_iommu_snooping(NULL) == 1;
Tom Lyon323f99c2010-07-02 16:56:14 -04004556 if (cap == IOMMU_CAP_INTR_REMAP)
Joerg Roedel5d587b82014-09-05 10:50:45 +02004557 return irq_remapping_enabled == 1;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004558
Joerg Roedel5d587b82014-09-05 10:50:45 +02004559 return false;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004560}
4561
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004562static int intel_iommu_add_device(struct device *dev)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004563{
Alex Williamsona5459cf2014-06-12 16:12:31 -06004564 struct intel_iommu *iommu;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004565 struct iommu_group *group;
David Woodhouse156baca2014-03-09 14:00:57 -07004566 u8 bus, devfn;
Alex Williamson70ae6f02011-10-21 15:56:11 -04004567
Alex Williamsona5459cf2014-06-12 16:12:31 -06004568 iommu = device_to_iommu(dev, &bus, &devfn);
4569 if (!iommu)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004570 return -ENODEV;
4571
Alex Williamsona5459cf2014-06-12 16:12:31 -06004572 iommu_device_link(iommu->iommu_dev, dev);
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004573
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004574 group = iommu_group_get_for_dev(dev);
Alex Williamson783f1572012-05-30 14:19:43 -06004575
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004576 if (IS_ERR(group))
4577 return PTR_ERR(group);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004578
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004579 iommu_group_put(group);
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004580 return 0;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004581}
4582
4583static void intel_iommu_remove_device(struct device *dev)
4584{
Alex Williamsona5459cf2014-06-12 16:12:31 -06004585 struct intel_iommu *iommu;
4586 u8 bus, devfn;
4587
4588 iommu = device_to_iommu(dev, &bus, &devfn);
4589 if (!iommu)
4590 return;
4591
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004592 iommu_group_remove_device(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004593
4594 iommu_device_unlink(iommu->iommu_dev, dev);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004595}
4596
Thierry Redingb22f6432014-06-27 09:03:12 +02004597static const struct iommu_ops intel_iommu_ops = {
Joerg Roedel5d587b82014-09-05 10:50:45 +02004598 .capable = intel_iommu_capable,
Joerg Roedel00a77de2015-03-26 13:43:08 +01004599 .domain_alloc = intel_iommu_domain_alloc,
4600 .domain_free = intel_iommu_domain_free,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004601 .attach_dev = intel_iommu_attach_device,
4602 .detach_dev = intel_iommu_detach_device,
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004603 .map = intel_iommu_map,
4604 .unmap = intel_iommu_unmap,
Olav Haugan315786e2014-10-25 09:55:16 -07004605 .map_sg = default_iommu_map_sg,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004606 .iova_to_phys = intel_iommu_iova_to_phys,
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004607 .add_device = intel_iommu_add_device,
4608 .remove_device = intel_iommu_remove_device,
Ohad Ben-Cohen6d1c56a2011-11-10 11:32:30 +02004609 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004610};
David Woodhouse9af88142009-02-13 23:18:03 +00004611
Daniel Vetter94526182013-01-20 23:50:13 +01004612static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4613{
4614 /* G4x/GM45 integrated gfx dmar support is totally busted. */
4615 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4616 dmar_map_gfx = 0;
4617}
4618
4619DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4620DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4621DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4622DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4623DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4624DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4625DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4626
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004627static void quirk_iommu_rwbf(struct pci_dev *dev)
David Woodhouse9af88142009-02-13 23:18:03 +00004628{
4629 /*
4630 * Mobile 4 Series Chipset neglects to set RWBF capability,
Daniel Vetter210561f2013-01-21 19:48:59 +01004631 * but needs it. Same seems to hold for the desktop versions.
David Woodhouse9af88142009-02-13 23:18:03 +00004632 */
4633 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4634 rwbf_quirk = 1;
4635}
4636
4637DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
Daniel Vetter210561f2013-01-21 19:48:59 +01004638DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4639DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4640DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4641DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4642DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4643DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
David Woodhousee0fc7e02009-09-30 09:12:17 -07004644
Adam Jacksoneecfd572010-08-25 21:17:34 +01004645#define GGC 0x52
4646#define GGC_MEMORY_SIZE_MASK (0xf << 8)
4647#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4648#define GGC_MEMORY_SIZE_1M (0x1 << 8)
4649#define GGC_MEMORY_SIZE_2M (0x3 << 8)
4650#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4651#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4652#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4653#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
4654
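/*
 * Integrated graphics on Calpella/Ironlake can only be remapped through
 * a BIOS-allocated shadow GTT. If the GGC register shows the BIOS left
 * it disabled, graphics remapping cannot work, so leave gfx unmapped;
 * if it is enabled, batched IOTLB flushing must be avoided.
 */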
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004655static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
David Woodhouse9eecabc2010-09-21 22:28:23 +01004656{
4657 unsigned short ggc;
4658
Adam Jacksoneecfd572010-08-25 21:17:34 +01004659 if (pci_read_config_word(dev, GGC, &ggc))
David Woodhouse9eecabc2010-09-21 22:28:23 +01004660 return;
4661
Adam Jacksoneecfd572010-08-25 21:17:34 +01004662 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
David Woodhouse9eecabc2010-09-21 22:28:23 +01004663 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4664 dmar_map_gfx = 0;
David Woodhouse6fbcfb32011-09-25 19:11:14 -07004665 } else if (dmar_map_gfx) {
4666 /* we have to ensure the gfx device is idle before we flush */
4667 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
4668 intel_iommu_strict = 1;
4669 }
David Woodhouse9eecabc2010-09-21 22:28:23 +01004670}
4671DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4672DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4673DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4674DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4675
David Woodhousee0fc7e02009-09-30 09:12:17 -07004676/* On Tylersburg chipsets, some BIOSes have been known to enable the
4677 ISOCH DMAR unit for the Azalia sound device, but not give it any
4678 TLB entries, which causes it to deadlock. Check for that. We do
4679 this in a function called from init_dmars(), instead of in a PCI
4680 quirk, because we don't want to print the obnoxious "BIOS broken"
4681 message if VT-d is actually disabled.
4682*/
4683static void __init check_tylersburg_isoch(void)
4684{
4685 struct pci_dev *pdev;
4686 uint32_t vtisochctrl;
4687
4688 /* If there's no Azalia in the system anyway, forget it. */
4689 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4690 if (!pdev)
4691 return;
4692 pci_dev_put(pdev);
4693
4694 /* System Management Registers. Might be hidden, in which case
4695 we can't do the sanity check. But that's OK, because the
4696 known-broken BIOSes _don't_ actually hide it, so far. */
4697 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4698 if (!pdev)
4699 return;
4700
4701 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4702 pci_dev_put(pdev);
4703 return;
4704 }
4705
4706 pci_dev_put(pdev);
4707
4708 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4709 if (vtisochctrl & 1)
4710 return;
4711
4712 /* Drop all bits other than the number of TLB entries */
4713 vtisochctrl &= 0x1c;
4714
4715 /* If we have the recommended number of TLB entries (16), fine. */
4716 if (vtisochctrl == 0x10)
4717 return;
4718
4719 /* Zero TLB entries? You get to ride the short bus to school. */
4720 if (!vtisochctrl) {
4721 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4722 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4723 dmi_get_system_info(DMI_BIOS_VENDOR),
4724 dmi_get_system_info(DMI_BIOS_VERSION),
4725 dmi_get_system_info(DMI_PRODUCT_VERSION));
4726 iommu_identity_mapping |= IDENTMAP_AZALIA;
4727 return;
4728 }
4729
4730 printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
4731 vtisochctrl);
4732}