Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001/*
David Woodhouseea8ea462014-03-05 17:09:32 +00002 * Copyright © 2006-2014 Intel Corporation.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
David Woodhouseea8ea462014-03-05 17:09:32 +000013 * Authors: David Woodhouse <dwmw2@infradead.org>,
14 * Ashok Raj <ashok.raj@intel.com>,
15 * Shaohua Li <shaohua.li@intel.com>,
16 * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17 * Fenghua Yu <fenghua.yu@intel.com>
Joerg Roedel9f10e5b2015-06-12 09:57:06 +020018 * Joerg Roedel <jroedel@suse.de>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070019 */
20
Joerg Roedel9f10e5b2015-06-12 09:57:06 +020021#define pr_fmt(fmt) "DMAR: " fmt
22
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070023#include <linux/init.h>
24#include <linux/bitmap.h>
mark gross5e0d2a62008-03-04 15:22:08 -080025#include <linux/debugfs.h>
Paul Gortmaker54485c32011-10-29 10:26:25 -040026#include <linux/export.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070027#include <linux/slab.h>
28#include <linux/irq.h>
29#include <linux/interrupt.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070030#include <linux/spinlock.h>
31#include <linux/pci.h>
32#include <linux/dmar.h>
33#include <linux/dma-mapping.h>
34#include <linux/mempool.h>
Jiang Liu75f05562014-02-19 14:07:37 +080035#include <linux/memory.h>
mark gross5e0d2a62008-03-04 15:22:08 -080036#include <linux/timer.h>
Kay, Allen M38717942008-09-09 18:37:29 +030037#include <linux/iova.h>
Joerg Roedel5d450802008-12-03 14:52:32 +010038#include <linux/iommu.h>
Kay, Allen M38717942008-09-09 18:37:29 +030039#include <linux/intel-iommu.h>
Rafael J. Wysocki134fac32011-03-23 22:16:14 +010040#include <linux/syscore_ops.h>
Shane Wang69575d32009-09-01 18:25:07 -070041#include <linux/tboot.h>
Stephen Rothwelladb2fe02009-08-31 15:24:23 +100042#include <linux/dmi.h>
Joerg Roedel5cdede22011-04-04 15:55:18 +020043#include <linux/pci-ats.h>
Tejun Heo0ee332c2011-12-08 10:22:09 -080044#include <linux/memblock.h>
Akinobu Mita36746432014-06-04 16:06:51 -070045#include <linux/dma-contiguous.h>
Suresh Siddha8a8f4222012-03-30 11:47:08 -070046#include <asm/irq_remapping.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070047#include <asm/cacheflush.h>
FUJITA Tomonori46a7fa22008-07-11 10:23:42 +090048#include <asm/iommu.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070049
Joerg Roedel078e1ee2012-09-26 12:44:43 +020050#include "irq_remapping.h"
51
Fenghua Yu5b6985c2008-10-16 18:02:32 -070052#define ROOT_SIZE VTD_PAGE_SIZE
53#define CONTEXT_SIZE VTD_PAGE_SIZE
54
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070055#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
David Woodhouse18436af2015-03-25 15:05:47 +000056#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070057#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
David Woodhousee0fc7e02009-09-30 09:12:17 -070058#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070059
60#define IOAPIC_RANGE_START (0xfee00000)
61#define IOAPIC_RANGE_END (0xfeefffff)
62#define IOVA_START_ADDR (0x1000)
63
64#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
65
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -070066#define MAX_AGAW_WIDTH 64
Jiang Liu5c645b32014-01-06 14:18:12 +080067#define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -070068
David Woodhouse2ebe3152009-09-19 07:34:04 -070069#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
70#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
71
72/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
73 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
74#define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
75 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
76#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
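/*
 * Worked example (illustrative): with the default gaw of 48 and
 * VTD_PAGE_SHIFT == 12, __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1, so on a
 * 64-bit kernel DOMAIN_MAX_PFN(48) == 0xfffffffff and
 * DOMAIN_MAX_ADDR(48) == 0xfffffffff000; on a 32-bit kernel the PFN is
 * clamped to (unsigned long)-1 == 0xffffffff.
 */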
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070077
Robin Murphy1b722502015-01-12 17:51:15 +000078/* IO virtual address start page frame number */
79#define IOVA_START_PFN (1)
80
Mark McLoughlinf27be032008-11-20 15:49:43 +000081#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
Yang Hongyang284901a2009-04-06 19:01:15 -070082#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
Yang Hongyang6a355282009-04-06 19:01:13 -070083#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
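/*
 * Illustrative values, assuming PAGE_SHIFT == 12 (x86): DMA_32BIT_PFN is
 * IOVA_PFN(0xffffffff) == 0xfffff, and DMA_64BIT_PFN is 2^52 - 1, i.e. the
 * highest page frame number reachable with a 64-bit DMA mask.
 */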
mark gross5e0d2a62008-03-04 15:22:08 -080084
Andrew Mortondf08cdc2010-09-22 13:05:11 -070085/* page table handling */
86#define LEVEL_STRIDE (9)
87#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
88
Ohad Ben-Cohen6d1c56a2011-11-10 11:32:30 +020089/*
90 * This bitmap is used to advertise the page sizes our hardware support
91 * to the IOMMU core, which will then use this information to split
92 * physically contiguous memory regions it is mapping into page sizes
93 * that we support.
94 *
95 * Traditionally the IOMMU core just handed us the mappings directly,
 96 * after making sure the size is a power-of-two multiple of 4KiB and that the
97 * mapping has natural alignment.
98 *
99 * To retain this behavior, we currently advertise that we support
 100 * all page sizes that are a power-of-two multiple of 4KiB.
101 *
102 * If at some point we'd like to utilize the IOMMU core's new behavior,
103 * we could change this to advertise the real page sizes we support.
104 */
105#define INTEL_IOMMU_PGSIZES (~0xFFFUL)
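/*
 * In other words (illustrative): ~0xFFFUL has bits 12 and above set, so we
 * advertise 4KiB (bit 12), 8KiB, ... 2MiB (bit 21), 1GiB (bit 30) and every
 * other power-of-two size, even though the hardware itself only implements
 * 4KiB plus the superpage sizes reported in its capability register.
 */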
106
Andrew Mortondf08cdc2010-09-22 13:05:11 -0700107static inline int agaw_to_level(int agaw)
108{
109 return agaw + 2;
110}
111
112static inline int agaw_to_width(int agaw)
113{
Jiang Liu5c645b32014-01-06 14:18:12 +0800114 return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
Andrew Mortondf08cdc2010-09-22 13:05:11 -0700115}
116
117static inline int width_to_agaw(int width)
118{
Jiang Liu5c645b32014-01-06 14:18:12 +0800119 return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
Andrew Mortondf08cdc2010-09-22 13:05:11 -0700120}
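/*
 * Worked example (illustrative): SAGAW/AGAW value 2 means a 4-level page
 * table (agaw_to_level(2) == 4) covering a 48-bit address space
 * (agaw_to_width(2) == 30 + 2 * 9 == 48); conversely width_to_agaw(48) ==
 * DIV_ROUND_UP(18, 9) == 2, and width_to_agaw(39) == 1 (3-level).
 */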
121
122static inline unsigned int level_to_offset_bits(int level)
123{
124 return (level - 1) * LEVEL_STRIDE;
125}
126
127static inline int pfn_level_offset(unsigned long pfn, int level)
128{
129 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
130}
131
132static inline unsigned long level_mask(int level)
133{
134 return -1UL << level_to_offset_bits(level);
135}
136
137static inline unsigned long level_size(int level)
138{
139 return 1UL << level_to_offset_bits(level);
140}
141
142static inline unsigned long align_to_level(unsigned long pfn, int level)
143{
144 return (pfn + level_size(level) - 1) & level_mask(level);
145}
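/*
 * Worked example (illustrative): at level 2 the offset bits are 9..17, so
 * pfn_level_offset(pfn, 2) == (pfn >> 9) & 0x1ff, level_size(2) == 512
 * VT-d pages (2MiB of IOVA), and align_to_level(0x201, 2) rounds up to
 * 0x400.
 */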
David Woodhousefd18de52009-05-10 23:57:41 +0100146
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100147static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
148{
Jiang Liu5c645b32014-01-06 14:18:12 +0800149 return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100150}
151
David Woodhousedd4e8312009-06-27 16:21:20 +0100152/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
153 are never going to work. */
154static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
155{
156 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
157}
158
159static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
160{
161 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
162}
163static inline unsigned long page_to_dma_pfn(struct page *pg)
164{
165 return mm_to_dma_pfn(page_to_pfn(pg));
166}
167static inline unsigned long virt_to_dma_pfn(void *p)
168{
169 return page_to_dma_pfn(virt_to_page(p));
170}
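/*
 * Note (illustrative): on x86 PAGE_SHIFT == VTD_PAGE_SHIFT == 12, so
 * dma_to_mm_pfn()/mm_to_dma_pfn() shift by zero and are identity mappings;
 * on an architecture with 64KiB MM pages one mm_pfn would correspond to
 * sixteen consecutive VT-d PFNs.
 */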
171
Weidong Hand9630fe2008-12-08 11:06:32 +0800172/* global iommu list, set NULL for ignored DMAR units */
173static struct intel_iommu **g_iommus;
174
David Woodhousee0fc7e02009-09-30 09:12:17 -0700175static void __init check_tylersburg_isoch(void);
David Woodhouse9af88142009-02-13 23:18:03 +0000176static int rwbf_quirk;
177
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000178/*
Joseph Cihulab7792602011-05-03 00:08:37 -0700179 * set to 1 to panic the kernel if VT-d can't be enabled successfully
180 * (used when kernel is launched w/ TXT)
181 */
182static int force_on = 0;
183
184/*
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000185 * 0: Present
186 * 1-11: Reserved
187 * 12-63: Context Ptr (12 - (haw-1))
188 * 64-127: Reserved
189 */
190struct root_entry {
David Woodhouse03ecc322015-02-13 14:35:21 +0000191 u64 lo;
192 u64 hi;
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000193};
194#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000195
Mark McLoughlin46b08e12008-11-20 15:49:44 +0000196
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000197/*
198 * low 64 bits:
199 * 0: present
200 * 1: fault processing disable
201 * 2-3: translation type
202 * 12-63: address space root
203 * high 64 bits:
204 * 0-2: address width
205 * 3-6: aval
206 * 8-23: domain id
207 */
208struct context_entry {
209 u64 lo;
210 u64 hi;
211};
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000212
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000213static inline bool context_present(struct context_entry *context)
214{
215 return (context->lo & 1);
216}
217static inline void context_set_present(struct context_entry *context)
218{
219 context->lo |= 1;
220}
221
222static inline void context_set_fault_enable(struct context_entry *context)
223{
224 context->lo &= (((u64)-1) << 2) | 1;
225}
226
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000227static inline void context_set_translation_type(struct context_entry *context,
228 unsigned long value)
229{
230 context->lo &= (((u64)-1) << 4) | 3;
231 context->lo |= (value & 3) << 2;
232}
233
234static inline void context_set_address_root(struct context_entry *context,
235 unsigned long value)
236{
Li, Zhen-Hua1a2262f2014-11-05 15:30:19 +0800237 context->lo &= ~VTD_PAGE_MASK;
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000238 context->lo |= value & VTD_PAGE_MASK;
239}
240
241static inline void context_set_address_width(struct context_entry *context,
242 unsigned long value)
243{
244 context->hi |= value & 7;
245}
246
247static inline void context_set_domain_id(struct context_entry *context,
248 unsigned long value)
249{
250 context->hi |= (value & ((1 << 16) - 1)) << 8;
251}
252
253static inline void context_clear_entry(struct context_entry *context)
254{
255 context->lo = 0;
256 context->hi = 0;
257}
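/*
 * Illustrative sequence only (not part of the driver; 'ce' and 'pgd_phys'
 * are placeholder names): a context entry for domain 42 using a 4-level
 * table at physical address pgd_phys would be built as
 *
 *	struct context_entry ce;
 *	context_clear_entry(&ce);
 *	context_set_domain_id(&ce, 42);			// hi |= 42 << 8
 *	context_set_address_width(&ce, 2);		// hi |= 2 (AGAW 2, 4-level)
 *	context_set_address_root(&ce, pgd_phys);	// lo bits 12-63
 *	context_set_translation_type(&ce, 0);		// 0 == multi-level walk
 *	context_set_present(&ce);			// lo |= 1
 */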
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000258
Mark McLoughlin622ba122008-11-20 15:49:46 +0000259/*
260 * 0: readable
261 * 1: writable
262 * 2-6: reserved
263 * 7: super page
Sheng Yang9cf066972009-03-18 15:33:07 +0800264 * 8-10: available
265 * 11: snoop behavior
Mark McLoughlin622ba122008-11-20 15:49:46 +0000266 * 12-63: Host physcial address
267 */
268struct dma_pte {
269 u64 val;
270};
Mark McLoughlin622ba122008-11-20 15:49:46 +0000271
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000272static inline void dma_clear_pte(struct dma_pte *pte)
273{
274 pte->val = 0;
275}
276
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000277static inline u64 dma_pte_addr(struct dma_pte *pte)
278{
David Woodhousec85994e2009-07-01 19:21:24 +0100279#ifdef CONFIG_64BIT
280 return pte->val & VTD_PAGE_MASK;
281#else
282 /* Must have a full atomic 64-bit read */
David Woodhouse1a8bd482010-08-10 01:38:53 +0100283 return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
David Woodhousec85994e2009-07-01 19:21:24 +0100284#endif
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000285}
286
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000287static inline bool dma_pte_present(struct dma_pte *pte)
288{
289 return (pte->val & 3) != 0;
290}
Mark McLoughlin622ba122008-11-20 15:49:46 +0000291
Allen Kay4399c8b2011-10-14 12:32:46 -0700292static inline bool dma_pte_superpage(struct dma_pte *pte)
293{
Joerg Roedelc3c75eb2014-07-04 11:19:10 +0200294 return (pte->val & DMA_PTE_LARGE_PAGE);
Allen Kay4399c8b2011-10-14 12:32:46 -0700295}
296
David Woodhouse75e6bf92009-07-02 11:21:16 +0100297static inline int first_pte_in_page(struct dma_pte *pte)
298{
299 return !((unsigned long)pte & ~VTD_PAGE_MASK);
300}
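/*
 * Worked example (illustrative): a PTE value of 0x12345003 decodes as
 * readable + writable (so dma_pte_present() is true), page frame at
 * 0x12345000, and DMA_PTE_LARGE_PAGE clear, so it is a normal 4KiB mapping
 * rather than a superpage.
 */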
301
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700302/*
 303 * This domain is a static identity-mapping domain.
 304 * 1. This domain creates a static 1:1 mapping of all usable memory.
 305 * 2. It maps to each iommu if successful.
 306 * 3. Each iommu maps to this domain if successful.
307 */
David Woodhouse19943b02009-08-04 16:19:20 +0100308static struct dmar_domain *si_domain;
309static int hw_pass_through = 1;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700310
Weidong Han1ce28fe2008-12-08 16:35:39 +0800311/* domain represents a virtual machine; more than one device
312 * across iommus may be owned in one domain, e.g. kvm guest.
313 */
Jiang Liuab8dfe22014-07-11 14:19:27 +0800314#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 0)
Weidong Han1ce28fe2008-12-08 16:35:39 +0800315
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700316/* si_domain contains multiple devices */
Jiang Liuab8dfe22014-07-11 14:19:27 +0800317#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700318
Mark McLoughlin99126f72008-11-20 15:49:47 +0000319struct dmar_domain {
320 int id; /* domain id */
Suresh Siddha4c923d42009-10-02 11:01:24 -0700321 int nid; /* node id */
Jiang Liu78d8e702014-11-09 22:47:57 +0800322 DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
Mike Travis1b198bb2012-03-05 15:05:16 -0800323 /* bitmap of iommus this domain uses */
Mark McLoughlin99126f72008-11-20 15:49:47 +0000324
Joerg Roedel00a77de2015-03-26 13:43:08 +0100325 struct list_head devices; /* all devices' list */
Mark McLoughlin99126f72008-11-20 15:49:47 +0000326 struct iova_domain iovad; /* iova's that belong to this domain */
327
328 struct dma_pte *pgd; /* virtual address */
Mark McLoughlin99126f72008-11-20 15:49:47 +0000329 int gaw; /* max guest address width */
330
331 /* adjusted guest address width, 0 is level 2 30-bit */
332 int agaw;
333
Weidong Han3b5410e2008-12-08 09:17:15 +0800334 int flags; /* flags to find out type of domain */
Weidong Han8e6040972008-12-08 15:49:06 +0800335
336 int iommu_coherency;/* indicate coherency of iommu access */
Sheng Yang58c610b2009-03-18 15:33:05 +0800337 int iommu_snooping; /* indicate snooping control feature*/
Weidong Hanc7151a82008-12-08 22:51:37 +0800338 int iommu_count; /* reference count of iommu */
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100339 int iommu_superpage;/* Level of superpages supported:
340 0 == 4KiB (no superpages), 1 == 2MiB,
341 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
Weidong Hanc7151a82008-12-08 22:51:37 +0800342 spinlock_t iommu_lock; /* protect iommu set in domain */
Weidong Hanfe40f1e2008-12-08 23:10:23 +0800343 u64 max_addr; /* maximum mapped address */
Joerg Roedel00a77de2015-03-26 13:43:08 +0100344
345 struct iommu_domain domain; /* generic domain data structure for
346 iommu core */
Mark McLoughlin99126f72008-11-20 15:49:47 +0000347};
348
Mark McLoughlina647dac2008-11-20 15:49:48 +0000349/* PCI domain-device relationship */
350struct device_domain_info {
351 struct list_head link; /* link to domain siblings */
352 struct list_head global; /* link to global list */
David Woodhouse276dbf992009-04-04 01:45:37 +0100353 u8 bus; /* PCI bus number */
Mark McLoughlina647dac2008-11-20 15:49:48 +0000354 u8 devfn; /* PCI devfn number */
David Woodhouse0bcb3e22014-03-06 17:12:03 +0000355 struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
Yu Zhao93a23a72009-05-18 13:51:37 +0800356 struct intel_iommu *iommu; /* IOMMU used by this device */
Mark McLoughlina647dac2008-11-20 15:49:48 +0000357 struct dmar_domain *domain; /* pointer to domain */
358};
359
Jiang Liub94e4112014-02-19 14:07:25 +0800360struct dmar_rmrr_unit {
361 struct list_head list; /* list of rmrr units */
362 struct acpi_dmar_header *hdr; /* ACPI header */
363 u64 base_address; /* reserved base address*/
364 u64 end_address; /* reserved end address */
David Woodhouse832bd852014-03-07 15:08:36 +0000365 struct dmar_dev_scope *devices; /* target devices */
Jiang Liub94e4112014-02-19 14:07:25 +0800366 int devices_cnt; /* target device count */
367};
368
369struct dmar_atsr_unit {
370 struct list_head list; /* list of ATSR units */
371 struct acpi_dmar_header *hdr; /* ACPI header */
David Woodhouse832bd852014-03-07 15:08:36 +0000372 struct dmar_dev_scope *devices; /* target devices */
Jiang Liub94e4112014-02-19 14:07:25 +0800373 int devices_cnt; /* target device count */
374 u8 include_all:1; /* include all ports */
375};
376
377static LIST_HEAD(dmar_atsr_units);
378static LIST_HEAD(dmar_rmrr_units);
379
380#define for_each_rmrr_units(rmrr) \
381 list_for_each_entry(rmrr, &dmar_rmrr_units, list)
382
mark gross5e0d2a62008-03-04 15:22:08 -0800383static void flush_unmaps_timeout(unsigned long data);
384
Jiang Liub707cb02014-01-06 14:18:26 +0800385static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
mark gross5e0d2a62008-03-04 15:22:08 -0800386
mark gross80b20dd2008-04-18 13:53:58 -0700387#define HIGH_WATER_MARK 250
388struct deferred_flush_tables {
389 int next;
390 struct iova *iova[HIGH_WATER_MARK];
391 struct dmar_domain *domain[HIGH_WATER_MARK];
David Woodhouseea8ea462014-03-05 17:09:32 +0000392 struct page *freelist[HIGH_WATER_MARK];
mark gross80b20dd2008-04-18 13:53:58 -0700393};
394
395static struct deferred_flush_tables *deferred_flush;
396
mark gross5e0d2a62008-03-04 15:22:08 -0800397/* bitmap for indexing intel_iommus */
mark gross5e0d2a62008-03-04 15:22:08 -0800398static int g_num_of_iommus;
399
400static DEFINE_SPINLOCK(async_umap_flush_lock);
401static LIST_HEAD(unmaps_to_do);
402
403static int timer_on;
404static long list_size;
mark gross5e0d2a62008-03-04 15:22:08 -0800405
Jiang Liu92d03cc2014-02-19 14:07:28 +0800406static void domain_exit(struct dmar_domain *domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700407static void domain_remove_dev_info(struct dmar_domain *domain);
Jiang Liub94e4112014-02-19 14:07:25 +0800408static void domain_remove_one_dev_info(struct dmar_domain *domain,
David Woodhousebf9c9ed2014-03-09 16:19:13 -0700409 struct device *dev);
Jiang Liu92d03cc2014-02-19 14:07:28 +0800410static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
David Woodhouse0bcb3e22014-03-06 17:12:03 +0000411 struct device *dev);
Jiang Liu2a46ddf2014-07-11 14:19:30 +0800412static int domain_detach_iommu(struct dmar_domain *domain,
413 struct intel_iommu *iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700414
Suresh Siddhad3f13812011-08-23 17:05:25 -0700415#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800416int dmar_disabled = 0;
417#else
418int dmar_disabled = 1;
Suresh Siddhad3f13812011-08-23 17:05:25 -0700419#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800420
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -0200421int intel_iommu_enabled = 0;
422EXPORT_SYMBOL_GPL(intel_iommu_enabled);
423
David Woodhouse2d9e6672010-06-15 10:57:57 +0100424static int dmar_map_gfx = 1;
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700425static int dmar_forcedac;
mark gross5e0d2a62008-03-04 15:22:08 -0800426static int intel_iommu_strict;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100427static int intel_iommu_superpage = 1;
David Woodhousec83b2f22015-06-12 10:15:49 +0100428static int intel_iommu_ecs = 1;
429
430/* We only actually use ECS when PASID support (on the new bit 40)
431 * is also advertised. Some early implementations — the ones with
432 * PASID support on bit 28 — have issues even when we *only* use
433 * extended root/context tables. */
434#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
435 ecap_pasid(iommu->ecap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700436
David Woodhousec0771df2011-10-14 20:59:46 +0100437int intel_iommu_gfx_mapped;
438EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
439
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700440#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
441static DEFINE_SPINLOCK(device_domain_lock);
442static LIST_HEAD(device_domain_list);
443
Thierry Redingb22f6432014-06-27 09:03:12 +0200444static const struct iommu_ops intel_iommu_ops;
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +0100445
Joerg Roedel4158c2e2015-06-12 10:14:02 +0200446static bool translation_pre_enabled(struct intel_iommu *iommu)
447{
448 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
449}
450
451static void init_translation_status(struct intel_iommu *iommu)
452{
453 u32 gsts;
454
455 gsts = readl(iommu->reg + DMAR_GSTS_REG);
456 if (gsts & DMA_GSTS_TES)
457 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
458}
459
Joerg Roedel00a77de2015-03-26 13:43:08 +0100460/* Convert a generic 'struct iommu_domain' to the private 'struct dmar_domain' */
461static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
462{
463 return container_of(dom, struct dmar_domain, domain);
464}
465
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700466static int __init intel_iommu_setup(char *str)
467{
468 if (!str)
469 return -EINVAL;
470 while (*str) {
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800471 if (!strncmp(str, "on", 2)) {
472 dmar_disabled = 0;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200473 pr_info("IOMMU enabled\n");
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800474 } else if (!strncmp(str, "off", 3)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700475 dmar_disabled = 1;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200476 pr_info("IOMMU disabled\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700477 } else if (!strncmp(str, "igfx_off", 8)) {
478 dmar_map_gfx = 0;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200479 pr_info("Disable GFX device mapping\n");
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700480 } else if (!strncmp(str, "forcedac", 8)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200481 pr_info("Forcing DAC for PCI devices\n");
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700482 dmar_forcedac = 1;
mark gross5e0d2a62008-03-04 15:22:08 -0800483 } else if (!strncmp(str, "strict", 6)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200484 pr_info("Disable batched IOTLB flush\n");
mark gross5e0d2a62008-03-04 15:22:08 -0800485 intel_iommu_strict = 1;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100486 } else if (!strncmp(str, "sp_off", 6)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +0200487 pr_info("Disable supported super page\n");
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100488 intel_iommu_superpage = 0;
David Woodhousec83b2f22015-06-12 10:15:49 +0100489 } else if (!strncmp(str, "ecs_off", 7)) {
490 printk(KERN_INFO
491 "Intel-IOMMU: disable extended context table support\n");
492 intel_iommu_ecs = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700493 }
494
495 str += strcspn(str, ",");
496 while (*str == ',')
497 str++;
498 }
499 return 0;
500}
501__setup("intel_iommu=", intel_iommu_setup);
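/*
 * Example usage of the parser above (illustrative): booting with
 * "intel_iommu=on,strict,sp_off" enables the IOMMU, disables batched IOTLB
 * flushing and disables superpages; "intel_iommu=off" disables DMA
 * remapping entirely.
 */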
502
503static struct kmem_cache *iommu_domain_cache;
504static struct kmem_cache *iommu_devinfo_cache;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700505
Suresh Siddha4c923d42009-10-02 11:01:24 -0700506static inline void *alloc_pgtable_page(int node)
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700507{
Suresh Siddha4c923d42009-10-02 11:01:24 -0700508 struct page *page;
509 void *vaddr = NULL;
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700510
Suresh Siddha4c923d42009-10-02 11:01:24 -0700511 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
512 if (page)
513 vaddr = page_address(page);
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700514 return vaddr;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700515}
516
517static inline void free_pgtable_page(void *vaddr)
518{
519 free_page((unsigned long)vaddr);
520}
521
522static inline void *alloc_domain_mem(void)
523{
KOSAKI Motohiro354bb652009-11-17 16:21:09 +0900524 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700525}
526
Kay, Allen M38717942008-09-09 18:37:29 +0300527static void free_domain_mem(void *vaddr)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700528{
529 kmem_cache_free(iommu_domain_cache, vaddr);
530}
531
532static inline void * alloc_devinfo_mem(void)
533{
KOSAKI Motohiro354bb652009-11-17 16:21:09 +0900534 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700535}
536
537static inline void free_devinfo_mem(void *vaddr)
538{
539 kmem_cache_free(iommu_devinfo_cache, vaddr);
540}
541
Jiang Liuab8dfe22014-07-11 14:19:27 +0800542static inline int domain_type_is_vm(struct dmar_domain *domain)
543{
544 return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
545}
546
547static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
548{
549 return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
550 DOMAIN_FLAG_STATIC_IDENTITY);
551}
Weidong Han1b573682008-12-08 15:34:06 +0800552
Jiang Liu162d1b12014-07-11 14:19:35 +0800553static inline int domain_pfn_supported(struct dmar_domain *domain,
554 unsigned long pfn)
555{
556 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
557
558 return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
559}
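/*
 * Worked example (illustrative): a domain with agaw 2 has a 48-bit guest
 * address width, so addr_width == 48 - 12 == 36 and any PFN below 1 << 36
 * is accepted, while e.g. pfn == 1UL << 36 is rejected.
 */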
560
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700561static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
Weidong Han1b573682008-12-08 15:34:06 +0800562{
563 unsigned long sagaw;
564 int agaw = -1;
565
566 sagaw = cap_sagaw(iommu->cap);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700567 for (agaw = width_to_agaw(max_gaw);
Weidong Han1b573682008-12-08 15:34:06 +0800568 agaw >= 0; agaw--) {
569 if (test_bit(agaw, &sagaw))
570 break;
571 }
572
573 return agaw;
574}
575
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -0700576/*
577 * Calculate max SAGAW for each iommu.
578 */
579int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
580{
581 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
582}
583
584/*
585 * calculate agaw for each iommu.
586 * "SAGAW" may be different across iommus, use a default agaw, and
587 * get a supported less agaw for iommus that don't support the default agaw.
588 */
589int iommu_calculate_agaw(struct intel_iommu *iommu)
590{
591 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
592}
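/*
 * Worked example (illustrative): if cap_sagaw() reports only bit 2 set, the
 * loop starts at width_to_agaw(48) == 2 and picks agaw 2 (4-level tables);
 * if only bit 1 were set it would fall back to agaw 1, i.e. a 39-bit,
 * 3-level domain.
 */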
593
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700594/* This function only returns a single iommu in a domain */
Weidong Han8c11e792008-12-08 15:29:22 +0800595static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
596{
597 int iommu_id;
598
Fenghua Yu2c2e2c32009-06-19 13:47:29 -0700599 /* si_domain and vm domain should not get here. */
Jiang Liuab8dfe22014-07-11 14:19:27 +0800600 BUG_ON(domain_type_is_vm_or_si(domain));
Mike Travis1b198bb2012-03-05 15:05:16 -0800601 iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
Weidong Han8c11e792008-12-08 15:29:22 +0800602 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
603 return NULL;
604
605 return g_iommus[iommu_id];
606}
607
Weidong Han8e6040972008-12-08 15:49:06 +0800608static void domain_update_iommu_coherency(struct dmar_domain *domain)
609{
David Woodhoused0501962014-03-11 17:10:29 -0700610 struct dmar_drhd_unit *drhd;
611 struct intel_iommu *iommu;
Quentin Lambert2f119c72015-02-06 10:59:53 +0100612 bool found = false;
613 int i;
Weidong Han8e6040972008-12-08 15:49:06 +0800614
David Woodhoused0501962014-03-11 17:10:29 -0700615 domain->iommu_coherency = 1;
Weidong Han8e6040972008-12-08 15:49:06 +0800616
Mike Travis1b198bb2012-03-05 15:05:16 -0800617 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
Quentin Lambert2f119c72015-02-06 10:59:53 +0100618 found = true;
Weidong Han8e6040972008-12-08 15:49:06 +0800619 if (!ecap_coherent(g_iommus[i]->ecap)) {
620 domain->iommu_coherency = 0;
621 break;
622 }
Weidong Han8e6040972008-12-08 15:49:06 +0800623 }
David Woodhoused0501962014-03-11 17:10:29 -0700624 if (found)
625 return;
626
627 /* No hardware attached; use lowest common denominator */
628 rcu_read_lock();
629 for_each_active_iommu(iommu, drhd) {
630 if (!ecap_coherent(iommu->ecap)) {
631 domain->iommu_coherency = 0;
632 break;
633 }
634 }
635 rcu_read_unlock();
Weidong Han8e6040972008-12-08 15:49:06 +0800636}
637
Jiang Liu161f6932014-07-11 14:19:37 +0800638static int domain_update_iommu_snooping(struct intel_iommu *skip)
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100639{
Allen Kay8140a952011-10-14 12:32:17 -0700640 struct dmar_drhd_unit *drhd;
Jiang Liu161f6932014-07-11 14:19:37 +0800641 struct intel_iommu *iommu;
642 int ret = 1;
643
644 rcu_read_lock();
645 for_each_active_iommu(iommu, drhd) {
646 if (iommu != skip) {
647 if (!ecap_sc_support(iommu->ecap)) {
648 ret = 0;
649 break;
650 }
651 }
652 }
653 rcu_read_unlock();
654
655 return ret;
656}
657
658static int domain_update_iommu_superpage(struct intel_iommu *skip)
659{
660 struct dmar_drhd_unit *drhd;
661 struct intel_iommu *iommu;
Allen Kay8140a952011-10-14 12:32:17 -0700662 int mask = 0xf;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100663
664 if (!intel_iommu_superpage) {
Jiang Liu161f6932014-07-11 14:19:37 +0800665 return 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100666 }
667
Allen Kay8140a952011-10-14 12:32:17 -0700668 /* set iommu_superpage to the smallest common denominator */
Jiang Liu0e242612014-02-19 14:07:34 +0800669 rcu_read_lock();
Allen Kay8140a952011-10-14 12:32:17 -0700670 for_each_active_iommu(iommu, drhd) {
Jiang Liu161f6932014-07-11 14:19:37 +0800671 if (iommu != skip) {
672 mask &= cap_super_page_val(iommu->cap);
673 if (!mask)
674 break;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100675 }
676 }
Jiang Liu0e242612014-02-19 14:07:34 +0800677 rcu_read_unlock();
678
Jiang Liu161f6932014-07-11 14:19:37 +0800679 return fls(mask);
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100680}
681
Sheng Yang58c610b2009-03-18 15:33:05 +0800682/* Some capabilities may be different across iommus */
683static void domain_update_iommu_cap(struct dmar_domain *domain)
684{
685 domain_update_iommu_coherency(domain);
Jiang Liu161f6932014-07-11 14:19:37 +0800686 domain->iommu_snooping = domain_update_iommu_snooping(NULL);
687 domain->iommu_superpage = domain_update_iommu_superpage(NULL);
Sheng Yang58c610b2009-03-18 15:33:05 +0800688}
689
David Woodhouse03ecc322015-02-13 14:35:21 +0000690static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
691 u8 bus, u8 devfn, int alloc)
692{
693 struct root_entry *root = &iommu->root_entry[bus];
694 struct context_entry *context;
695 u64 *entry;
696
 697	entry = &root->lo;
 698	if (ecs_enabled(iommu)) {
 699		if (devfn >= 0x80) {
 700			devfn -= 0x80;
 701			entry = &root->hi;
 702		}
 703		devfn *= 2;
 704	}
705 if (*entry & 1)
706 context = phys_to_virt(*entry & VTD_PAGE_MASK);
707 else {
708 unsigned long phy_addr;
709 if (!alloc)
710 return NULL;
711
712 context = alloc_pgtable_page(iommu->node);
713 if (!context)
714 return NULL;
715
716 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
717 phy_addr = virt_to_phys((void *)context);
718 *entry = phy_addr | 1;
719 __iommu_flush_cache(iommu, entry, sizeof(*entry));
720 }
721 return &context[devfn];
722}
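/*
 * Worked example (illustrative): with ECS enabled, devfns 0x00-0x7f are
 * reached through root->lo and devfns 0x80-0xff through root->hi, and each
 * extended context entry spans two 128-bit slots (hence devfn *= 2). So
 * devfn 0x85 uses root->hi and slot (0x85 - 0x80) * 2 == 10. Without ECS,
 * root->lo points to a single 256-entry context table indexed by devfn
 * directly.
 */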
723
David Woodhouse4ed6a542015-05-11 14:59:20 +0100724static int iommu_dummy(struct device *dev)
725{
726 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
727}
728
David Woodhouse156baca2014-03-09 14:00:57 -0700729static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
Weidong Hanc7151a82008-12-08 22:51:37 +0800730{
731 struct dmar_drhd_unit *drhd = NULL;
Jiang Liub683b232014-02-19 14:07:32 +0800732 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -0700733 struct device *tmp;
734 struct pci_dev *ptmp, *pdev = NULL;
Yijing Wangaa4d0662014-05-26 20:14:06 +0800735 u16 segment = 0;
Weidong Hanc7151a82008-12-08 22:51:37 +0800736 int i;
737
David Woodhouse4ed6a542015-05-11 14:59:20 +0100738 if (iommu_dummy(dev))
739 return NULL;
740
David Woodhouse156baca2014-03-09 14:00:57 -0700741 if (dev_is_pci(dev)) {
742 pdev = to_pci_dev(dev);
743 segment = pci_domain_nr(pdev->bus);
Rafael J. Wysockica5b74d2015-03-16 23:49:08 +0100744 } else if (has_acpi_companion(dev))
David Woodhouse156baca2014-03-09 14:00:57 -0700745 dev = &ACPI_COMPANION(dev)->dev;
746
Jiang Liu0e242612014-02-19 14:07:34 +0800747 rcu_read_lock();
Jiang Liub683b232014-02-19 14:07:32 +0800748 for_each_active_iommu(iommu, drhd) {
David Woodhouse156baca2014-03-09 14:00:57 -0700749 if (pdev && segment != drhd->segment)
David Woodhouse276dbf992009-04-04 01:45:37 +0100750 continue;
Weidong Hanc7151a82008-12-08 22:51:37 +0800751
Jiang Liub683b232014-02-19 14:07:32 +0800752 for_each_active_dev_scope(drhd->devices,
David Woodhouse156baca2014-03-09 14:00:57 -0700753 drhd->devices_cnt, i, tmp) {
754 if (tmp == dev) {
755 *bus = drhd->devices[i].bus;
756 *devfn = drhd->devices[i].devfn;
757 goto out;
758 }
759
760 if (!pdev || !dev_is_pci(tmp))
David Woodhouse832bd852014-03-07 15:08:36 +0000761 continue;
David Woodhouse156baca2014-03-09 14:00:57 -0700762
763 ptmp = to_pci_dev(tmp);
764 if (ptmp->subordinate &&
765 ptmp->subordinate->number <= pdev->bus->number &&
766 ptmp->subordinate->busn_res.end >= pdev->bus->number)
767 goto got_pdev;
David Woodhouse924b6232009-04-04 00:39:25 +0100768 }
Weidong Hanc7151a82008-12-08 22:51:37 +0800769
David Woodhouse156baca2014-03-09 14:00:57 -0700770 if (pdev && drhd->include_all) {
771 got_pdev:
772 *bus = pdev->bus->number;
773 *devfn = pdev->devfn;
Jiang Liub683b232014-02-19 14:07:32 +0800774 goto out;
David Woodhouse156baca2014-03-09 14:00:57 -0700775 }
Weidong Hanc7151a82008-12-08 22:51:37 +0800776 }
Jiang Liub683b232014-02-19 14:07:32 +0800777 iommu = NULL;
David Woodhouse156baca2014-03-09 14:00:57 -0700778 out:
Jiang Liu0e242612014-02-19 14:07:34 +0800779 rcu_read_unlock();
Weidong Hanc7151a82008-12-08 22:51:37 +0800780
Jiang Liub683b232014-02-19 14:07:32 +0800781 return iommu;
Weidong Hanc7151a82008-12-08 22:51:37 +0800782}
783
Weidong Han5331fe62008-12-08 23:00:00 +0800784static void domain_flush_cache(struct dmar_domain *domain,
785 void *addr, int size)
786{
787 if (!domain->iommu_coherency)
788 clflush_cache_range(addr, size);
789}
790
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700791static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
792{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700793 struct context_entry *context;
David Woodhouse03ecc322015-02-13 14:35:21 +0000794 int ret = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700795 unsigned long flags;
796
797 spin_lock_irqsave(&iommu->lock, flags);
David Woodhouse03ecc322015-02-13 14:35:21 +0000798 context = iommu_context_addr(iommu, bus, devfn, 0);
799 if (context)
800 ret = context_present(context);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700801 spin_unlock_irqrestore(&iommu->lock, flags);
802 return ret;
803}
804
805static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
806{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700807 struct context_entry *context;
808 unsigned long flags;
809
810 spin_lock_irqsave(&iommu->lock, flags);
David Woodhouse03ecc322015-02-13 14:35:21 +0000811 context = iommu_context_addr(iommu, bus, devfn, 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700812 if (context) {
David Woodhouse03ecc322015-02-13 14:35:21 +0000813 context_clear_entry(context);
814 __iommu_flush_cache(iommu, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700815 }
816 spin_unlock_irqrestore(&iommu->lock, flags);
817}
818
819static void free_context_table(struct intel_iommu *iommu)
820{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700821 int i;
822 unsigned long flags;
823 struct context_entry *context;
824
825 spin_lock_irqsave(&iommu->lock, flags);
826 if (!iommu->root_entry) {
827 goto out;
828 }
829 for (i = 0; i < ROOT_ENTRY_NR; i++) {
David Woodhouse03ecc322015-02-13 14:35:21 +0000830 context = iommu_context_addr(iommu, i, 0, 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700831 if (context)
832 free_pgtable_page(context);
David Woodhouse03ecc322015-02-13 14:35:21 +0000833
David Woodhousec83b2f22015-06-12 10:15:49 +0100834 if (!ecs_enabled(iommu))
David Woodhouse03ecc322015-02-13 14:35:21 +0000835 continue;
836
837 context = iommu_context_addr(iommu, i, 0x80, 0);
838 if (context)
839 free_pgtable_page(context);
840
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700841 }
842 free_pgtable_page(iommu->root_entry);
843 iommu->root_entry = NULL;
844out:
845 spin_unlock_irqrestore(&iommu->lock, flags);
846}
847
David Woodhouseb026fd22009-06-28 10:37:25 +0100848static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
David Woodhouse5cf0a762014-03-19 16:07:49 +0000849 unsigned long pfn, int *target_level)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700850{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700851 struct dma_pte *parent, *pte = NULL;
852 int level = agaw_to_level(domain->agaw);
Allen Kay4399c8b2011-10-14 12:32:46 -0700853 int offset;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700854
855 BUG_ON(!domain->pgd);
Julian Stecklinaf9423602013-10-09 10:03:52 +0200856
Jiang Liu162d1b12014-07-11 14:19:35 +0800857 if (!domain_pfn_supported(domain, pfn))
Julian Stecklinaf9423602013-10-09 10:03:52 +0200858 /* Address beyond IOMMU's addressing capabilities. */
859 return NULL;
860
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700861 parent = domain->pgd;
862
David Woodhouse5cf0a762014-03-19 16:07:49 +0000863 while (1) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700864 void *tmp_page;
865
David Woodhouseb026fd22009-06-28 10:37:25 +0100866 offset = pfn_level_offset(pfn, level);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700867 pte = &parent[offset];
David Woodhouse5cf0a762014-03-19 16:07:49 +0000868 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100869 break;
David Woodhouse5cf0a762014-03-19 16:07:49 +0000870 if (level == *target_level)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700871 break;
872
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000873 if (!dma_pte_present(pte)) {
David Woodhousec85994e2009-07-01 19:21:24 +0100874 uint64_t pteval;
875
Suresh Siddha4c923d42009-10-02 11:01:24 -0700876 tmp_page = alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700877
David Woodhouse206a73c12009-07-01 19:30:28 +0100878 if (!tmp_page)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700879 return NULL;
David Woodhouse206a73c12009-07-01 19:30:28 +0100880
David Woodhousec85994e2009-07-01 19:21:24 +0100881 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
Benjamin LaHaise64de5af2009-09-16 21:05:55 -0400882 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
Yijing Wangeffad4b2014-05-26 20:13:47 +0800883 if (cmpxchg64(&pte->val, 0ULL, pteval))
David Woodhousec85994e2009-07-01 19:21:24 +0100884 /* Someone else set it while we were thinking; use theirs. */
885 free_pgtable_page(tmp_page);
Yijing Wangeffad4b2014-05-26 20:13:47 +0800886 else
David Woodhousec85994e2009-07-01 19:21:24 +0100887 domain_flush_cache(domain, pte, sizeof(*pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700888 }
David Woodhouse5cf0a762014-03-19 16:07:49 +0000889 if (level == 1)
890 break;
891
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000892 parent = phys_to_virt(dma_pte_addr(pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700893 level--;
894 }
895
David Woodhouse5cf0a762014-03-19 16:07:49 +0000896 if (!*target_level)
897 *target_level = level;
898
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700899 return pte;
900}
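/*
 * Worked example (illustrative): in a 4-level domain (agaw 2) the walk
 * starts at level 4 and indexes each table with nine PFN bits:
 * (pfn >> 27) & 0x1ff, (pfn >> 18) & 0x1ff, (pfn >> 9) & 0x1ff and finally
 * pfn & 0x1ff. Missing intermediate tables are allocated on the way down,
 * with cmpxchg64() resolving races against concurrent mappers.
 */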
901
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100902
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700903/* return address's pte at specific level */
David Woodhouse90dcfb52009-06-27 17:14:59 +0100904static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
905 unsigned long pfn,
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100906 int level, int *large_page)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700907{
908 struct dma_pte *parent, *pte = NULL;
909 int total = agaw_to_level(domain->agaw);
910 int offset;
911
912 parent = domain->pgd;
913 while (level <= total) {
David Woodhouse90dcfb52009-06-27 17:14:59 +0100914 offset = pfn_level_offset(pfn, total);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700915 pte = &parent[offset];
916 if (level == total)
917 return pte;
918
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100919 if (!dma_pte_present(pte)) {
920 *large_page = total;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700921 break;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100922 }
923
Yijing Wange16922a2014-05-20 20:37:51 +0800924 if (dma_pte_superpage(pte)) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100925 *large_page = total;
926 return pte;
927 }
928
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000929 parent = phys_to_virt(dma_pte_addr(pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700930 total--;
931 }
932 return NULL;
933}
934
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700935/* clear last level pte, a tlb flush should be followed */
David Woodhouse5cf0a762014-03-19 16:07:49 +0000936static void dma_pte_clear_range(struct dmar_domain *domain,
David Woodhouse595badf2009-06-27 22:09:11 +0100937 unsigned long start_pfn,
938 unsigned long last_pfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700939{
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100940 unsigned int large_page = 1;
David Woodhouse310a5ab2009-06-28 18:52:20 +0100941 struct dma_pte *first_pte, *pte;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700942
Jiang Liu162d1b12014-07-11 14:19:35 +0800943 BUG_ON(!domain_pfn_supported(domain, start_pfn));
944 BUG_ON(!domain_pfn_supported(domain, last_pfn));
David Woodhouse59c36282009-09-19 07:36:28 -0700945 BUG_ON(start_pfn > last_pfn);
David Woodhouse66eae842009-06-27 19:00:32 +0100946
David Woodhouse04b18e62009-06-27 19:15:01 +0100947 /* we don't need lock here; nobody else touches the iova range */
David Woodhouse59c36282009-09-19 07:36:28 -0700948 do {
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100949 large_page = 1;
950 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
David Woodhouse310a5ab2009-06-28 18:52:20 +0100951 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100952 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
David Woodhouse310a5ab2009-06-28 18:52:20 +0100953 continue;
954 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100955 do {
David Woodhouse310a5ab2009-06-28 18:52:20 +0100956 dma_clear_pte(pte);
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100957 start_pfn += lvl_to_nr_pages(large_page);
David Woodhouse310a5ab2009-06-28 18:52:20 +0100958 pte++;
David Woodhouse75e6bf92009-07-02 11:21:16 +0100959 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
960
David Woodhouse310a5ab2009-06-28 18:52:20 +0100961 domain_flush_cache(domain, first_pte,
962 (void *)pte - (void *)first_pte);
David Woodhouse59c36282009-09-19 07:36:28 -0700963
964 } while (start_pfn && start_pfn <= last_pfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700965}
966
Alex Williamson3269ee02013-06-15 10:27:19 -0600967static void dma_pte_free_level(struct dmar_domain *domain, int level,
968 struct dma_pte *pte, unsigned long pfn,
969 unsigned long start_pfn, unsigned long last_pfn)
970{
971 pfn = max(start_pfn, pfn);
972 pte = &pte[pfn_level_offset(pfn, level)];
973
974 do {
975 unsigned long level_pfn;
976 struct dma_pte *level_pte;
977
978 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
979 goto next;
980
981 level_pfn = pfn & level_mask(level - 1);
982 level_pte = phys_to_virt(dma_pte_addr(pte));
983
984 if (level > 2)
985 dma_pte_free_level(domain, level - 1, level_pte,
986 level_pfn, start_pfn, last_pfn);
987
988 /* If range covers entire pagetable, free it */
989 if (!(start_pfn > level_pfn ||
Alex Williamson08336fd2014-01-21 15:48:18 -0800990 last_pfn < level_pfn + level_size(level) - 1)) {
Alex Williamson3269ee02013-06-15 10:27:19 -0600991 dma_clear_pte(pte);
992 domain_flush_cache(domain, pte, sizeof(*pte));
993 free_pgtable_page(level_pte);
994 }
995next:
996 pfn += level_size(level);
997 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
998}
999
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001000/* free page table pages. last level pte should already be cleared */
1001static void dma_pte_free_pagetable(struct dmar_domain *domain,
David Woodhoused794dc92009-06-28 00:27:49 +01001002 unsigned long start_pfn,
1003 unsigned long last_pfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001004{
Jiang Liu162d1b12014-07-11 14:19:35 +08001005 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1006 BUG_ON(!domain_pfn_supported(domain, last_pfn));
David Woodhouse59c36282009-09-19 07:36:28 -07001007 BUG_ON(start_pfn > last_pfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001008
Jiang Liud41a4ad2014-07-11 14:19:34 +08001009 dma_pte_clear_range(domain, start_pfn, last_pfn);
1010
David Woodhousef3a0a522009-06-30 03:40:07 +01001011 /* We don't need lock here; nobody else touches the iova range */
Alex Williamson3269ee02013-06-15 10:27:19 -06001012 dma_pte_free_level(domain, agaw_to_level(domain->agaw),
1013 domain->pgd, 0, start_pfn, last_pfn);
David Woodhouse6660c632009-06-27 22:41:00 +01001014
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001015 /* free pgd */
David Woodhoused794dc92009-06-28 00:27:49 +01001016 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001017 free_pgtable_page(domain->pgd);
1018 domain->pgd = NULL;
1019 }
1020}
1021
David Woodhouseea8ea462014-03-05 17:09:32 +00001022/* When a page at a given level is being unlinked from its parent, we don't
1023 need to *modify* it at all. All we need to do is make a list of all the
1024 pages which can be freed just as soon as we've flushed the IOTLB and we
1025 know the hardware page-walk will no longer touch them.
1026 The 'pte' argument is the *parent* PTE, pointing to the page that is to
1027 be freed. */
1028static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1029 int level, struct dma_pte *pte,
1030 struct page *freelist)
1031{
1032 struct page *pg;
1033
1034 pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1035 pg->freelist = freelist;
1036 freelist = pg;
1037
1038 if (level == 1)
1039 return freelist;
1040
Jiang Liuadeb2592014-04-09 10:20:39 +08001041 pte = page_address(pg);
1042 do {
David Woodhouseea8ea462014-03-05 17:09:32 +00001043 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1044 freelist = dma_pte_list_pagetables(domain, level - 1,
1045 pte, freelist);
Jiang Liuadeb2592014-04-09 10:20:39 +08001046 pte++;
1047 } while (!first_pte_in_page(pte));
David Woodhouseea8ea462014-03-05 17:09:32 +00001048
1049 return freelist;
1050}
1051
1052static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1053 struct dma_pte *pte, unsigned long pfn,
1054 unsigned long start_pfn,
1055 unsigned long last_pfn,
1056 struct page *freelist)
1057{
1058 struct dma_pte *first_pte = NULL, *last_pte = NULL;
1059
1060 pfn = max(start_pfn, pfn);
1061 pte = &pte[pfn_level_offset(pfn, level)];
1062
1063 do {
1064 unsigned long level_pfn;
1065
1066 if (!dma_pte_present(pte))
1067 goto next;
1068
1069 level_pfn = pfn & level_mask(level);
1070
1071 /* If range covers entire pagetable, free it */
1072 if (start_pfn <= level_pfn &&
1073 last_pfn >= level_pfn + level_size(level) - 1) {
 1074 /* These subordinate page tables are going away entirely. Don't
1075 bother to clear them; we're just going to *free* them. */
1076 if (level > 1 && !dma_pte_superpage(pte))
1077 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1078
1079 dma_clear_pte(pte);
1080 if (!first_pte)
1081 first_pte = pte;
1082 last_pte = pte;
1083 } else if (level > 1) {
1084 /* Recurse down into a level that isn't *entirely* obsolete */
1085 freelist = dma_pte_clear_level(domain, level - 1,
1086 phys_to_virt(dma_pte_addr(pte)),
1087 level_pfn, start_pfn, last_pfn,
1088 freelist);
1089 }
1090next:
1091 pfn += level_size(level);
1092 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1093
1094 if (first_pte)
1095 domain_flush_cache(domain, first_pte,
1096 (void *)++last_pte - (void *)first_pte);
1097
1098 return freelist;
1099}
1100
1101/* We can't just free the pages because the IOMMU may still be walking
1102 the page tables, and may have cached the intermediate levels. The
1103 pages can only be freed after the IOTLB flush has been done. */
1104struct page *domain_unmap(struct dmar_domain *domain,
1105 unsigned long start_pfn,
1106 unsigned long last_pfn)
1107{
David Woodhouseea8ea462014-03-05 17:09:32 +00001108 struct page *freelist = NULL;
1109
Jiang Liu162d1b12014-07-11 14:19:35 +08001110 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1111 BUG_ON(!domain_pfn_supported(domain, last_pfn));
David Woodhouseea8ea462014-03-05 17:09:32 +00001112 BUG_ON(start_pfn > last_pfn);
1113
1114 /* we don't need lock here; nobody else touches the iova range */
1115 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1116 domain->pgd, 0, start_pfn, last_pfn, NULL);
1117
1118 /* free pgd */
1119 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1120 struct page *pgd_page = virt_to_page(domain->pgd);
1121 pgd_page->freelist = freelist;
1122 freelist = pgd_page;
1123
1124 domain->pgd = NULL;
1125 }
1126
1127 return freelist;
1128}
1129
1130void dma_free_pagelist(struct page *freelist)
1131{
1132 struct page *pg;
1133
1134 while ((pg = freelist)) {
1135 freelist = pg->freelist;
1136 free_pgtable_page(page_address(pg));
1137 }
1138}
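/*
 * Typical calling pattern (sketch, per the comment above domain_unmap()):
 *
 *	freelist = domain_unmap(domain, start_pfn, last_pfn);
 *	... flush the IOTLB for the unmapped range ...
 *	dma_free_pagelist(freelist);
 *
 * so the hardware can never walk into a page table that has already been
 * freed.
 */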
1139
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001140/* iommu handling */
1141static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1142{
1143 struct root_entry *root;
1144 unsigned long flags;
1145
Suresh Siddha4c923d42009-10-02 11:01:24 -07001146 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
Jiang Liuffebeb42014-11-09 22:48:02 +08001147 if (!root) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001148 pr_err("Allocating root entry for %s failed\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08001149 iommu->name);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001150 return -ENOMEM;
Jiang Liuffebeb42014-11-09 22:48:02 +08001151 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001152
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001153 __iommu_flush_cache(iommu, root, ROOT_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001154
1155 spin_lock_irqsave(&iommu->lock, flags);
1156 iommu->root_entry = root;
1157 spin_unlock_irqrestore(&iommu->lock, flags);
1158
1159 return 0;
1160}
1161
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001162static void iommu_set_root_entry(struct intel_iommu *iommu)
1163{
David Woodhouse03ecc322015-02-13 14:35:21 +00001164 u64 addr;
David Woodhousec416daa2009-05-10 20:30:58 +01001165 u32 sts;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001166 unsigned long flag;
1167
David Woodhouse03ecc322015-02-13 14:35:21 +00001168 addr = virt_to_phys(iommu->root_entry);
David Woodhousec83b2f22015-06-12 10:15:49 +01001169 if (ecs_enabled(iommu))
David Woodhouse03ecc322015-02-13 14:35:21 +00001170 addr |= DMA_RTADDR_RTT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001171
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001172 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse03ecc322015-02-13 14:35:21 +00001173 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001174
David Woodhousec416daa2009-05-10 20:30:58 +01001175 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001176
1177 /* Make sure hardware complete it */
1178 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001179 readl, (sts & DMA_GSTS_RTPS), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001180
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001181 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001182}
1183
1184static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1185{
1186 u32 val;
1187 unsigned long flag;
1188
David Woodhouse9af88142009-02-13 23:18:03 +00001189 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001190 return;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001191
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001192 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse462b60f2009-05-10 20:18:18 +01001193 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001194
1195 /* Make sure hardware complete it */
1196 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001197 readl, (!(val & DMA_GSTS_WBFS)), val);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001198
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001199 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001200}
1201
1202/* return value determine if we need a write buffer flush */
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001203static void __iommu_flush_context(struct intel_iommu *iommu,
1204 u16 did, u16 source_id, u8 function_mask,
1205 u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001206{
1207 u64 val = 0;
1208 unsigned long flag;
1209
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001210 switch (type) {
1211 case DMA_CCMD_GLOBAL_INVL:
1212 val = DMA_CCMD_GLOBAL_INVL;
1213 break;
1214 case DMA_CCMD_DOMAIN_INVL:
1215 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1216 break;
1217 case DMA_CCMD_DEVICE_INVL:
1218 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1219 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1220 break;
1221 default:
1222 BUG();
1223 }
1224 val |= DMA_CCMD_ICC;
1225
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001226 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001227 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1228
1229 /* Make sure hardware complete it */
1230 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1231 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1232
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001233 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001234}
1235
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001236/* return value determine if we need a write buffer flush */
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001237static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1238 u64 addr, unsigned int size_order, u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001239{
1240 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1241 u64 val = 0, val_iva = 0;
1242 unsigned long flag;
1243
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001244 switch (type) {
1245 case DMA_TLB_GLOBAL_FLUSH:
1246 /* global flush doesn't need set IVA_REG */
1247 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1248 break;
1249 case DMA_TLB_DSI_FLUSH:
1250 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1251 break;
1252 case DMA_TLB_PSI_FLUSH:
1253 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
David Woodhouseea8ea462014-03-05 17:09:32 +00001254 /* IH bit is passed in as part of address */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001255 val_iva = size_order | addr;
1256 break;
1257 default:
1258 BUG();
1259 }
1260 /* Note: set drain read/write */
1261#if 0
1262 /*
 1263 * This is probably just to be extra safe. It looks like we can
1264 * ignore it without any impact.
1265 */
1266 if (cap_read_drain(iommu->cap))
1267 val |= DMA_TLB_READ_DRAIN;
1268#endif
1269 if (cap_write_drain(iommu->cap))
1270 val |= DMA_TLB_WRITE_DRAIN;
1271
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001272 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001273 /* Note: Only uses first TLB reg currently */
1274 if (val_iva)
1275 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1276 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1277
1278 /* Make sure hardware completes it */
1279 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1280 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1281
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001282 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001283
1284 /* check IOTLB invalidation granularity */
1285 if (DMA_TLB_IAIG(val) == 0)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001286 pr_err("Flush IOTLB failed\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001287 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001288 pr_debug("TLB flush request %Lx, actual %Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001289 (unsigned long long)DMA_TLB_IIRG(type),
1290 (unsigned long long)DMA_TLB_IAIG(val));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001291}
1292
David Woodhouse64ae8922014-03-09 12:52:30 -07001293static struct device_domain_info *
1294iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
1295 u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001296{
Quentin Lambert2f119c72015-02-06 10:59:53 +01001297 bool found = false;
Yu Zhao93a23a72009-05-18 13:51:37 +08001298 unsigned long flags;
1299 struct device_domain_info *info;
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001300 struct pci_dev *pdev;
Yu Zhao93a23a72009-05-18 13:51:37 +08001301
1302 if (!ecap_dev_iotlb_support(iommu->ecap))
1303 return NULL;
1304
1305 if (!iommu->qi)
1306 return NULL;
1307
1308 spin_lock_irqsave(&device_domain_lock, flags);
1309 list_for_each_entry(info, &domain->devices, link)
Jiang Liuc3b497c2014-07-11 14:19:25 +08001310 if (info->iommu == iommu && info->bus == bus &&
1311 info->devfn == devfn) {
Quentin Lambert2f119c72015-02-06 10:59:53 +01001312 found = true;
Yu Zhao93a23a72009-05-18 13:51:37 +08001313 break;
1314 }
1315 spin_unlock_irqrestore(&device_domain_lock, flags);
1316
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001317 if (!found || !info->dev || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001318 return NULL;
1319
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001320 pdev = to_pci_dev(info->dev);
1321
1322 if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
Yu Zhao93a23a72009-05-18 13:51:37 +08001323 return NULL;
1324
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001325 if (!dmar_find_matched_atsr_unit(pdev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001326 return NULL;
1327
Yu Zhao93a23a72009-05-18 13:51:37 +08001328 return info;
1329}
1330
1331static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1332{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001333 if (!info || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001334 return;
1335
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001336 pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
Yu Zhao93a23a72009-05-18 13:51:37 +08001337}
1338
1339static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1340{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001341 if (!info->dev || !dev_is_pci(info->dev) ||
1342 !pci_ats_enabled(to_pci_dev(info->dev)))
Yu Zhao93a23a72009-05-18 13:51:37 +08001343 return;
1344
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001345 pci_disable_ats(to_pci_dev(info->dev));
Yu Zhao93a23a72009-05-18 13:51:37 +08001346}
1347
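/*
 * Flush the device IOTLBs (ATS translation caches) of every ATS-enabled
 * PCI device attached to the domain.  The requester id is built as
 * sid = bus << 8 | devfn, and the device's ATS invalidate queue depth
 * (qdep) is passed to qi_flush_dev_iotlb() along with the address/mask.
 */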
1348static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1349 u64 addr, unsigned mask)
1350{
1351 u16 sid, qdep;
1352 unsigned long flags;
1353 struct device_domain_info *info;
1354
1355 spin_lock_irqsave(&device_domain_lock, flags);
1356 list_for_each_entry(info, &domain->devices, link) {
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001357 struct pci_dev *pdev;
1358 if (!info->dev || !dev_is_pci(info->dev))
1359 continue;
1360
1361 pdev = to_pci_dev(info->dev);
1362 if (!pci_ats_enabled(pdev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001363 continue;
1364
1365 sid = info->bus << 8 | info->devfn;
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001366 qdep = pci_ats_queue_depth(pdev);
Yu Zhao93a23a72009-05-18 13:51:37 +08001367 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1368 }
1369 spin_unlock_irqrestore(&device_domain_lock, flags);
1370}
1371
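/*
 * Page-selective IOTLB flush for 'pages' pages starting at 'pfn'.  The
 * mask is the smallest power-of-two order that covers the range, e.g.
 * pages = 300 rounds up to 512 and gives mask = 9 (a 2MiB flush).  The
 * invalidation hint 'ih' is folded into bit 6 of the address.  If the
 * hardware lacks page-selective invalidation, or the mask exceeds
 * cap_max_amask_val(), the code below falls back to a domain-selective
 * flush.
 */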
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001372static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
David Woodhouseea8ea462014-03-05 17:09:32 +00001373 unsigned long pfn, unsigned int pages, int ih, int map)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001374{
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001375 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
David Woodhouse03d6a242009-06-28 15:33:46 +01001376 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001377
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001378 BUG_ON(pages == 0);
1379
David Woodhouseea8ea462014-03-05 17:09:32 +00001380 if (ih)
1381 ih = 1 << 6;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001382 /*
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001383 * Fall back to a domain-selective flush if there is no PSI support or
1384 * the size is too big.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001385 * PSI requires the page size to be 2 ^ x, and the base address to be
1386 * naturally aligned to that size.
1387 */
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001388 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1389 iommu->flush.flush_iotlb(iommu, did, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001390 DMA_TLB_DSI_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001391 else
David Woodhouseea8ea462014-03-05 17:09:32 +00001392 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001393 DMA_TLB_PSI_FLUSH);
Yu Zhaobf92df32009-06-29 11:31:45 +08001394
1395 /*
Nadav Amit82653632010-04-01 13:24:40 +03001396 * In caching mode, changes of pages from non-present to present require
1397 * a flush. However, the device IOTLB doesn't need to be flushed in this case.
Yu Zhaobf92df32009-06-29 11:31:45 +08001398 */
Nadav Amit82653632010-04-01 13:24:40 +03001399 if (!cap_caching_mode(iommu->cap) || !map)
Yu Zhao93a23a72009-05-18 13:51:37 +08001400 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001401}
1402
mark grossf8bab732008-02-08 04:18:38 -08001403static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1404{
1405 u32 pmen;
1406 unsigned long flags;
1407
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001408 raw_spin_lock_irqsave(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001409 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1410 pmen &= ~DMA_PMEN_EPM;
1411 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1412
1413 /* wait for the protected region status bit to clear */
1414 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1415 readl, !(pmen & DMA_PMEN_PRS), pmen);
1416
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001417 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001418}
1419
Jiang Liu2a41cce2014-07-11 14:19:33 +08001420static void iommu_enable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001421{
1422 u32 sts;
1423 unsigned long flags;
1424
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001425 raw_spin_lock_irqsave(&iommu->register_lock, flags);
David Woodhousec416daa2009-05-10 20:30:58 +01001426 iommu->gcmd |= DMA_GCMD_TE;
1427 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001428
1429 /* Make sure hardware completes it */
1430 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001431 readl, (sts & DMA_GSTS_TES), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001432
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001433 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001434}
1435
Jiang Liu2a41cce2014-07-11 14:19:33 +08001436static void iommu_disable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001437{
1438 u32 sts;
1439 unsigned long flag;
1440
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001441 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001442 iommu->gcmd &= ~DMA_GCMD_TE;
1443 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1444
1445 /* Make sure hardware completes it */
1446 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001447 readl, (!(sts & DMA_GSTS_TES)), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001448
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001449 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001450}
1451
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001452
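/*
 * Allocate the per-IOMMU domain bookkeeping: a bitmap of domain ids
 * (cap_ndoms() of them, potentially up to 64K) plus a matching array of
 * dmar_domain pointers indexed by domain id.  When caching mode is in
 * use, domain id 0 is reserved because hardware tags invalid
 * translations with that id.
 */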
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001453static int iommu_init_domains(struct intel_iommu *iommu)
1454{
1455 unsigned long ndomains;
1456 unsigned long nlongs;
1457
1458 ndomains = cap_ndoms(iommu->cap);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001459 pr_debug("%s: Number of Domains supported <%ld>\n",
1460 iommu->name, ndomains);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001461 nlongs = BITS_TO_LONGS(ndomains);
1462
Donald Dutile94a91b52009-08-20 16:51:34 -04001463 spin_lock_init(&iommu->lock);
1464
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001465 /* TBD: there might be 64K domains;
1466 * consider a different allocation scheme for future chips.
1467 */
1468 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1469 if (!iommu->domain_ids) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001470 pr_err("%s: Allocating domain id array failed\n",
1471 iommu->name);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001472 return -ENOMEM;
1473 }
1474 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1475 GFP_KERNEL);
1476 if (!iommu->domains) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001477 pr_err("%s: Allocating domain array failed\n",
1478 iommu->name);
Jiang Liu852bdb02014-01-06 14:18:11 +08001479 kfree(iommu->domain_ids);
1480 iommu->domain_ids = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001481 return -ENOMEM;
1482 }
1483
1484 /*
1485 * If Caching mode is set, then invalid translations are tagged
1486 * with domain id 0. Hence we need to pre-allocate it.
1487 */
1488 if (cap_caching_mode(iommu->cap))
1489 set_bit(0, iommu->domain_ids);
1490 return 0;
1491}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001492
Jiang Liuffebeb42014-11-09 22:48:02 +08001493static void disable_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001494{
1495 struct dmar_domain *domain;
Jiang Liu2a46ddf2014-07-11 14:19:30 +08001496 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001497
Donald Dutile94a91b52009-08-20 16:51:34 -04001498 if ((iommu->domains) && (iommu->domain_ids)) {
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001499 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
Jiang Liua4eaa862014-02-19 14:07:30 +08001500 /*
1501 * Domain id 0 is reserved for invalid translation
1502 * if hardware supports caching mode.
1503 */
1504 if (cap_caching_mode(iommu->cap) && i == 0)
1505 continue;
1506
Donald Dutile94a91b52009-08-20 16:51:34 -04001507 domain = iommu->domains[i];
1508 clear_bit(i, iommu->domain_ids);
Jiang Liu129ad282014-07-11 14:19:31 +08001509 if (domain_detach_iommu(domain, iommu) == 0 &&
1510 !domain_type_is_vm(domain))
Jiang Liu92d03cc2014-02-19 14:07:28 +08001511 domain_exit(domain);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001512 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001513 }
1514
1515 if (iommu->gcmd & DMA_GCMD_TE)
1516 iommu_disable_translation(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08001517}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001518
Jiang Liuffebeb42014-11-09 22:48:02 +08001519static void free_dmar_iommu(struct intel_iommu *iommu)
1520{
1521 if ((iommu->domains) && (iommu->domain_ids)) {
1522 kfree(iommu->domains);
1523 kfree(iommu->domain_ids);
1524 iommu->domains = NULL;
1525 iommu->domain_ids = NULL;
1526 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001527
Weidong Hand9630fe2008-12-08 11:06:32 +08001528 g_iommus[iommu->seq_id] = NULL;
1529
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001530 /* free context mapping */
1531 free_context_table(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001532}
1533
Jiang Liuab8dfe22014-07-11 14:19:27 +08001534static struct dmar_domain *alloc_domain(int flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001535{
Jiang Liu92d03cc2014-02-19 14:07:28 +08001536 /* domain id for virtual machine domains; it is not written into context entries */
1537 static atomic_t vm_domid = ATOMIC_INIT(0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001538 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001539
1540 domain = alloc_domain_mem();
1541 if (!domain)
1542 return NULL;
1543
Jiang Liuab8dfe22014-07-11 14:19:27 +08001544 memset(domain, 0, sizeof(*domain));
Suresh Siddha4c923d42009-10-02 11:01:24 -07001545 domain->nid = -1;
Jiang Liuab8dfe22014-07-11 14:19:27 +08001546 domain->flags = flags;
Jiang Liu92d03cc2014-02-19 14:07:28 +08001547 spin_lock_init(&domain->iommu_lock);
1548 INIT_LIST_HEAD(&domain->devices);
Jiang Liuab8dfe22014-07-11 14:19:27 +08001549 if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
Jiang Liu92d03cc2014-02-19 14:07:28 +08001550 domain->id = atomic_inc_return(&vm_domid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001551
1552 return domain;
1553}
1554
Jiang Liufb170fb2014-07-11 14:19:28 +08001555static int __iommu_attach_domain(struct dmar_domain *domain,
1556 struct intel_iommu *iommu)
1557{
1558 int num;
1559 unsigned long ndomains;
1560
1561 ndomains = cap_ndoms(iommu->cap);
1562 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1563 if (num < ndomains) {
1564 set_bit(num, iommu->domain_ids);
1565 iommu->domains[num] = domain;
1566 } else {
1567 num = -ENOSPC;
1568 }
1569
1570 return num;
1571}
1572
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001573static int iommu_attach_domain(struct dmar_domain *domain,
1574 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001575{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001576 int num;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001577 unsigned long flags;
1578
Weidong Han8c11e792008-12-08 15:29:22 +08001579 spin_lock_irqsave(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001580 num = __iommu_attach_domain(domain, iommu);
Jiang Liu44bde612014-07-11 14:19:29 +08001581 spin_unlock_irqrestore(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001582 if (num < 0)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001583 pr_err("%s: No free domain ids\n", iommu->name);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001584
Jiang Liufb170fb2014-07-11 14:19:28 +08001585 return num;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001586}
1587
Jiang Liu44bde612014-07-11 14:19:29 +08001588static int iommu_attach_vm_domain(struct dmar_domain *domain,
1589 struct intel_iommu *iommu)
1590{
1591 int num;
1592 unsigned long ndomains;
1593
1594 ndomains = cap_ndoms(iommu->cap);
1595 for_each_set_bit(num, iommu->domain_ids, ndomains)
1596 if (iommu->domains[num] == domain)
1597 return num;
1598
1599 return __iommu_attach_domain(domain, iommu);
1600}
1601
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001602static void iommu_detach_domain(struct dmar_domain *domain,
1603 struct intel_iommu *iommu)
1604{
1605 unsigned long flags;
1606 int num, ndomains;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001607
1608 spin_lock_irqsave(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001609 if (domain_type_is_vm_or_si(domain)) {
1610 ndomains = cap_ndoms(iommu->cap);
1611 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1612 if (iommu->domains[num] == domain) {
1613 clear_bit(num, iommu->domain_ids);
1614 iommu->domains[num] = NULL;
1615 break;
1616 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001617 }
Jiang Liufb170fb2014-07-11 14:19:28 +08001618 } else {
1619 clear_bit(domain->id, iommu->domain_ids);
1620 iommu->domains[domain->id] = NULL;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001621 }
Weidong Han8c11e792008-12-08 15:29:22 +08001622 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001623}
1624
Jiang Liufb170fb2014-07-11 14:19:28 +08001625static void domain_attach_iommu(struct dmar_domain *domain,
1626 struct intel_iommu *iommu)
1627{
1628 unsigned long flags;
1629
1630 spin_lock_irqsave(&domain->iommu_lock, flags);
1631 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
1632 domain->iommu_count++;
1633 if (domain->iommu_count == 1)
1634 domain->nid = iommu->node;
1635 domain_update_iommu_cap(domain);
1636 }
1637 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1638}
1639
1640static int domain_detach_iommu(struct dmar_domain *domain,
1641 struct intel_iommu *iommu)
1642{
1643 unsigned long flags;
1644 int count = INT_MAX;
1645
1646 spin_lock_irqsave(&domain->iommu_lock, flags);
1647 if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
1648 count = --domain->iommu_count;
1649 domain_update_iommu_cap(domain);
1650 }
1651 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1652
1653 return count;
1654}
1655
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001656static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001657static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001658
Joseph Cihula51a63e62011-03-21 11:04:24 -07001659static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001660{
1661 struct pci_dev *pdev = NULL;
1662 struct iova *iova;
1663 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001664
Robin Murphy0fb5fe82015-01-12 17:51:16 +00001665 init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
1666 DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001667
Mark Gross8a443df2008-03-04 14:59:31 -08001668 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1669 &reserved_rbtree_key);
1670
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001671 /* IOAPIC ranges shouldn't be accessed by DMA */
1672 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1673 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001674 if (!iova) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001675 pr_err("Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001676 return -ENODEV;
1677 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001678
1679 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1680 for_each_pci_dev(pdev) {
1681 struct resource *r;
1682
1683 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1684 r = &pdev->resource[i];
1685 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1686 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001687 iova = reserve_iova(&reserved_iova_list,
1688 IOVA_PFN(r->start),
1689 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001690 if (!iova) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001691 pr_err("Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001692 return -ENODEV;
1693 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001694 }
1695 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001696 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001697}
1698
1699static void domain_reserve_special_ranges(struct dmar_domain *domain)
1700{
1701 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1702}
1703
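/*
 * Round a guest address width up to the next width that corresponds to
 * a whole number of 9-bit page-table levels above the 12-bit page
 * offset, capped at 64.  Worked example: gaw = 40 gives
 * r = (40 - 12) % 9 = 1, so agaw = 40 + 9 - 1 = 48; gaw = 39 or 48 is
 * already aligned and is returned unchanged.
 */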
1704static inline int guestwidth_to_adjustwidth(int gaw)
1705{
1706 int agaw;
1707 int r = (gaw - 12) % 9;
1708
1709 if (r == 0)
1710 agaw = gaw;
1711 else
1712 agaw = gaw + 9 - r;
1713 if (agaw > 64)
1714 agaw = 64;
1715 return agaw;
1716}
1717
1718static int domain_init(struct dmar_domain *domain, int guest_width)
1719{
1720 struct intel_iommu *iommu;
1721 int adjust_width, agaw;
1722 unsigned long sagaw;
1723
Robin Murphy0fb5fe82015-01-12 17:51:16 +00001724 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
1725 DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001726 domain_reserve_special_ranges(domain);
1727
1728 /* calculate AGAW */
Weidong Han8c11e792008-12-08 15:29:22 +08001729 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001730 if (guest_width > cap_mgaw(iommu->cap))
1731 guest_width = cap_mgaw(iommu->cap);
1732 domain->gaw = guest_width;
1733 adjust_width = guestwidth_to_adjustwidth(guest_width);
1734 agaw = width_to_agaw(adjust_width);
1735 sagaw = cap_sagaw(iommu->cap);
1736 if (!test_bit(agaw, &sagaw)) {
1737 /* hardware doesn't support it, choose a bigger one */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001738 pr_debug("Hardware doesn't support agaw %d\n", agaw);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001739 agaw = find_next_bit(&sagaw, 5, agaw);
1740 if (agaw >= 5)
1741 return -ENODEV;
1742 }
1743 domain->agaw = agaw;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001744
Weidong Han8e6040972008-12-08 15:49:06 +08001745 if (ecap_coherent(iommu->ecap))
1746 domain->iommu_coherency = 1;
1747 else
1748 domain->iommu_coherency = 0;
1749
Sheng Yang58c610b2009-03-18 15:33:05 +08001750 if (ecap_sc_support(iommu->ecap))
1751 domain->iommu_snooping = 1;
1752 else
1753 domain->iommu_snooping = 0;
1754
David Woodhouse214e39a2014-03-19 10:38:49 +00001755 if (intel_iommu_superpage)
1756 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1757 else
1758 domain->iommu_superpage = 0;
1759
Suresh Siddha4c923d42009-10-02 11:01:24 -07001760 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001761
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001762 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001763 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001764 if (!domain->pgd)
1765 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001766 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001767 return 0;
1768}
1769
1770static void domain_exit(struct dmar_domain *domain)
1771{
David Woodhouseea8ea462014-03-05 17:09:32 +00001772 struct page *freelist = NULL;
Alex Williamson71684402015-03-04 11:30:10 -07001773 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001774
1775 /* Domain 0 is reserved, so don't process it */
1776 if (!domain)
1777 return;
1778
Alex Williamson7b668352011-05-24 12:02:41 +01001779 /* Flush any lazy unmaps that may reference this domain */
1780 if (!intel_iommu_strict)
1781 flush_unmaps_timeout(0);
1782
Jiang Liu92d03cc2014-02-19 14:07:28 +08001783 /* remove associated devices */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001784 domain_remove_dev_info(domain);
Jiang Liu92d03cc2014-02-19 14:07:28 +08001785
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001786 /* destroy iovas */
1787 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001788
David Woodhouseea8ea462014-03-05 17:09:32 +00001789 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001790
Jiang Liu92d03cc2014-02-19 14:07:28 +08001791 /* clear attached or cached domains */
Jiang Liu0e242612014-02-19 14:07:34 +08001792 rcu_read_lock();
Alex Williamson71684402015-03-04 11:30:10 -07001793 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
1794 iommu_detach_domain(domain, g_iommus[i]);
Jiang Liu0e242612014-02-19 14:07:34 +08001795 rcu_read_unlock();
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001796
David Woodhouseea8ea462014-03-05 17:09:32 +00001797 dma_free_pagelist(freelist);
1798
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001799 free_domain_mem(domain);
1800}
1801
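/*
 * Program the context entry for (bus, devfn) on this IOMMU: domain id,
 * page-table root (omitted for pass-through, which only needs the
 * address width), and translation type, then flush.  With caching mode
 * a device-selective context-cache flush plus a domain-selective IOTLB
 * flush is issued; otherwise a write-buffer flush suffices for this
 * non-present to present transition.
 */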
David Woodhouse64ae8922014-03-09 12:52:30 -07001802static int domain_context_mapping_one(struct dmar_domain *domain,
1803 struct intel_iommu *iommu,
1804 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001805{
1806 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001807 unsigned long flags;
Weidong Hanea6606b2008-12-08 23:08:15 +08001808 struct dma_pte *pgd;
Weidong Hanea6606b2008-12-08 23:08:15 +08001809 int id;
1810 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001811 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001812
1813 pr_debug("Set context mapping for %02x:%02x.%d\n",
1814 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001815
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001816 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001817 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1818 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001819
David Woodhouse03ecc322015-02-13 14:35:21 +00001820 spin_lock_irqsave(&iommu->lock, flags);
1821 context = iommu_context_addr(iommu, bus, devfn, 1);
1822 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001823 if (!context)
1824 return -ENOMEM;
1825 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001826 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001827 spin_unlock_irqrestore(&iommu->lock, flags);
1828 return 0;
1829 }
1830
Weidong Hanea6606b2008-12-08 23:08:15 +08001831 id = domain->id;
1832 pgd = domain->pgd;
1833
Jiang Liuab8dfe22014-07-11 14:19:27 +08001834 if (domain_type_is_vm_or_si(domain)) {
Jiang Liu44bde612014-07-11 14:19:29 +08001835 if (domain_type_is_vm(domain)) {
1836 id = iommu_attach_vm_domain(domain, iommu);
Jiang Liufb170fb2014-07-11 14:19:28 +08001837 if (id < 0) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001838 spin_unlock_irqrestore(&iommu->lock, flags);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001839 pr_err("%s: No free domain ids\n", iommu->name);
Weidong Hanea6606b2008-12-08 23:08:15 +08001840 return -EFAULT;
1841 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001842 }
1843
1844 /* Skip the top levels of the page tables for an
1845 * iommu whose agaw is smaller than the domain's.
Chris Wright1672af12009-12-02 12:06:34 -08001846 * Unnecessary for PT mode.
Weidong Hanea6606b2008-12-08 23:08:15 +08001847 */
Chris Wright1672af12009-12-02 12:06:34 -08001848 if (translation != CONTEXT_TT_PASS_THROUGH) {
1849 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1850 pgd = phys_to_virt(dma_pte_addr(pgd));
1851 if (!dma_pte_present(pgd)) {
1852 spin_unlock_irqrestore(&iommu->lock, flags);
1853 return -ENOMEM;
1854 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001855 }
1856 }
1857 }
1858
1859 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001860
Yu Zhao93a23a72009-05-18 13:51:37 +08001861 if (translation != CONTEXT_TT_PASS_THROUGH) {
David Woodhouse64ae8922014-03-09 12:52:30 -07001862 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
Yu Zhao93a23a72009-05-18 13:51:37 +08001863 translation = info ? CONTEXT_TT_DEV_IOTLB :
1864 CONTEXT_TT_MULTI_LEVEL;
1865 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001866 /*
1867 * In pass through mode, AW must be programmed to indicate the largest
1868 * AGAW value supported by hardware. And ASR is ignored by hardware.
1869 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001870 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001871 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001872 else {
1873 context_set_address_root(context, virt_to_phys(pgd));
1874 context_set_address_width(context, iommu->agaw);
1875 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001876
1877 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001878 context_set_fault_enable(context);
1879 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001880 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001881
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001882 /*
1883 * It's a non-present to present mapping. If hardware doesn't cache
1884 * non-present entries we only need to flush the write-buffer. If it
1885 * _does_ cache non-present entries, then it does so in the special
1886 * domain #0, which we have to flush:
1887 */
1888 if (cap_caching_mode(iommu->cap)) {
1889 iommu->flush.flush_context(iommu, 0,
1890 (((u16)bus) << 8) | devfn,
1891 DMA_CCMD_MASK_NOBIT,
1892 DMA_CCMD_DEVICE_INVL);
Jiang Liu18fd7792014-07-11 14:19:26 +08001893 iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001894 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001895 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001896 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001897 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001898 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001899
Jiang Liufb170fb2014-07-11 14:19:28 +08001900 domain_attach_iommu(domain, iommu);
1901
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001902 return 0;
1903}
1904
Alex Williamson579305f2014-07-03 09:51:43 -06001905struct domain_context_mapping_data {
1906 struct dmar_domain *domain;
1907 struct intel_iommu *iommu;
1908 int translation;
1909};
1910
1911static int domain_context_mapping_cb(struct pci_dev *pdev,
1912 u16 alias, void *opaque)
1913{
1914 struct domain_context_mapping_data *data = opaque;
1915
1916 return domain_context_mapping_one(data->domain, data->iommu,
1917 PCI_BUS_NUM(alias), alias & 0xff,
1918 data->translation);
1919}
1920
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001921static int
David Woodhousee1f167f2014-03-09 15:24:46 -07001922domain_context_mapping(struct dmar_domain *domain, struct device *dev,
1923 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001924{
David Woodhouse64ae8922014-03-09 12:52:30 -07001925 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07001926 u8 bus, devfn;
Alex Williamson579305f2014-07-03 09:51:43 -06001927 struct domain_context_mapping_data data;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001928
David Woodhousee1f167f2014-03-09 15:24:46 -07001929 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse64ae8922014-03-09 12:52:30 -07001930 if (!iommu)
1931 return -ENODEV;
1932
Alex Williamson579305f2014-07-03 09:51:43 -06001933 if (!dev_is_pci(dev))
1934 return domain_context_mapping_one(domain, iommu, bus, devfn,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001935 translation);
Alex Williamson579305f2014-07-03 09:51:43 -06001936
1937 data.domain = domain;
1938 data.iommu = iommu;
1939 data.translation = translation;
1940
1941 return pci_for_each_dma_alias(to_pci_dev(dev),
1942 &domain_context_mapping_cb, &data);
1943}
1944
1945static int domain_context_mapped_cb(struct pci_dev *pdev,
1946 u16 alias, void *opaque)
1947{
1948 struct intel_iommu *iommu = opaque;
1949
1950 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001951}
1952
David Woodhousee1f167f2014-03-09 15:24:46 -07001953static int domain_context_mapped(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001954{
Weidong Han5331fe62008-12-08 23:00:00 +08001955 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07001956 u8 bus, devfn;
Weidong Han5331fe62008-12-08 23:00:00 +08001957
David Woodhousee1f167f2014-03-09 15:24:46 -07001958 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001959 if (!iommu)
1960 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001961
Alex Williamson579305f2014-07-03 09:51:43 -06001962 if (!dev_is_pci(dev))
1963 return device_context_mapped(iommu, bus, devfn);
David Woodhousee1f167f2014-03-09 15:24:46 -07001964
Alex Williamson579305f2014-07-03 09:51:43 -06001965 return !pci_for_each_dma_alias(to_pci_dev(dev),
1966 domain_context_mapped_cb, iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001967}
1968
Fenghua Yuf5329592009-08-04 15:09:37 -07001969/* Returns the number of VTD pages, but aligned to the MM page size */
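/*
 * E.g. with 4KiB pages, host_addr = 0x1234 and size = 0x2000: the
 * in-page offset is 0x234, PAGE_ALIGN(0x234 + 0x2000) = 0x3000, so
 * three VT-d pages are returned.
 */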
1970static inline unsigned long aligned_nrpages(unsigned long host_addr,
1971 size_t size)
1972{
1973 host_addr &= ~PAGE_MASK;
1974 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1975}
1976
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001977/* Return the largest possible superpage level for a given mapping */
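/*
 * Each additional level covers VTD_STRIDE_SHIFT (9) more address bits.
 * For instance, when iov_pfn and phy_pfn are both 2MiB-aligned (low 9
 * bits clear), at least 512 pages are being mapped, and the domain
 * supports one level of superpages, level 2 is returned and a 2MiB
 * page can be used.
 */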
1978static inline int hardware_largepage_caps(struct dmar_domain *domain,
1979 unsigned long iov_pfn,
1980 unsigned long phy_pfn,
1981 unsigned long pages)
1982{
1983 int support, level = 1;
1984 unsigned long pfnmerge;
1985
1986 support = domain->iommu_superpage;
1987
1988 /* To use a large page, the virtual *and* physical addresses
1989 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1990 of them will mean we have to use smaller pages. So just
1991 merge them and check both at once. */
1992 pfnmerge = iov_pfn | phy_pfn;
1993
1994 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1995 pages >>= VTD_STRIDE_SHIFT;
1996 if (!pages)
1997 break;
1998 pfnmerge >>= VTD_STRIDE_SHIFT;
1999 level++;
2000 support--;
2001 }
2002 return level;
2003}
2004
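/*
 * Install PTEs for nr_pages starting at iov_pfn, taking the physical
 * pages either from a scatterlist (sg != NULL) or from a contiguous
 * range starting at phys_pfn.  Superpage PTEs are used whenever
 * hardware_largepage_caps() allows it, and the newly written PTEs are
 * flushed to memory one PTE page at a time (and at superpage
 * boundaries).
 */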
David Woodhouse9051aa02009-06-29 12:30:54 +01002005static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2006 struct scatterlist *sg, unsigned long phys_pfn,
2007 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01002008{
2009 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01002010 phys_addr_t uninitialized_var(pteval);
Jiang Liucc4f14a2014-11-26 09:42:10 +08002011 unsigned long sg_res = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002012 unsigned int largepage_lvl = 0;
2013 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01002014
Jiang Liu162d1b12014-07-11 14:19:35 +08002015 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
David Woodhousee1605492009-06-29 11:17:38 +01002016
2017 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2018 return -EINVAL;
2019
2020 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2021
Jiang Liucc4f14a2014-11-26 09:42:10 +08002022 if (!sg) {
2023 sg_res = nr_pages;
David Woodhouse9051aa02009-06-29 12:30:54 +01002024 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2025 }
2026
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002027 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01002028 uint64_t tmp;
2029
David Woodhousee1605492009-06-29 11:17:38 +01002030 if (!sg_res) {
Fenghua Yuf5329592009-08-04 15:09:37 -07002031 sg_res = aligned_nrpages(sg->offset, sg->length);
David Woodhousee1605492009-06-29 11:17:38 +01002032 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2033 sg->dma_length = sg->length;
2034 pteval = page_to_phys(sg_page(sg)) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002035 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01002036 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002037
David Woodhousee1605492009-06-29 11:17:38 +01002038 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002039 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2040
David Woodhouse5cf0a762014-03-19 16:07:49 +00002041 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01002042 if (!pte)
2043 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002044 /* It is a large page */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002045 if (largepage_lvl > 1) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002046 pteval |= DMA_PTE_LARGE_PAGE;
Jiang Liud41a4ad2014-07-11 14:19:34 +08002047 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2048 /*
2049 * Ensure that old small page tables are
2050 * removed to make room for superpage,
2051 * if they exist.
2052 */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002053 dma_pte_free_pagetable(domain, iov_pfn,
Jiang Liud41a4ad2014-07-11 14:19:34 +08002054 iov_pfn + lvl_pages - 1);
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002055 } else {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002056 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002057 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002058
David Woodhousee1605492009-06-29 11:17:38 +01002059 }
2060 /* We don't need a lock here; nobody else
2061 * touches the iova range.
2062 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01002063 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01002064 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01002065 static int dumps = 5;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002066 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2067 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01002068 if (dumps) {
2069 dumps--;
2070 debug_dma_dump_mappings(NULL);
2071 }
2072 WARN_ON(1);
2073 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002074
2075 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2076
2077 BUG_ON(nr_pages < lvl_pages);
2078 BUG_ON(sg_res < lvl_pages);
2079
2080 nr_pages -= lvl_pages;
2081 iov_pfn += lvl_pages;
2082 phys_pfn += lvl_pages;
2083 pteval += lvl_pages * VTD_PAGE_SIZE;
2084 sg_res -= lvl_pages;
2085
2086 /* If the next PTE would be the first in a new page, then we
2087 need to flush the cache on the entries we've just written.
2088 And then we'll need to recalculate 'pte', so clear it and
2089 let it get set again in the if (!pte) block above.
2090
2091 If we're done (!nr_pages) we need to flush the cache too.
2092
2093 Also if we've been setting superpages, we may need to
2094 recalculate 'pte' and switch back to smaller pages for the
2095 end of the mapping, if the trailing size is not enough to
2096 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01002097 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002098 if (!nr_pages || first_pte_in_page(pte) ||
2099 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01002100 domain_flush_cache(domain, first_pte,
2101 (void *)pte - (void *)first_pte);
2102 pte = NULL;
2103 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002104
2105 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01002106 sg = sg_next(sg);
2107 }
2108 return 0;
2109}
2110
David Woodhouse9051aa02009-06-29 12:30:54 +01002111static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2112 struct scatterlist *sg, unsigned long nr_pages,
2113 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002114{
David Woodhouse9051aa02009-06-29 12:30:54 +01002115 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2116}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002117
David Woodhouse9051aa02009-06-29 12:30:54 +01002118static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2119 unsigned long phys_pfn, unsigned long nr_pages,
2120 int prot)
2121{
2122 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002123}
2124
Weidong Hanc7151a82008-12-08 22:51:37 +08002125static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002126{
Weidong Hanc7151a82008-12-08 22:51:37 +08002127 if (!iommu)
2128 return;
Weidong Han8c11e792008-12-08 15:29:22 +08002129
2130 clear_context_table(iommu, bus, devfn);
2131 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002132 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002133 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002134}
2135
David Woodhouse109b9b02012-05-25 17:43:02 +01002136static inline void unlink_domain_info(struct device_domain_info *info)
2137{
2138 assert_spin_locked(&device_domain_lock);
2139 list_del(&info->link);
2140 list_del(&info->global);
2141 if (info->dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002142 info->dev->archdata.iommu = NULL;
David Woodhouse109b9b02012-05-25 17:43:02 +01002143}
2144
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002145static void domain_remove_dev_info(struct dmar_domain *domain)
2146{
Yijing Wang3a74ca02014-05-20 20:37:47 +08002147 struct device_domain_info *info, *tmp;
Jiang Liufb170fb2014-07-11 14:19:28 +08002148 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002149
2150 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wang3a74ca02014-05-20 20:37:47 +08002151 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
David Woodhouse109b9b02012-05-25 17:43:02 +01002152 unlink_domain_info(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002153 spin_unlock_irqrestore(&device_domain_lock, flags);
2154
Yu Zhao93a23a72009-05-18 13:51:37 +08002155 iommu_disable_dev_iotlb(info);
David Woodhouse7c7faa12014-03-09 13:33:06 -07002156 iommu_detach_dev(info->iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002157
Jiang Liuab8dfe22014-07-11 14:19:27 +08002158 if (domain_type_is_vm(domain)) {
David Woodhouse7c7faa12014-03-09 13:33:06 -07002159 iommu_detach_dependent_devices(info->iommu, info->dev);
Jiang Liufb170fb2014-07-11 14:19:28 +08002160 domain_detach_iommu(domain, info->iommu);
Jiang Liu92d03cc2014-02-19 14:07:28 +08002161 }
2162
2163 free_devinfo_mem(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002164 spin_lock_irqsave(&device_domain_lock, flags);
2165 }
2166 spin_unlock_irqrestore(&device_domain_lock, flags);
2167}
2168
2169/*
2170 * find_domain
David Woodhouse1525a292014-03-06 16:19:30 +00002171 * Note: we use struct device->archdata.iommu to store the info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002172 */
David Woodhouse1525a292014-03-06 16:19:30 +00002173static struct dmar_domain *find_domain(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002174{
2175 struct device_domain_info *info;
2176
2177 /* No lock here, assumes no domain exit in normal case */
David Woodhouse1525a292014-03-06 16:19:30 +00002178 info = dev->archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002179 if (info)
2180 return info->domain;
2181 return NULL;
2182}
2183
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002184static inline struct device_domain_info *
Jiang Liu745f2582014-02-19 14:07:26 +08002185dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2186{
2187 struct device_domain_info *info;
2188
2189 list_for_each_entry(info, &device_domain_list, global)
David Woodhouse41e80dca2014-03-09 13:55:54 -07002190 if (info->iommu->segment == segment && info->bus == bus &&
Jiang Liu745f2582014-02-19 14:07:26 +08002191 info->devfn == devfn)
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002192 return info;
Jiang Liu745f2582014-02-19 14:07:26 +08002193
2194 return NULL;
2195}
2196
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002197static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
David Woodhouse41e80dca2014-03-09 13:55:54 -07002198 int bus, int devfn,
David Woodhouseb718cd32014-03-09 13:11:33 -07002199 struct device *dev,
2200 struct dmar_domain *domain)
Jiang Liu745f2582014-02-19 14:07:26 +08002201{
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002202 struct dmar_domain *found = NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002203 struct device_domain_info *info;
2204 unsigned long flags;
2205
2206 info = alloc_devinfo_mem();
2207 if (!info)
David Woodhouseb718cd32014-03-09 13:11:33 -07002208 return NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002209
Jiang Liu745f2582014-02-19 14:07:26 +08002210 info->bus = bus;
2211 info->devfn = devfn;
2212 info->dev = dev;
2213 info->domain = domain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002214 info->iommu = iommu;
Jiang Liu745f2582014-02-19 14:07:26 +08002215
2216 spin_lock_irqsave(&device_domain_lock, flags);
2217 if (dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002218 found = find_domain(dev);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002219 else {
2220 struct device_domain_info *info2;
David Woodhouse41e80dca2014-03-09 13:55:54 -07002221 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002222 if (info2)
2223 found = info2->domain;
2224 }
Jiang Liu745f2582014-02-19 14:07:26 +08002225 if (found) {
2226 spin_unlock_irqrestore(&device_domain_lock, flags);
2227 free_devinfo_mem(info);
David Woodhouseb718cd32014-03-09 13:11:33 -07002228 /* Caller must free the original domain */
2229 return found;
Jiang Liu745f2582014-02-19 14:07:26 +08002230 }
2231
David Woodhouseb718cd32014-03-09 13:11:33 -07002232 list_add(&info->link, &domain->devices);
2233 list_add(&info->global, &device_domain_list);
2234 if (dev)
2235 dev->archdata.iommu = info;
2236 spin_unlock_irqrestore(&device_domain_lock, flags);
2237
2238 return domain;
Jiang Liu745f2582014-02-19 14:07:26 +08002239}
2240
Alex Williamson579305f2014-07-03 09:51:43 -06002241static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2242{
2243 *(u16 *)opaque = alias;
2244 return 0;
2245}
2246
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002247/* domain is initialized */
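/*
 * Roughly: return the domain already attached to the device if there is
 * one; otherwise, for PCI, reuse the domain of the device's DMA alias;
 * only if neither exists is a fresh domain allocated, attached to the
 * IOMMU and initialised with the requested guest address width.
 */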
David Woodhouse146922e2014-03-09 15:44:17 -07002248static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002249{
Alex Williamson579305f2014-07-03 09:51:43 -06002250 struct dmar_domain *domain, *tmp;
2251 struct intel_iommu *iommu;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002252 struct device_domain_info *info;
Alex Williamson579305f2014-07-03 09:51:43 -06002253 u16 dma_alias;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002254 unsigned long flags;
Yijing Wangaa4d0662014-05-26 20:14:06 +08002255 u8 bus, devfn;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002256
David Woodhouse146922e2014-03-09 15:44:17 -07002257 domain = find_domain(dev);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002258 if (domain)
2259 return domain;
2260
David Woodhouse146922e2014-03-09 15:44:17 -07002261 iommu = device_to_iommu(dev, &bus, &devfn);
2262 if (!iommu)
Alex Williamson579305f2014-07-03 09:51:43 -06002263 return NULL;
2264
2265 if (dev_is_pci(dev)) {
2266 struct pci_dev *pdev = to_pci_dev(dev);
2267
2268 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2269
2270 spin_lock_irqsave(&device_domain_lock, flags);
2271 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2272 PCI_BUS_NUM(dma_alias),
2273 dma_alias & 0xff);
2274 if (info) {
2275 iommu = info->iommu;
2276 domain = info->domain;
2277 }
2278 spin_unlock_irqrestore(&device_domain_lock, flags);
2279
2280 /* DMA alias already has a domain, use it */
2281 if (info)
2282 goto found_domain;
2283 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002284
David Woodhouse146922e2014-03-09 15:44:17 -07002285 /* Allocate and initialize new domain for the device */
Jiang Liuab8dfe22014-07-11 14:19:27 +08002286 domain = alloc_domain(0);
Jiang Liu745f2582014-02-19 14:07:26 +08002287 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002288 return NULL;
Jiang Liu44bde612014-07-11 14:19:29 +08002289 domain->id = iommu_attach_domain(domain, iommu);
2290 if (domain->id < 0) {
Alex Williamson2fe9723d2011-03-04 14:52:30 -07002291 free_domain_mem(domain);
Alex Williamson579305f2014-07-03 09:51:43 -06002292 return NULL;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002293 }
Jiang Liufb170fb2014-07-11 14:19:28 +08002294 domain_attach_iommu(domain, iommu);
Alex Williamson579305f2014-07-03 09:51:43 -06002295 if (domain_init(domain, gaw)) {
2296 domain_exit(domain);
2297 return NULL;
2298 }
2299
2300 /* register PCI DMA alias device */
2301 if (dev_is_pci(dev)) {
2302 tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2303 dma_alias & 0xff, NULL, domain);
2304
2305 if (!tmp || tmp != domain) {
2306 domain_exit(domain);
2307 domain = tmp;
2308 }
2309
David Woodhouseb718cd32014-03-09 13:11:33 -07002310 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002311 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002312 }
2313
2314found_domain:
Alex Williamson579305f2014-07-03 09:51:43 -06002315 tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2316
2317 if (!tmp || tmp != domain) {
2318 domain_exit(domain);
2319 domain = tmp;
2320 }
David Woodhouseb718cd32014-03-09 13:11:33 -07002321
2322 return domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002323}
2324
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002325static int iommu_identity_mapping;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002326#define IDENTMAP_ALL 1
2327#define IDENTMAP_GFX 2
2328#define IDENTMAP_AZALIA 4
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002329
David Woodhouseb2132032009-06-26 18:50:28 +01002330static int iommu_domain_identity_map(struct dmar_domain *domain,
2331 unsigned long long start,
2332 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002333{
David Woodhousec5395d52009-06-28 16:35:56 +01002334 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2335 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002336
David Woodhousec5395d52009-06-28 16:35:56 +01002337 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2338 dma_to_mm_pfn(last_vpfn))) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002339 pr_err("Reserving iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002340 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002341 }
2342
David Woodhousec5395d52009-06-28 16:35:56 +01002343 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2344 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002345 /*
2346 * The RMRR range might overlap with a physical memory range,
2347 * so clear it first.
2348 */
David Woodhousec5395d52009-06-28 16:35:56 +01002349 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002350
David Woodhousec5395d52009-06-28 16:35:56 +01002351 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2352 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002353 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002354}
2355
David Woodhouse0b9d9752014-03-09 15:48:15 -07002356static int iommu_prepare_identity_map(struct device *dev,
David Woodhouseb2132032009-06-26 18:50:28 +01002357 unsigned long long start,
2358 unsigned long long end)
2359{
2360 struct dmar_domain *domain;
2361 int ret;
2362
David Woodhouse0b9d9752014-03-09 15:48:15 -07002363 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01002364 if (!domain)
2365 return -ENOMEM;
2366
David Woodhouse19943b02009-08-04 16:19:20 +01002367 /* For _hardware_ passthrough, don't bother. But for software
2368 passthrough, we do it anyway -- it may indicate a memory
2369 range which is reserved in E820, and so didn't get set
2370 up to start with in si_domain */
2371 if (domain == si_domain && hw_pass_through) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002372 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2373 dev_name(dev), start, end);
David Woodhouse19943b02009-08-04 16:19:20 +01002374 return 0;
2375 }
2376
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002377 pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2378 dev_name(dev), start, end);
2379
David Woodhouse5595b522009-12-02 09:21:55 +00002380 if (end < start) {
2381 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2382 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2383 dmi_get_system_info(DMI_BIOS_VENDOR),
2384 dmi_get_system_info(DMI_BIOS_VERSION),
2385 dmi_get_system_info(DMI_PRODUCT_VERSION));
2386 ret = -EIO;
2387 goto error;
2388 }
2389
David Woodhouse2ff729f2009-08-26 14:25:41 +01002390 if (end >> agaw_to_width(domain->agaw)) {
2391 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2392 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2393 agaw_to_width(domain->agaw),
2394 dmi_get_system_info(DMI_BIOS_VENDOR),
2395 dmi_get_system_info(DMI_BIOS_VERSION),
2396 dmi_get_system_info(DMI_PRODUCT_VERSION));
2397 ret = -EIO;
2398 goto error;
2399 }
David Woodhouse19943b02009-08-04 16:19:20 +01002400
David Woodhouseb2132032009-06-26 18:50:28 +01002401 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002402 if (ret)
2403 goto error;
2404
2405 /* context entry init */
David Woodhouse0b9d9752014-03-09 15:48:15 -07002406 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01002407 if (ret)
2408 goto error;
2409
2410 return 0;
2411
2412 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002413 domain_exit(domain);
2414 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002415}
2416
2417static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
David Woodhouse0b9d9752014-03-09 15:48:15 -07002418 struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002419{
David Woodhouse0b9d9752014-03-09 15:48:15 -07002420 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002421 return 0;
David Woodhouse0b9d9752014-03-09 15:48:15 -07002422 return iommu_prepare_identity_map(dev, rmrr->base_address,
2423 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002424}
2425
Suresh Siddhad3f13812011-08-23 17:05:25 -07002426#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002427static inline void iommu_prepare_isa(void)
2428{
2429 struct pci_dev *pdev;
2430 int ret;
2431
2432 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2433 if (!pdev)
2434 return;
2435
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002436 pr_info("Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse0b9d9752014-03-09 15:48:15 -07002437 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002438
2439 if (ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002440 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002441
Yijing Wang9b27e822014-05-20 20:37:52 +08002442 pci_dev_put(pdev);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002443}
2444#else
2445static inline void iommu_prepare_isa(void)
2446{
2447 return;
2448}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002449#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002450
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002451static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002452
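/*
 * Build the static identity domain (si_domain) used for identity-mapped
 * devices.  The domain is attached to every active IOMMU; for software
 * passthrough (hw == 0) every usable memory range of every online node
 * is then identity-mapped into it, while for hardware passthrough no
 * mappings are created since the page tables are not consulted in that
 * mode.
 */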
Matt Kraai071e1372009-08-23 22:30:22 -07002453static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002454{
2455 struct dmar_drhd_unit *drhd;
2456 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002457 int nid, ret = 0;
Jiang Liu44bde612014-07-11 14:19:29 +08002458 bool first = true;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002459
Jiang Liuab8dfe22014-07-11 14:19:27 +08002460 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002461 if (!si_domain)
2462 return -EFAULT;
2463
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002464 for_each_active_iommu(iommu, drhd) {
2465 ret = iommu_attach_domain(si_domain, iommu);
Jiang Liufb170fb2014-07-11 14:19:28 +08002466 if (ret < 0) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002467 domain_exit(si_domain);
2468 return -EFAULT;
Jiang Liu44bde612014-07-11 14:19:29 +08002469 } else if (first) {
2470 si_domain->id = ret;
2471 first = false;
2472 } else if (si_domain->id != ret) {
2473 domain_exit(si_domain);
2474 return -EFAULT;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002475 }
Jiang Liufb170fb2014-07-11 14:19:28 +08002476 domain_attach_iommu(si_domain, iommu);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002477 }
2478
2479 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2480 domain_exit(si_domain);
2481 return -EFAULT;
2482 }
2483
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002484 pr_debug("Identity mapping domain is domain %d\n",
Jiang Liu9544c002014-01-06 14:18:13 +08002485 si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002486
David Woodhouse19943b02009-08-04 16:19:20 +01002487 if (hw)
2488 return 0;
2489
David Woodhousec7ab48d2009-06-26 19:10:36 +01002490 for_each_online_node(nid) {
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002491 unsigned long start_pfn, end_pfn;
2492 int i;
2493
2494 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2495 ret = iommu_domain_identity_map(si_domain,
2496 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2497 if (ret)
2498 return ret;
2499 }
David Woodhousec7ab48d2009-06-26 19:10:36 +01002500 }
2501
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002502 return 0;
2503}
2504
David Woodhouse9b226622014-03-09 14:03:28 -07002505static int identity_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002506{
2507 struct device_domain_info *info;
2508
2509 if (likely(!iommu_identity_mapping))
2510 return 0;
2511
David Woodhouse9b226622014-03-09 14:03:28 -07002512 info = dev->archdata.iommu;
Mike Traviscb452a42011-05-28 13:15:03 -05002513 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2514 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002515
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002516 return 0;
2517}
2518
2519static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5913c9b2014-03-09 16:27:31 -07002520 struct device *dev, int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002521{
David Woodhouse0ac72662014-03-09 13:19:22 -07002522 struct dmar_domain *ndomain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002523 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002524 u8 bus, devfn;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002525 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002526
David Woodhouse5913c9b2014-03-09 16:27:31 -07002527 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002528 if (!iommu)
2529 return -ENODEV;
2530
David Woodhouse5913c9b2014-03-09 16:27:31 -07002531 ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
David Woodhouse0ac72662014-03-09 13:19:22 -07002532 if (ndomain != domain)
2533 return -EBUSY;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002534
David Woodhouse5913c9b2014-03-09 16:27:31 -07002535 ret = domain_context_mapping(domain, dev, translation);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002536 if (ret) {
David Woodhouse5913c9b2014-03-09 16:27:31 -07002537 domain_remove_one_dev_info(domain, dev);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002538 return ret;
2539 }
2540
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002541 return 0;
2542}
2543
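/*
 * RMRRs (Reserved Memory Region Reporting structures in the ACPI DMAR
 * table) describe memory ranges that the BIOS expects a device to keep
 * using for DMA after boot, e.g. USB legacy keyboard emulation buffers.
 * device_has_rmrr() reports whether the device appears in the device
 * scope of any such unit.
 */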
David Woodhouse0b9d9752014-03-09 15:48:15 -07002544static bool device_has_rmrr(struct device *dev)
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002545{
2546 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002547 struct device *tmp;
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002548 int i;
2549
Jiang Liu0e242612014-02-19 14:07:34 +08002550 rcu_read_lock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002551 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002552 /*
2553 * Return TRUE if this RMRR contains the device that
2554 * is passed in.
2555 */
2556 for_each_active_dev_scope(rmrr->devices,
2557 rmrr->devices_cnt, i, tmp)
David Woodhouse0b9d9752014-03-09 15:48:15 -07002558 if (tmp == dev) {
Jiang Liu0e242612014-02-19 14:07:34 +08002559 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002560 return true;
Jiang Liub683b232014-02-19 14:07:32 +08002561 }
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002562 }
Jiang Liu0e242612014-02-19 14:07:34 +08002563 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002564 return false;
2565}
2566
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002567/*
2568 * There are a couple cases where we need to restrict the functionality of
2569 * devices associated with RMRRs. The first is when evaluating a device for
2570 * identity mapping because problems exist when devices are moved in and out
2571 * of domains and their respective RMRR information is lost. This means that
2572 * a device with associated RMRRs will never be in a "passthrough" domain.
2573 * The second is use of the device through the IOMMU API. This interface
2574 * expects to have full control of the IOVA space for the device. We cannot
2575 * satisfy both the requirement that RMRR access is maintained and have an
2576 * unencumbered IOVA space. We also have no ability to quiesce the device's
2577 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2578 * We therefore prevent devices associated with an RMRR from participating in
2579 * the IOMMU API, which eliminates them from device assignment.
2580 *
2581 * In both cases we assume that PCI USB devices with RMRRs have them largely
2582 * for historical reasons and that the RMRR space is not actively used post
2583 * boot. This exclusion may change if vendors begin to abuse it.
David Woodhouse18436af2015-03-25 15:05:47 +00002584 *
2585 * The same exception is made for graphics devices, with the requirement that
2586 * any use of the RMRR regions will be torn down before assigning the device
2587 * to a guest.
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002588 */
2589static bool device_is_rmrr_locked(struct device *dev)
2590{
2591 if (!device_has_rmrr(dev))
2592 return false;
2593
2594 if (dev_is_pci(dev)) {
2595 struct pci_dev *pdev = to_pci_dev(dev);
2596
David Woodhouse18436af2015-03-25 15:05:47 +00002597 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002598 return false;
2599 }
2600
2601 return true;
2602}
2603
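/*
 * Decide whether a device should live in the static identity (1:1)
 * domain.  The answer depends on the IDENTMAP_* policy bits, on whether
 * the device has RMRRs, on legacy PCI topology (devices behind a
 * PCI-to-PCI bridge share a source-id) and, after boot, on whether the
 * device's DMA mask can actually reach all of memory.
 */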
David Woodhouse3bdb2592014-03-09 16:03:08 -07002604static int iommu_should_identity_map(struct device *dev, int startup)
David Woodhouse6941af22009-07-04 18:24:27 +01002605{
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002606
David Woodhouse3bdb2592014-03-09 16:03:08 -07002607 if (dev_is_pci(dev)) {
2608 struct pci_dev *pdev = to_pci_dev(dev);
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002609
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002610 if (device_is_rmrr_locked(dev))
David Woodhouse3bdb2592014-03-09 16:03:08 -07002611 return 0;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002612
David Woodhouse3bdb2592014-03-09 16:03:08 -07002613 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2614 return 1;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002615
David Woodhouse3bdb2592014-03-09 16:03:08 -07002616 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2617 return 1;
2618
2619 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2620 return 0;
2621
2622 /*
2623 * We want to start off with all devices in the 1:1 domain, and
2624 * take them out later if we find they can't access all of memory.
2625 *
2626 * However, we can't do this for PCI devices behind bridges,
2627 * because all PCI devices behind the same bridge will end up
2628 * with the same source-id on their transactions.
2629 *
2630 * Practically speaking, we can't change things around for these
2631 * devices at run-time, because we can't be sure there'll be no
2632 * DMA transactions in flight for any of their siblings.
2633 *
2634 * So PCI devices (unless they're on the root bus) as well as
2635 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2636 * the 1:1 domain, just in _case_ one of their siblings turns out
2637 * not to be able to map all of memory.
2638 */
2639 if (!pci_is_pcie(pdev)) {
2640 if (!pci_is_root_bus(pdev->bus))
2641 return 0;
2642 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2643 return 0;
2644 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2645 return 0;
2646 } else {
2647 if (device_has_rmrr(dev))
2648 return 0;
2649 }
David Woodhouse6941af22009-07-04 18:24:27 +01002650
David Woodhouse3dfc8132009-07-04 19:11:08 +01002651 /*
David Woodhouse3dfc8132009-07-04 19:11:08 +01002652 * At boot time, we don't yet know if devices will be 64-bit capable.
David Woodhouse3bdb2592014-03-09 16:03:08 -07002653 * Assume that they will — if they turn out not to be, then we can
David Woodhouse3dfc8132009-07-04 19:11:08 +01002654 * take them out of the 1:1 domain later.
2655 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002656 if (!startup) {
2657 /*
2658 * If the device's dma_mask is less than the system's memory
2659 * size then this is not a candidate for identity mapping.
2660 */
David Woodhouse3bdb2592014-03-09 16:03:08 -07002661 u64 dma_mask = *dev->dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002662
David Woodhouse3bdb2592014-03-09 16:03:08 -07002663 if (dev->coherent_dma_mask &&
2664 dev->coherent_dma_mask < dma_mask)
2665 dma_mask = dev->coherent_dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002666
David Woodhouse3bdb2592014-03-09 16:03:08 -07002667 return dma_mask >= dma_get_required_mask(dev);
Chris Wright8fcc5372011-05-28 13:15:02 -05002668 }
David Woodhouse6941af22009-07-04 18:24:27 +01002669
2670 return 1;
2671}
2672
David Woodhousecf04eee2014-03-21 16:49:04 +00002673static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2674{
2675 int ret;
2676
2677 if (!iommu_should_identity_map(dev, 1))
2678 return 0;
2679
2680 ret = domain_add_dev_info(si_domain, dev,
2681 hw ? CONTEXT_TT_PASS_THROUGH :
2682 CONTEXT_TT_MULTI_LEVEL);
2683 if (!ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002684 pr_info("%s identity mapping for device %s\n",
2685 hw ? "Hardware" : "Software", dev_name(dev));
David Woodhousecf04eee2014-03-21 16:49:04 +00002686 else if (ret == -ENODEV)
2687 /* device not associated with an iommu */
2688 ret = 0;
2689
2690 return ret;
2691}
2692
2693
Matt Kraai071e1372009-08-23 22:30:22 -07002694static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002695{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002696 struct pci_dev *pdev = NULL;
David Woodhousecf04eee2014-03-21 16:49:04 +00002697 struct dmar_drhd_unit *drhd;
2698 struct intel_iommu *iommu;
2699 struct device *dev;
2700 int i;
2701 int ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002702
David Woodhouse19943b02009-08-04 16:19:20 +01002703 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002704 if (ret)
2705 return -EFAULT;
2706
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002707 for_each_pci_dev(pdev) {
David Woodhousecf04eee2014-03-21 16:49:04 +00002708 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2709 if (ret)
2710 return ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002711 }
2712
David Woodhousecf04eee2014-03-21 16:49:04 +00002713 for_each_active_iommu(iommu, drhd)
2714 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2715 struct acpi_device_physical_node *pn;
2716 struct acpi_device *adev;
2717
2718 if (dev->bus != &acpi_bus_type)
2719 continue;
2720
2721 adev = to_acpi_device(dev);
2722 mutex_lock(&adev->physical_node_lock);
2723 list_for_each_entry(pn, &adev->physical_node_list, node) {
2724 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2725 if (ret)
2726 break;
2727 }
2728 mutex_unlock(&adev->physical_node_lock);
2729 if (ret)
2730 return ret;
2731 }
2732
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002733 return 0;
2734}
2735
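/*
 * Queued Invalidation (QI) lets the driver post context-cache and IOTLB
 * invalidation requests through an in-memory descriptor queue instead
 * of the slower register-based interface.  The capability is optional,
 * so a register-based fallback is kept.
 */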
Jiang Liuffebeb42014-11-09 22:48:02 +08002736static void intel_iommu_init_qi(struct intel_iommu *iommu)
2737{
2738 /*
2739 * Start from a sane iommu hardware state.
2740 * If queued invalidation was already initialized by us
2741 * (for example, while enabling interrupt remapping) then
2742 * things are already rolling from a sane state.
2743 */
2744 if (!iommu->qi) {
2745 /*
2746 * Clear any previous faults.
2747 */
2748 dmar_fault(-1, iommu);
2749 /*
2750 * Disable queued invalidation if supported and already enabled
2751 * before OS handover.
2752 */
2753 dmar_disable_qi(iommu);
2754 }
2755
2756 if (dmar_enable_qi(iommu)) {
2757 /*
2758 * Queued Invalidate not enabled, use Register Based Invalidate
2759 */
2760 iommu->flush.flush_context = __iommu_flush_context;
2761 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002762 pr_info("%s: Using Register based invalidation\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08002763 iommu->name);
2764 } else {
2765 iommu->flush.flush_context = qi_flush_context;
2766 iommu->flush.flush_iotlb = qi_flush_iotlb;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002767 pr_info("%s: Using Queued invalidation\n", iommu->name);
Jiang Liuffebeb42014-11-09 22:48:02 +08002768 }
2769}
2770
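/*
 * init_dmars() is the one-shot boot-time setup path: it sizes and
 * allocates the global IOMMU and deferred-flush arrays, sets up
 * invalidation, domains and root entries for every active IOMMU,
 * installs the static identity, RMRR and ISA mappings, and finally
 * enables fault reporting and translation.
 */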
Joseph Cihulab7792602011-05-03 00:08:37 -07002771static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002772{
2773 struct dmar_drhd_unit *drhd;
2774 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002775 struct device *dev;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002776 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002777 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002778
2779 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002780 * for each drhd
2781 * allocate root
2782 * initialize and program root entry to not present
2783 * endfor
2784 */
2785 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002786 /*
2787 * lock not needed: this is only incremented in the
2788 * single-threaded kernel __init code path; all other
2789 * access is read-only
2790 */
Jiang Liu78d8e702014-11-09 22:47:57 +08002791 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
Mike Travis1b198bb2012-03-05 15:05:16 -08002792 g_num_of_iommus++;
2793 continue;
2794 }
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002795 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08002796 }
2797
Jiang Liuffebeb42014-11-09 22:48:02 +08002798 /* Preallocate enough resources for IOMMU hot-addition */
2799 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
2800 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
2801
Weidong Hand9630fe2008-12-08 11:06:32 +08002802 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2803 GFP_KERNEL);
2804 if (!g_iommus) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002805 pr_err("Allocating global iommu array failed\n");
Weidong Hand9630fe2008-12-08 11:06:32 +08002806 ret = -ENOMEM;
2807 goto error;
2808 }
2809
mark gross80b20dd2008-04-18 13:53:58 -07002810 deferred_flush = kzalloc(g_num_of_iommus *
2811 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2812 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002813 ret = -ENOMEM;
Jiang Liu989d51f2014-02-19 14:07:21 +08002814 goto free_g_iommus;
mark gross5e0d2a62008-03-04 15:22:08 -08002815 }
2816
Jiang Liu7c919772014-01-06 14:18:18 +08002817 for_each_active_iommu(iommu, drhd) {
Weidong Hand9630fe2008-12-08 11:06:32 +08002818 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002819
Joerg Roedelb63d80d2015-06-12 09:14:34 +02002820 intel_iommu_init_qi(iommu);
2821
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002822 ret = iommu_init_domains(iommu);
2823 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002824 goto free_iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002825
Joerg Roedel4158c2e2015-06-12 10:14:02 +02002826 init_translation_status(iommu);
2827
2828 if (translation_pre_enabled(iommu))
2829 pr_info("Translation already enabled - trying to copy translation structures\n");
2830
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002831 /*
2832 * TBD:
2833 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002834 * among all IOMMUs. Need to split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002835 */
2836 ret = iommu_alloc_root_entry(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08002837 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002838 goto free_iommu;
Joerg Roedel5f0a7f72015-06-12 09:18:53 +02002839
2840 iommu_flush_write_buffer(iommu);
2841 iommu_set_root_entry(iommu);
2842 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2843 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2844
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002845 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002846 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002847 }
2848
David Woodhouse19943b02009-08-04 16:19:20 +01002849 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07002850 iommu_identity_mapping |= IDENTMAP_ALL;
2851
Suresh Siddhad3f13812011-08-23 17:05:25 -07002852#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07002853 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002854#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07002855
2856 check_tylersburg_isoch();
2857
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002858 /*
2859 * If pass-through is not set or not enabled, set up context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002860 * identity mappings for rmrr, gfx and isa, possibly falling back to the
2861 * static identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002862 */
David Woodhouse19943b02009-08-04 16:19:20 +01002863 if (iommu_identity_mapping) {
2864 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2865 if (ret) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002866 pr_crit("Failed to setup IOMMU pass-through\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08002867 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002868 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002869 }
David Woodhouse19943b02009-08-04 16:19:20 +01002870 /*
2871 * For each rmrr
2872 * for each dev attached to rmrr
2873 * do
2874 * locate drhd for dev, alloc domain for dev
2875 * allocate free domain
2876 * allocate page table entries for rmrr
2877 * if context not allocated for bus
2878 * allocate and init context
2879 * set present in root table for this bus
2880 * init context with domain, translation etc
2881 * endfor
2882 * endfor
2883 */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002884 pr_info("Setting RMRR:\n");
David Woodhouse19943b02009-08-04 16:19:20 +01002885 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002886 /* some BIOSes list non-existent devices in the DMAR table. */
2887 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
David Woodhouse832bd852014-03-07 15:08:36 +00002888 i, dev) {
David Woodhouse0b9d9752014-03-09 15:48:15 -07002889 ret = iommu_prepare_rmrr_dev(rmrr, dev);
David Woodhouse19943b02009-08-04 16:19:20 +01002890 if (ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002891 pr_err("Mapping reserved region failed\n");
David Woodhouse19943b02009-08-04 16:19:20 +01002892 }
2893 }
2894
2895 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002896
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002897 /*
2898 * for each drhd
2899 * enable fault log
2900 * global invalidate context cache
2901 * global invalidate iotlb
2902 * enable translation
2903 */
Jiang Liu7c919772014-01-06 14:18:18 +08002904 for_each_iommu(iommu, drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07002905 if (drhd->ignored) {
2906 /*
2907 * we always have to disable PMRs or DMA may fail on
2908 * this device
2909 */
2910 if (force_on)
Jiang Liu7c919772014-01-06 14:18:18 +08002911 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002912 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07002913 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002914
2915 iommu_flush_write_buffer(iommu);
2916
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002917 ret = dmar_set_interrupt(iommu);
2918 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002919 goto free_iommu;
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002920
Jiang Liu2a41cce2014-07-11 14:19:33 +08002921 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07002922 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002923 }
2924
2925 return 0;
Jiang Liu989d51f2014-02-19 14:07:21 +08002926
2927free_iommu:
Jiang Liuffebeb42014-11-09 22:48:02 +08002928 for_each_active_iommu(iommu, drhd) {
2929 disable_dmar_iommu(iommu);
Jiang Liua868e6b2014-01-06 14:18:20 +08002930 free_dmar_iommu(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08002931 }
Jiang Liu9bdc5312014-01-06 14:18:27 +08002932 kfree(deferred_flush);
Jiang Liu989d51f2014-02-19 14:07:21 +08002933free_g_iommus:
Weidong Hand9630fe2008-12-08 11:06:32 +08002934 kfree(g_iommus);
Jiang Liu989d51f2014-02-19 14:07:21 +08002935error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002936 return ret;
2937}
2938
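/*
 * Note on units: "MM pages" are PAGE_SIZE-sized kernel pages, while
 * VT-d page-table entries always describe VTD_PAGE_SIZE (4KiB) pages;
 * the mm_to_dma_pfn()/dma_to_mm_pfn() helpers convert between the two
 * frame-number spaces.
 */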
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002939/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002940static struct iova *intel_alloc_iova(struct device *dev,
2941 struct dmar_domain *domain,
2942 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002943{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002944 struct iova *iova = NULL;
2945
David Woodhouse875764d2009-06-28 21:20:51 +01002946 /* Restrict dma_mask to the width that the iommu can handle */
2947 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2948
2949 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002950 /*
2951 * First try to allocate an I/O virtual address below
Yang Hongyang284901a2009-04-06 19:01:15 -07002952 * DMA_BIT_MASK(32); if that fails, try allocating from the
Joe Perches36098012007-12-17 11:40:11 -08002953 * higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002954 */
David Woodhouse875764d2009-06-28 21:20:51 +01002955 iova = alloc_iova(&domain->iovad, nrpages,
2956 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2957 if (iova)
2958 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002959 }
David Woodhouse875764d2009-06-28 21:20:51 +01002960 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2961 if (unlikely(!iova)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002962 pr_err("Allocating %ld-page iova for %s failed\n",
David Woodhouse207e3592014-03-09 16:12:32 -07002963 nrpages, dev_name(dev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002964 return NULL;
2965 }
2966
2967 return iova;
2968}
2969
David Woodhoused4b709f2014-03-09 16:07:40 -07002970static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002971{
2972 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002973 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002974
David Woodhoused4b709f2014-03-09 16:07:40 -07002975 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002976 if (!domain) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002977 pr_err("Allocating domain for %s failed\n",
David Woodhoused4b709f2014-03-09 16:07:40 -07002978 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002979 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002980 }
2981
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002982 /* make sure context mapping is ok */
David Woodhoused4b709f2014-03-09 16:07:40 -07002983 if (unlikely(!domain_context_mapped(dev))) {
2984 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002985 if (ret) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002986 pr_err("Domain context map for %s failed\n",
David Woodhoused4b709f2014-03-09 16:07:40 -07002987 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002988 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002989 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002990 }
2991
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002992 return domain;
2993}
2994
David Woodhoused4b709f2014-03-09 16:07:40 -07002995static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
David Woodhouse147202a2009-07-07 19:43:20 +01002996{
2997 struct device_domain_info *info;
2998
2999 /* No lock here, assumes no domain exit in normal case */
David Woodhoused4b709f2014-03-09 16:07:40 -07003000 info = dev->archdata.iommu;
David Woodhouse147202a2009-07-07 19:43:20 +01003001 if (likely(info))
3002 return info->domain;
3003
3004 return __get_valid_domain_for_dev(dev);
3005}
3006
David Woodhouseecb509e2014-03-09 16:29:55 -07003007/* Check if the dev needs to go through non-identity map and unmap process.*/
David Woodhouse73676832009-07-04 14:08:36 +01003008static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003009{
3010 int found;
3011
David Woodhouse3d891942014-03-06 15:59:26 +00003012 if (iommu_dummy(dev))
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003013 return 1;
3014
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003015 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003016 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003017
David Woodhouse9b226622014-03-09 14:03:28 -07003018 found = identity_mapping(dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003019 if (found) {
David Woodhouseecb509e2014-03-09 16:29:55 -07003020 if (iommu_should_identity_map(dev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003021 return 1;
3022 else {
3023 /*
3024 * 32 bit DMA is removed from si_domain and fall back
3025 * to non-identity mapping.
3026 */
David Woodhousebf9c9ed2014-03-09 16:19:13 -07003027 domain_remove_one_dev_info(si_domain, dev);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003028 pr_info("32bit %s uses non-identity mapping\n",
3029 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003030 return 0;
3031 }
3032 } else {
3033 /*
3034 * In case of a detached 64 bit DMA device from vm, the device
3035 * is put into si_domain for identity mapping.
3036 */
David Woodhouseecb509e2014-03-09 16:29:55 -07003037 if (iommu_should_identity_map(dev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003038 int ret;
David Woodhouse5913c9b2014-03-09 16:27:31 -07003039 ret = domain_add_dev_info(si_domain, dev,
David Woodhouse5fe60f42009-08-09 10:53:41 +01003040 hw_pass_through ?
3041 CONTEXT_TT_PASS_THROUGH :
3042 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003043 if (!ret) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003044 pr_info("64bit %s uses identity mapping\n",
3045 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003046 return 1;
3047 }
3048 }
3049 }
3050
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003051 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003052}
3053
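/*
 * Core streaming-DMA map path: bypass translation entirely for
 * identity-mapped devices; otherwise find (or build) the device's
 * domain, carve an IOVA range out of its allocator, install the page
 * table entries with the right read/write permissions, and flush the
 * IOTLB (caching mode) or the write buffer.
 */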
David Woodhouse5040a912014-03-09 16:14:00 -07003054static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003055 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003056{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003057 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003058 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003059 struct iova *iova;
3060 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003061 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08003062 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07003063 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003064
3065 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003066
David Woodhouse5040a912014-03-09 16:14:00 -07003067 if (iommu_no_mapping(dev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003068 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003069
David Woodhouse5040a912014-03-09 16:14:00 -07003070 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003071 if (!domain)
3072 return 0;
3073
Weidong Han8c11e792008-12-08 15:29:22 +08003074 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01003075 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003076
David Woodhouse5040a912014-03-09 16:14:00 -07003077 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003078 if (!iova)
3079 goto error;
3080
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003081 /*
3082 * Check if DMAR supports zero-length reads on write only
3083 * mappings..
3084 */
3085 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003086 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003087 prot |= DMA_PTE_READ;
3088 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3089 prot |= DMA_PTE_WRITE;
3090 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003091 * paddr .. (paddr + size) might cover only part of a page; we map the
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003092 * whole page. Note: if two parts of one page are mapped separately, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003093 * might end up with two guest addresses mapping to the same host paddr,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003094 * but this is not a big problem
3095 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01003096 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07003097 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003098 if (ret)
3099 goto error;
3100
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003101 /* it's a non-present to present mapping. Only flush if caching mode */
3102 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003103 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003104 else
Weidong Han8c11e792008-12-08 15:29:22 +08003105 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003106
David Woodhouse03d6a242009-06-28 15:33:46 +01003107 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3108 start_paddr += paddr & ~PAGE_MASK;
3109 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003110
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003111error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003112 if (iova)
3113 __free_iova(&domain->iovad, iova);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003114 pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
David Woodhouse5040a912014-03-09 16:14:00 -07003115 dev_name(dev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003116 return 0;
3117}
3118
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003119static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3120 unsigned long offset, size_t size,
3121 enum dma_data_direction dir,
3122 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003123{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003124 return __intel_map_single(dev, page_to_phys(page) + offset, size,
David Woodhouse46333e32014-03-10 20:01:21 -07003125 dir, *dev->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003126}
3127
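/*
 * Deferred ("lazy") unmapping: instead of invalidating the IOTLB on
 * every unmap, freed IOVAs are queued per IOMMU and released in batches
 * from flush_unmaps(), either when HIGH_WATER_MARK entries have
 * accumulated or from a 10ms timer.  intel_iommu_strict disables this
 * batching in favour of immediate invalidation.
 */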
mark gross5e0d2a62008-03-04 15:22:08 -08003128static void flush_unmaps(void)
3129{
mark gross80b20dd2008-04-18 13:53:58 -07003130 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08003131
mark gross5e0d2a62008-03-04 15:22:08 -08003132 timer_on = 0;
3133
3134 /* just flush them all */
3135 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08003136 struct intel_iommu *iommu = g_iommus[i];
3137 if (!iommu)
3138 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003139
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003140 if (!deferred_flush[i].next)
3141 continue;
3142
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003143 /* In caching mode, global flushes make emulation expensive */
3144 if (!cap_caching_mode(iommu->cap))
3145 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08003146 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003147 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08003148 unsigned long mask;
3149 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003150 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08003151
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003152 /* On real hardware multiple invalidations are expensive */
3153 if (cap_caching_mode(iommu->cap))
3154 iommu_flush_iotlb_psi(iommu, domain->id,
Jiang Liua156ef92014-07-11 14:19:36 +08003155 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00003156 !deferred_flush[i].freelist[j], 0);
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003157 else {
Jiang Liua156ef92014-07-11 14:19:36 +08003158 mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003159 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3160 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3161 }
Yu Zhao93a23a72009-05-18 13:51:37 +08003162 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003163 if (deferred_flush[i].freelist[j])
3164 dma_free_pagelist(deferred_flush[i].freelist[j]);
mark gross80b20dd2008-04-18 13:53:58 -07003165 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003166 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003167 }
3168
mark gross5e0d2a62008-03-04 15:22:08 -08003169 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003170}
3171
3172static void flush_unmaps_timeout(unsigned long data)
3173{
mark gross80b20dd2008-04-18 13:53:58 -07003174 unsigned long flags;
3175
3176 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003177 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07003178 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003179}
3180
David Woodhouseea8ea462014-03-05 17:09:32 +00003181static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
mark gross5e0d2a62008-03-04 15:22:08 -08003182{
3183 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07003184 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08003185 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08003186
3187 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07003188 if (list_size == HIGH_WATER_MARK)
3189 flush_unmaps();
3190
Weidong Han8c11e792008-12-08 15:29:22 +08003191 iommu = domain_get_iommu(dom);
3192 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003193
mark gross80b20dd2008-04-18 13:53:58 -07003194 next = deferred_flush[iommu_id].next;
3195 deferred_flush[iommu_id].domain[next] = dom;
3196 deferred_flush[iommu_id].iova[next] = iova;
David Woodhouseea8ea462014-03-05 17:09:32 +00003197 deferred_flush[iommu_id].freelist[next] = freelist;
mark gross80b20dd2008-04-18 13:53:58 -07003198 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08003199
3200 if (!timer_on) {
3201 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3202 timer_on = 1;
3203 }
3204 list_size++;
3205 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3206}
3207
Jiang Liud41a4ad2014-07-11 14:19:34 +08003208static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003209{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003210 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003211 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003212 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003213 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003214 struct page *freelist;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003215
David Woodhouse73676832009-07-04 14:08:36 +01003216 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003217 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003218
David Woodhouse1525a292014-03-06 16:19:30 +00003219 domain = find_domain(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003220 BUG_ON(!domain);
3221
Weidong Han8c11e792008-12-08 15:29:22 +08003222 iommu = domain_get_iommu(domain);
3223
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003224 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01003225 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3226 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003227 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003228
David Woodhoused794dc92009-06-28 00:27:49 +01003229 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3230 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003231
David Woodhoused794dc92009-06-28 00:27:49 +01003232 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003233 dev_name(dev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003234
David Woodhouseea8ea462014-03-05 17:09:32 +00003235 freelist = domain_unmap(domain, start_pfn, last_pfn);
David Woodhoused794dc92009-06-28 00:27:49 +01003236
mark gross5e0d2a62008-03-04 15:22:08 -08003237 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01003238 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
David Woodhouseea8ea462014-03-05 17:09:32 +00003239 last_pfn - start_pfn + 1, !freelist, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08003240 /* free iova */
3241 __free_iova(&domain->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003242 dma_free_pagelist(freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003243 } else {
David Woodhouseea8ea462014-03-05 17:09:32 +00003244 add_unmap(domain, iova, freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003245 /*
3246 * queue up the release of the unmap to save the 1/6th of the
3247 * cpu used up by the iotlb flush operation...
3248 */
mark gross5e0d2a62008-03-04 15:22:08 -08003249 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003250}
3251
Jiang Liud41a4ad2014-07-11 14:19:34 +08003252static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3253 size_t size, enum dma_data_direction dir,
3254 struct dma_attrs *attrs)
3255{
3256 intel_unmap(dev, dev_addr);
3257}
3258
David Woodhouse5040a912014-03-09 16:14:00 -07003259static void *intel_alloc_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003260 dma_addr_t *dma_handle, gfp_t flags,
3261 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003262{
Akinobu Mita36746432014-06-04 16:06:51 -07003263 struct page *page = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003264 int order;
3265
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003266 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003267 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003268
David Woodhouse5040a912014-03-09 16:14:00 -07003269 if (!iommu_no_mapping(dev))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003270 flags &= ~(GFP_DMA | GFP_DMA32);
David Woodhouse5040a912014-03-09 16:14:00 -07003271 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3272 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003273 flags |= GFP_DMA;
3274 else
3275 flags |= GFP_DMA32;
3276 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003277
Akinobu Mita36746432014-06-04 16:06:51 -07003278 if (flags & __GFP_WAIT) {
3279 unsigned int count = size >> PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003280
Akinobu Mita36746432014-06-04 16:06:51 -07003281 page = dma_alloc_from_contiguous(dev, count, order);
3282 if (page && iommu_no_mapping(dev) &&
3283 page_to_phys(page) + size > dev->coherent_dma_mask) {
3284 dma_release_from_contiguous(dev, page, count);
3285 page = NULL;
3286 }
3287 }
3288
3289 if (!page)
3290 page = alloc_pages(flags, order);
3291 if (!page)
3292 return NULL;
3293 memset(page_address(page), 0, size);
3294
3295 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003296 DMA_BIDIRECTIONAL,
David Woodhouse5040a912014-03-09 16:14:00 -07003297 dev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003298 if (*dma_handle)
Akinobu Mita36746432014-06-04 16:06:51 -07003299 return page_address(page);
3300 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3301 __free_pages(page, order);
3302
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003303 return NULL;
3304}
3305
David Woodhouse5040a912014-03-09 16:14:00 -07003306static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003307 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003308{
3309 int order;
Akinobu Mita36746432014-06-04 16:06:51 -07003310 struct page *page = virt_to_page(vaddr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003311
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003312 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003313 order = get_order(size);
3314
Jiang Liud41a4ad2014-07-11 14:19:34 +08003315 intel_unmap(dev, dma_handle);
Akinobu Mita36746432014-06-04 16:06:51 -07003316 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3317 __free_pages(page, order);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003318}
3319
David Woodhouse5040a912014-03-09 16:14:00 -07003320static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003321 int nelems, enum dma_data_direction dir,
3322 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003323{
Jiang Liud41a4ad2014-07-11 14:19:34 +08003324 intel_unmap(dev, sglist[0].dma_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003325}
3326
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003327static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003328 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003329{
3330 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003331 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003332
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003333 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003334 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003335 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003336 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003337 }
3338 return nelems;
3339}
3340
David Woodhouse5040a912014-03-09 16:14:00 -07003341static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003342 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003343{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003344 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003345 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003346 size_t size = 0;
3347 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003348 struct iova *iova = NULL;
3349 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003350 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003351 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003352 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003353
3354 BUG_ON(dir == DMA_NONE);
David Woodhouse5040a912014-03-09 16:14:00 -07003355 if (iommu_no_mapping(dev))
3356 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003357
David Woodhouse5040a912014-03-09 16:14:00 -07003358 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003359 if (!domain)
3360 return 0;
3361
Weidong Han8c11e792008-12-08 15:29:22 +08003362 iommu = domain_get_iommu(domain);
3363
David Woodhouseb536d242009-06-28 14:49:31 +01003364 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003365 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003366
David Woodhouse5040a912014-03-09 16:14:00 -07003367 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3368 *dev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003369 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003370 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003371 return 0;
3372 }
3373
3374 /*
3375 * Check if DMAR supports zero-length reads on write only
3376 * mappings..
3377 */
3378 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003379 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003380 prot |= DMA_PTE_READ;
3381 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3382 prot |= DMA_PTE_WRITE;
3383
David Woodhouseb536d242009-06-28 14:49:31 +01003384 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003385
Fenghua Yuf5329592009-08-04 15:09:37 -07003386 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003387 if (unlikely(ret)) {
David Woodhousee1605492009-06-29 11:17:38 +01003388 dma_pte_free_pagetable(domain, start_vpfn,
3389 start_vpfn + size - 1);
David Woodhousee1605492009-06-29 11:17:38 +01003390 __free_iova(&domain->iovad, iova);
3391 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003392 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003393
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003394 /* it's a non-present to present mapping. Only flush if caching mode */
3395 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003396 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003397 else
Weidong Han8c11e792008-12-08 15:29:22 +08003398 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003399
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003400 return nelems;
3401}
3402
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003403static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3404{
3405 return !dma_addr;
3406}
3407
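/*
 * These ops back the generic DMA API for devices behind the IOMMU, so
 * an ordinary driver sequence such as the following (illustrative only;
 * buf and len are hypothetical driver-local names):
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 *
 * ends up in intel_map_page()/intel_unmap_page() above, with the
 * returned handle being an IOVA rather than a physical address.
 */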
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003408struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003409 .alloc = intel_alloc_coherent,
3410 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003411 .map_sg = intel_map_sg,
3412 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003413 .map_page = intel_map_page,
3414 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003415 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003416};
3417
3418static inline int iommu_domain_cache_init(void)
3419{
3420 int ret = 0;
3421
3422 iommu_domain_cache = kmem_cache_create("iommu_domain",
3423 sizeof(struct dmar_domain),
3424 0,
3425 SLAB_HWCACHE_ALIGN,
3427 NULL);
3428 if (!iommu_domain_cache) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003429 pr_err("Couldn't create iommu_domain cache\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003430 ret = -ENOMEM;
3431 }
3432
3433 return ret;
3434}
3435
3436static inline int iommu_devinfo_cache_init(void)
3437{
3438 int ret = 0;
3439
3440 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3441 sizeof(struct device_domain_info),
3442 0,
3443 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003444 NULL);
3445 if (!iommu_devinfo_cache) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003446 pr_err("Couldn't create devinfo cache\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003447 ret = -ENOMEM;
3448 }
3449
3450 return ret;
3451}
3452
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003453static int __init iommu_init_mempool(void)
3454{
3455 int ret;
3456 ret = iommu_iova_cache_init();
3457 if (ret)
3458 return ret;
3459
3460 ret = iommu_domain_cache_init();
3461 if (ret)
3462 goto domain_error;
3463
3464 ret = iommu_devinfo_cache_init();
3465 if (!ret)
3466 return ret;
3467
3468 kmem_cache_destroy(iommu_domain_cache);
3469domain_error:
Robin Murphy85b45452015-01-12 17:51:14 +00003470 iommu_iova_cache_destroy();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003471
3472 return -ENOMEM;
3473}
3474
3475static void __init iommu_exit_mempool(void)
3476{
3477 kmem_cache_destroy(iommu_devinfo_cache);
3478 kmem_cache_destroy(iommu_domain_cache);
Robin Murphy85b45452015-01-12 17:51:14 +00003479 iommu_iova_cache_destroy();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003480}
3481
Dan Williams556ab452010-07-23 15:47:56 -07003482static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3483{
3484 struct dmar_drhd_unit *drhd;
3485 u32 vtbar;
3486 int rc;
3487
3488 /* We know that this device on this chipset has its own IOMMU.
3489 * If we find it under a different IOMMU, then the BIOS is lying
3490 * to us. Hope that the IOMMU for this device is actually
3491 * disabled, and it needs no translation...
3492 */
3493 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3494 if (rc) {
3495 /* "can't" happen */
3496 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3497 return;
3498 }
3499 vtbar &= 0xffff0000;
3500
3501 /* we know that this iommu should be at offset 0xa000 from vtbar */
3502 drhd = dmar_find_matched_drhd_unit(pdev);
3503 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3504 TAINT_FIRMWARE_WORKAROUND,
3505 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3506 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3507}
3508DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3509
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003510static void __init init_no_remapping_devices(void)
3511{
3512 struct dmar_drhd_unit *drhd;
David Woodhouse832bd852014-03-07 15:08:36 +00003513 struct device *dev;
Jiang Liub683b232014-02-19 14:07:32 +08003514 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003515
3516 for_each_drhd_unit(drhd) {
3517 if (!drhd->include_all) {
Jiang Liub683b232014-02-19 14:07:32 +08003518 for_each_active_dev_scope(drhd->devices,
3519 drhd->devices_cnt, i, dev)
3520 break;
David Woodhouse832bd852014-03-07 15:08:36 +00003521 /* ignore DMAR unit if no devices exist */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003522 if (i == drhd->devices_cnt)
3523 drhd->ignored = 1;
3524 }
3525 }
3526
Jiang Liu7c919772014-01-06 14:18:18 +08003527 for_each_active_drhd_unit(drhd) {
Jiang Liu7c919772014-01-06 14:18:18 +08003528 if (drhd->include_all)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003529 continue;
3530
Jiang Liub683b232014-02-19 14:07:32 +08003531 for_each_active_dev_scope(drhd->devices,
3532 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003533 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003534 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003535 if (i < drhd->devices_cnt)
3536 continue;
3537
David Woodhousec0771df2011-10-14 20:59:46 +01003538 /* This IOMMU has *only* gfx devices. Either bypass it or
3539 set the gfx_mapped flag, as appropriate */
3540 if (dmar_map_gfx) {
3541 intel_iommu_gfx_mapped = 1;
3542 } else {
3543 drhd->ignored = 1;
Jiang Liub683b232014-02-19 14:07:32 +08003544 for_each_active_dev_scope(drhd->devices,
3545 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003546 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003547 }
3548 }
3549}
3550
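/*
 * Suspend/resume support: translation is disabled and the fault
 * reporting registers (FECTL/FEDATA/FEADDR/FEUADDR) are saved before
 * sleep, then restored and translation re-enabled on resume via the
 * syscore ops registered below.
 */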
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003551#ifdef CONFIG_SUSPEND
3552static int init_iommu_hw(void)
3553{
3554 struct dmar_drhd_unit *drhd;
3555 struct intel_iommu *iommu = NULL;
3556
3557 for_each_active_iommu(iommu, drhd)
3558 if (iommu->qi)
3559 dmar_reenable_qi(iommu);
3560
Joseph Cihulab7792602011-05-03 00:08:37 -07003561 for_each_iommu(iommu, drhd) {
3562 if (drhd->ignored) {
3563 /*
3564 * we always have to disable PMRs or DMA may fail on
3565 * this device
3566 */
3567 if (force_on)
3568 iommu_disable_protect_mem_regions(iommu);
3569 continue;
3570 }
3571
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003572 iommu_flush_write_buffer(iommu);
3573
3574 iommu_set_root_entry(iommu);
3575
3576 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003577 DMA_CCMD_GLOBAL_INVL);
Jiang Liu2a41cce2014-07-11 14:19:33 +08003578 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3579 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07003580 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003581 }
3582
3583 return 0;
3584}
3585
3586static void iommu_flush_all(void)
3587{
3588 struct dmar_drhd_unit *drhd;
3589 struct intel_iommu *iommu;
3590
3591 for_each_active_iommu(iommu, drhd) {
3592 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003593 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003594 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003595 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003596 }
3597}
3598
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003599static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003600{
3601 struct dmar_drhd_unit *drhd;
3602 struct intel_iommu *iommu = NULL;
3603 unsigned long flag;
3604
3605 for_each_active_iommu(iommu, drhd) {
3606 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3607 GFP_ATOMIC);
3608 if (!iommu->iommu_state)
3609 goto nomem;
3610 }
3611
3612 iommu_flush_all();
3613
3614 for_each_active_iommu(iommu, drhd) {
3615 iommu_disable_translation(iommu);
3616
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003617 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003618
3619 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3620 readl(iommu->reg + DMAR_FECTL_REG);
3621 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3622 readl(iommu->reg + DMAR_FEDATA_REG);
3623 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3624 readl(iommu->reg + DMAR_FEADDR_REG);
3625 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3626 readl(iommu->reg + DMAR_FEUADDR_REG);
3627
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003628 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003629 }
3630 return 0;
3631
3632nomem:
3633 for_each_active_iommu(iommu, drhd)
3634 kfree(iommu->iommu_state);
3635
3636 return -ENOMEM;
3637}
3638
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003639static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003640{
3641 struct dmar_drhd_unit *drhd;
3642 struct intel_iommu *iommu = NULL;
3643 unsigned long flag;
3644
3645 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003646 if (force_on)
3647 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3648 else
3649 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003650 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003651 }
3652
3653 for_each_active_iommu(iommu, drhd) {
3654
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003655 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003656
3657 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3658 iommu->reg + DMAR_FECTL_REG);
3659 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3660 iommu->reg + DMAR_FEDATA_REG);
3661 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3662 iommu->reg + DMAR_FEADDR_REG);
3663 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3664 iommu->reg + DMAR_FEUADDR_REG);
3665
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003666 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003667 }
3668
3669 for_each_active_iommu(iommu, drhd)
3670 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003671}
3672
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003673static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003674 .resume = iommu_resume,
3675 .suspend = iommu_suspend,
3676};
3677
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003678static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003679{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003680 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003681}
3682
3683#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003684static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003685#endif /* CONFIG_PM */
3686
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003687
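/*
 * DMAR table parsing callbacks: dmar_parse_one_rmrr() records each Reserved
 * Memory Region Reporting (RMRR) structure, together with its device scope,
 * on the dmar_rmrr_units list so the reserved ranges can later be identity
 * mapped for the devices that need them.
 */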
Jiang Liuc2a0b532014-11-09 22:47:56 +08003688int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003689{
3690 struct acpi_dmar_reserved_memory *rmrr;
3691 struct dmar_rmrr_unit *rmrru;
3692
3693 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3694 if (!rmrru)
3695 return -ENOMEM;
3696
3697 rmrru->hdr = header;
3698 rmrr = (struct acpi_dmar_reserved_memory *)header;
3699 rmrru->base_address = rmrr->base_address;
3700 rmrru->end_address = rmrr->end_address;
Jiang Liu2e455282014-02-19 14:07:36 +08003701 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3702 ((void *)rmrr) + rmrr->header.length,
3703 &rmrru->devices_cnt);
3704 if (rmrru->devices_cnt && rmrru->devices == NULL) {
3705 kfree(rmrru);
3706 return -ENOMEM;
3707 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003708
Jiang Liu2e455282014-02-19 14:07:36 +08003709 list_add(&rmrru->list, &dmar_rmrr_units);
3710
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003711 return 0;
3712}
3713
Jiang Liu6b197242014-11-09 22:47:58 +08003714static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
3715{
3716 struct dmar_atsr_unit *atsru;
3717 struct acpi_dmar_atsr *tmp;
3718
3719 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3720 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
3721 if (atsr->segment != tmp->segment)
3722 continue;
3723 if (atsr->header.length != tmp->header.length)
3724 continue;
3725 if (memcmp(atsr, tmp, atsr->header.length) == 0)
3726 return atsru;
3727 }
3728
3729 return NULL;
3730}
3731
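/*
 * dmar_parse_one_atsr() registers an ATS Reporting (ATSR) structure unless
 * dmar_find_atsr() shows that an identical one is already on the
 * dmar_atsr_units list.  The ACPI buffer is copied because it may be freed
 * as soon as the caller returns.
 */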
3732int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003733{
3734 struct acpi_dmar_atsr *atsr;
3735 struct dmar_atsr_unit *atsru;
3736
Jiang Liu6b197242014-11-09 22:47:58 +08003737 if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
3738 return 0;
3739
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003740 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
Jiang Liu6b197242014-11-09 22:47:58 +08003741 atsru = dmar_find_atsr(atsr);
3742 if (atsru)
3743 return 0;
3744
3745 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003746 if (!atsru)
3747 return -ENOMEM;
3748
Jiang Liu6b197242014-11-09 22:47:58 +08003749 /*
3750	 * If the memory is allocated from the slab by an ACPI _DSM method, we
3751	 * need to copy the memory content because the memory buffer will be freed

3752 * on return.
3753 */
3754 atsru->hdr = (void *)(atsru + 1);
3755 memcpy(atsru->hdr, hdr, hdr->length);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003756 atsru->include_all = atsr->flags & 0x1;
Jiang Liu2e455282014-02-19 14:07:36 +08003757 if (!atsru->include_all) {
3758 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
3759 (void *)atsr + atsr->header.length,
3760 &atsru->devices_cnt);
3761 if (atsru->devices_cnt && atsru->devices == NULL) {
3762 kfree(atsru);
3763 return -ENOMEM;
3764 }
3765 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003766
Jiang Liu0e242612014-02-19 14:07:34 +08003767 list_add_rcu(&atsru->list, &dmar_atsr_units);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003768
3769 return 0;
3770}
3771
Jiang Liu9bdc5312014-01-06 14:18:27 +08003772static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
3773{
3774 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
3775 kfree(atsru);
3776}
3777
Jiang Liu6b197242014-11-09 22:47:58 +08003778int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3779{
3780 struct acpi_dmar_atsr *atsr;
3781 struct dmar_atsr_unit *atsru;
3782
3783 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3784 atsru = dmar_find_atsr(atsr);
3785 if (atsru) {
3786 list_del_rcu(&atsru->list);
3787 synchronize_rcu();
3788 intel_iommu_free_atsr(atsru);
3789 }
3790
3791 return 0;
3792}
3793
3794int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3795{
3796 int i;
3797 struct device *dev;
3798 struct acpi_dmar_atsr *atsr;
3799 struct dmar_atsr_unit *atsru;
3800
3801 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3802 atsru = dmar_find_atsr(atsr);
3803 if (!atsru)
3804 return 0;
3805
3806 if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
3807 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
3808 i, dev)
3809 return -EBUSY;
3810
3811 return 0;
3812}
3813
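/*
 * intel_iommu_add() brings a hot-added DMAR unit online: it checks that the
 * new IOMMU matches the capabilities already relied upon (pass-through,
 * snoop control, supported superpage sizes), allocates its domain bitmap and
 * root entry, sets up the invalidation queue and fault interrupt, flushes
 * the caches, enables translation and, if a static identity domain exists,
 * attaches the new unit to it.
 */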
Jiang Liuffebeb42014-11-09 22:48:02 +08003814static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
3815{
3816 int sp, ret = 0;
3817 struct intel_iommu *iommu = dmaru->iommu;
3818
3819 if (g_iommus[iommu->seq_id])
3820 return 0;
3821
3822 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003823 pr_warn("%s: Doesn't support hardware pass through.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08003824 iommu->name);
3825 return -ENXIO;
3826 }
3827 if (!ecap_sc_support(iommu->ecap) &&
3828 domain_update_iommu_snooping(iommu)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003829 pr_warn("%s: Doesn't support snooping.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08003830 iommu->name);
3831 return -ENXIO;
3832 }
3833 sp = domain_update_iommu_superpage(iommu) - 1;
3834 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003835 pr_warn("%s: Doesn't support large page.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08003836 iommu->name);
3837 return -ENXIO;
3838 }
3839
3840 /*
3841 * Disable translation if already enabled prior to OS handover.
3842 */
3843 if (iommu->gcmd & DMA_GCMD_TE)
3844 iommu_disable_translation(iommu);
3845
3846 g_iommus[iommu->seq_id] = iommu;
3847 ret = iommu_init_domains(iommu);
3848 if (ret == 0)
3849 ret = iommu_alloc_root_entry(iommu);
3850 if (ret)
3851 goto out;
3852
3853 if (dmaru->ignored) {
3854 /*
3855 * we always have to disable PMRs or DMA may fail on this device
3856 */
3857 if (force_on)
3858 iommu_disable_protect_mem_regions(iommu);
3859 return 0;
3860 }
3861
3862 intel_iommu_init_qi(iommu);
3863 iommu_flush_write_buffer(iommu);
3864 ret = dmar_set_interrupt(iommu);
3865 if (ret)
3866 goto disable_iommu;
3867
3868 iommu_set_root_entry(iommu);
3869 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3870 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3871 iommu_enable_translation(iommu);
3872
3873 if (si_domain) {
3874 ret = iommu_attach_domain(si_domain, iommu);
3875 if (ret < 0 || si_domain->id != ret)
3876 goto disable_iommu;
3877 domain_attach_iommu(si_domain, iommu);
3878 }
3879
3880 iommu_disable_protect_mem_regions(iommu);
3881 return 0;
3882
3883disable_iommu:
3884 disable_dmar_iommu(iommu);
3885out:
3886 free_dmar_iommu(iommu);
3887 return ret;
3888}
3889
Jiang Liu6b197242014-11-09 22:47:58 +08003890int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
3891{
Jiang Liuffebeb42014-11-09 22:48:02 +08003892 int ret = 0;
3893 struct intel_iommu *iommu = dmaru->iommu;
3894
3895 if (!intel_iommu_enabled)
3896 return 0;
3897 if (iommu == NULL)
3898 return -EINVAL;
3899
3900 if (insert) {
3901 ret = intel_iommu_add(dmaru);
3902 } else {
3903 disable_dmar_iommu(iommu);
3904 free_dmar_iommu(iommu);
3905 }
3906
3907 return ret;
Jiang Liu6b197242014-11-09 22:47:58 +08003908}
3909
Jiang Liu9bdc5312014-01-06 14:18:27 +08003910static void intel_iommu_free_dmars(void)
3911{
3912 struct dmar_rmrr_unit *rmrru, *rmrr_n;
3913 struct dmar_atsr_unit *atsru, *atsr_n;
3914
3915 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
3916 list_del(&rmrru->list);
3917 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
3918 kfree(rmrru);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003919 }
3920
Jiang Liu9bdc5312014-01-06 14:18:27 +08003921 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
3922 list_del(&atsru->list);
3923 intel_iommu_free_atsr(atsru);
3924 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003925}
3926
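/*
 * dmar_find_matched_atsr_unit() decides whether ATS may be used for @dev by
 * walking up the bus hierarchy to the PCIe root port above the device and
 * checking whether that port appears in the device scope of an ATSR unit
 * (or the ATSR is marked INCLUDE_ALL) for the device's PCI segment.
 */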
3927int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3928{
Jiang Liub683b232014-02-19 14:07:32 +08003929 int i, ret = 1;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003930 struct pci_bus *bus;
David Woodhouse832bd852014-03-07 15:08:36 +00003931 struct pci_dev *bridge = NULL;
3932 struct device *tmp;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003933 struct acpi_dmar_atsr *atsr;
3934 struct dmar_atsr_unit *atsru;
3935
3936 dev = pci_physfn(dev);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003937 for (bus = dev->bus; bus; bus = bus->parent) {
Jiang Liub5f82dd2014-02-19 14:07:31 +08003938 bridge = bus->self;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003939 if (!bridge || !pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08003940 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003941 return 0;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003942 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003943 break;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003944 }
Jiang Liub5f82dd2014-02-19 14:07:31 +08003945 if (!bridge)
3946 return 0;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003947
Jiang Liu0e242612014-02-19 14:07:34 +08003948 rcu_read_lock();
Jiang Liub5f82dd2014-02-19 14:07:31 +08003949 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3950 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3951 if (atsr->segment != pci_domain_nr(dev->bus))
3952 continue;
3953
Jiang Liub683b232014-02-19 14:07:32 +08003954 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
David Woodhouse832bd852014-03-07 15:08:36 +00003955 if (tmp == &bridge->dev)
Jiang Liub683b232014-02-19 14:07:32 +08003956 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003957
3958 if (atsru->include_all)
Jiang Liub683b232014-02-19 14:07:32 +08003959 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003960 }
Jiang Liub683b232014-02-19 14:07:32 +08003961 ret = 0;
3962out:
Jiang Liu0e242612014-02-19 14:07:34 +08003963 rcu_read_unlock();
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003964
Jiang Liub683b232014-02-19 14:07:32 +08003965 return ret;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003966}
3967
Jiang Liu59ce0512014-02-19 14:07:35 +08003968int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
3969{
3970 int ret = 0;
3971 struct dmar_rmrr_unit *rmrru;
3972 struct dmar_atsr_unit *atsru;
3973 struct acpi_dmar_atsr *atsr;
3974 struct acpi_dmar_reserved_memory *rmrr;
3975
3976 if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
3977 return 0;
3978
3979 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
3980 rmrr = container_of(rmrru->hdr,
3981 struct acpi_dmar_reserved_memory, header);
3982 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3983 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
3984 ((void *)rmrr) + rmrr->header.length,
3985 rmrr->segment, rmrru->devices,
3986 rmrru->devices_cnt);
Jiang Liu27e24952014-06-20 15:08:06 +08003987			if (ret < 0)
Jiang Liu59ce0512014-02-19 14:07:35 +08003988 return ret;
3989 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
Jiang Liu27e24952014-06-20 15:08:06 +08003990 dmar_remove_dev_scope(info, rmrr->segment,
3991 rmrru->devices, rmrru->devices_cnt);
Jiang Liu59ce0512014-02-19 14:07:35 +08003992 }
3993 }
3994
3995 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3996 if (atsru->include_all)
3997 continue;
3998
3999 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4000 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4001 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4002 (void *)atsr + atsr->header.length,
4003 atsr->segment, atsru->devices,
4004 atsru->devices_cnt);
4005 if (ret > 0)
4006 break;
4007 			else if (ret < 0)
4008 return ret;
4009 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
4010 if (dmar_remove_dev_scope(info, atsr->segment,
4011 atsru->devices, atsru->devices_cnt))
4012 break;
4013 }
4014 }
4015
4016 return 0;
4017}
4018
Fenghua Yu99dcade2009-11-11 07:23:06 -08004019/*
4020 * Here we only respond to the action of a device being unbound from its
4021 * driver.
4022 *
4023 * A newly added device is not attached to its DMAR domain here yet. That
4024 * will happen when the device is mapped to an iova.
4024 */
4025static int device_notifier(struct notifier_block *nb,
4026 unsigned long action, void *data)
4027{
4028 struct device *dev = data;
Fenghua Yu99dcade2009-11-11 07:23:06 -08004029 struct dmar_domain *domain;
4030
David Woodhouse3d891942014-03-06 15:59:26 +00004031 if (iommu_dummy(dev))
David Woodhouse44cd6132009-12-02 10:18:30 +00004032 return 0;
4033
Joerg Roedel1196c2f2014-09-30 13:02:03 +02004034 if (action != BUS_NOTIFY_REMOVED_DEVICE)
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004035 return 0;
4036
David Woodhouse1525a292014-03-06 16:19:30 +00004037 domain = find_domain(dev);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004038 if (!domain)
4039 return 0;
4040
Jiang Liu3a5670e2014-02-19 14:07:33 +08004041 down_read(&dmar_global_lock);
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004042 domain_remove_one_dev_info(domain, dev);
Jiang Liuab8dfe22014-07-11 14:19:27 +08004043 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004044 domain_exit(domain);
Jiang Liu3a5670e2014-02-19 14:07:33 +08004045 up_read(&dmar_global_lock);
Alex Williamsona97590e2011-03-04 14:52:16 -07004046
Fenghua Yu99dcade2009-11-11 07:23:06 -08004047 return 0;
4048}
4049
4050static struct notifier_block device_nb = {
4051 .notifier_call = device_notifier,
4052};
4053
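/*
 * Memory hotplug notifier: when hardware pass-through is not in use, the
 * static identity domain (si_domain) mappings must track the memory map.
 * Newly onlined ranges are added with iommu_domain_identity_map(); offlined
 * ranges have their IOVAs split out, unmapped and flushed from the IOTLBs.
 */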
Jiang Liu75f05562014-02-19 14:07:37 +08004054static int intel_iommu_memory_notifier(struct notifier_block *nb,
4055 unsigned long val, void *v)
4056{
4057 struct memory_notify *mhp = v;
4058 unsigned long long start, end;
4059 unsigned long start_vpfn, last_vpfn;
4060
4061 switch (val) {
4062 case MEM_GOING_ONLINE:
4063 start = mhp->start_pfn << PAGE_SHIFT;
4064 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4065 if (iommu_domain_identity_map(si_domain, start, end)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004066 pr_warn("Failed to build identity map for [%llx-%llx]\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004067 start, end);
4068 return NOTIFY_BAD;
4069 }
4070 break;
4071
4072 case MEM_OFFLINE:
4073 case MEM_CANCEL_ONLINE:
4074 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4075 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4076 while (start_vpfn <= last_vpfn) {
4077 struct iova *iova;
4078 struct dmar_drhd_unit *drhd;
4079 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00004080 struct page *freelist;
Jiang Liu75f05562014-02-19 14:07:37 +08004081
4082 iova = find_iova(&si_domain->iovad, start_vpfn);
4083 if (iova == NULL) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004084 pr_debug("Failed get IOVA for PFN %lx\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004085 start_vpfn);
4086 break;
4087 }
4088
4089 iova = split_and_remove_iova(&si_domain->iovad, iova,
4090 start_vpfn, last_vpfn);
4091 if (iova == NULL) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004092 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004093 start_vpfn, last_vpfn);
4094 return NOTIFY_BAD;
4095 }
4096
David Woodhouseea8ea462014-03-05 17:09:32 +00004097 freelist = domain_unmap(si_domain, iova->pfn_lo,
4098 iova->pfn_hi);
4099
Jiang Liu75f05562014-02-19 14:07:37 +08004100 rcu_read_lock();
4101 for_each_active_iommu(iommu, drhd)
4102 iommu_flush_iotlb_psi(iommu, si_domain->id,
Jiang Liua156ef92014-07-11 14:19:36 +08004103 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00004104 !freelist, 0);
Jiang Liu75f05562014-02-19 14:07:37 +08004105 rcu_read_unlock();
David Woodhouseea8ea462014-03-05 17:09:32 +00004106 dma_free_pagelist(freelist);
Jiang Liu75f05562014-02-19 14:07:37 +08004107
4108 start_vpfn = iova->pfn_hi + 1;
4109 free_iova_mem(iova);
4110 }
4111 break;
4112 }
4113
4114 return NOTIFY_OK;
4115}
4116
4117static struct notifier_block intel_iommu_memory_nb = {
4118 .notifier_call = intel_iommu_memory_notifier,
4119 .priority = 0
4120};
4121
Alex Williamsona5459cf2014-06-12 16:12:31 -06004122
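/*
 * Per-IOMMU sysfs attributes.  Each DMAR unit registered through
 * iommu_device_create() exposes its version, register base address and the
 * raw capability/extended-capability registers, typically under
 * /sys/class/iommu/dmar<N>/intel-iommu/.
 */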
4123static ssize_t intel_iommu_show_version(struct device *dev,
4124 struct device_attribute *attr,
4125 char *buf)
4126{
4127 struct intel_iommu *iommu = dev_get_drvdata(dev);
4128 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4129 return sprintf(buf, "%d:%d\n",
4130 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4131}
4132static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4133
4134static ssize_t intel_iommu_show_address(struct device *dev,
4135 struct device_attribute *attr,
4136 char *buf)
4137{
4138 struct intel_iommu *iommu = dev_get_drvdata(dev);
4139 return sprintf(buf, "%llx\n", iommu->reg_phys);
4140}
4141static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4142
4143static ssize_t intel_iommu_show_cap(struct device *dev,
4144 struct device_attribute *attr,
4145 char *buf)
4146{
4147 struct intel_iommu *iommu = dev_get_drvdata(dev);
4148 return sprintf(buf, "%llx\n", iommu->cap);
4149}
4150static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4151
4152static ssize_t intel_iommu_show_ecap(struct device *dev,
4153 struct device_attribute *attr,
4154 char *buf)
4155{
4156 struct intel_iommu *iommu = dev_get_drvdata(dev);
4157 return sprintf(buf, "%llx\n", iommu->ecap);
4158}
4159static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4160
4161static struct attribute *intel_iommu_attrs[] = {
4162 &dev_attr_version.attr,
4163 &dev_attr_address.attr,
4164 &dev_attr_cap.attr,
4165 &dev_attr_ecap.attr,
4166 NULL,
4167};
4168
4169static struct attribute_group intel_iommu_group = {
4170 .name = "intel-iommu",
4171 .attrs = intel_iommu_attrs,
4172};
4173
4174const struct attribute_group *intel_iommu_groups[] = {
4175 &intel_iommu_group,
4176 NULL,
4177};
4178
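/*
 * intel_iommu_init() - main initialization path.  It parses the DMAR table
 * and device scopes, reserves IOVA ranges, builds the initial domains and
 * context entries via init_dmars(), switches dma_ops to intel_dma_ops,
 * registers the IOMMU-API ops and per-unit sysfs devices, and hooks the PCI
 * bus and memory-hotplug notifiers.  When tboot has forced VT-d on, any
 * failure along this path is fatal.
 */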
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004179int __init intel_iommu_init(void)
4180{
Jiang Liu9bdc5312014-01-06 14:18:27 +08004181 int ret = -ENODEV;
Takao Indoh3a93c842013-04-23 17:35:03 +09004182 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08004183 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004184
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004185 /* VT-d is required for a TXT/tboot launch, so enforce that */
4186 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004187
Jiang Liu3a5670e2014-02-19 14:07:33 +08004188 if (iommu_init_mempool()) {
4189 if (force_on)
4190 panic("tboot: Failed to initialize iommu memory\n");
4191 return -ENOMEM;
4192 }
4193
4194 down_write(&dmar_global_lock);
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004195 if (dmar_table_init()) {
4196 if (force_on)
4197 panic("tboot: Failed to initialize DMAR table\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004198 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004199 }
4200
Takao Indoh3a93c842013-04-23 17:35:03 +09004201 /*
4202 * Disable translation if already enabled prior to OS handover.
4203 */
Jiang Liu7c919772014-01-06 14:18:18 +08004204 for_each_active_iommu(iommu, drhd)
Takao Indoh3a93c842013-04-23 17:35:03 +09004205 if (iommu->gcmd & DMA_GCMD_TE)
4206 iommu_disable_translation(iommu);
Takao Indoh3a93c842013-04-23 17:35:03 +09004207
Suresh Siddhac2c72862011-08-23 17:05:19 -07004208 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004209 if (force_on)
4210 panic("tboot: Failed to initialize DMAR device scope\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004211 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004212 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07004213
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004214 if (no_iommu || dmar_disabled)
Jiang Liu9bdc5312014-01-06 14:18:27 +08004215 goto out_free_dmar;
Suresh Siddha2ae21012008-07-10 11:16:43 -07004216
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004217 if (list_empty(&dmar_rmrr_units))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004218 pr_info("No RMRR found\n");
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004219
4220 if (list_empty(&dmar_atsr_units))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004221 pr_info("No ATSR found\n");
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004222
Joseph Cihula51a63e62011-03-21 11:04:24 -07004223 if (dmar_init_reserved_ranges()) {
4224 if (force_on)
4225 panic("tboot: Failed to reserve iommu ranges\n");
Jiang Liu3a5670e2014-02-19 14:07:33 +08004226 goto out_free_reserved_range;
Joseph Cihula51a63e62011-03-21 11:04:24 -07004227 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004228
4229 init_no_remapping_devices();
4230
Joseph Cihulab7792602011-05-03 00:08:37 -07004231 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004232 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004233 if (force_on)
4234 panic("tboot: Failed to initialize DMARs\n");
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004235 pr_err("Initialization failed\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004236 goto out_free_reserved_range;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004237 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08004238 up_write(&dmar_global_lock);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004239 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004240
mark gross5e0d2a62008-03-04 15:22:08 -08004241 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004242#ifdef CONFIG_SWIOTLB
4243 swiotlb = 0;
4244#endif
David Woodhouse19943b02009-08-04 16:19:20 +01004245 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07004246
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004247 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004248
Alex Williamsona5459cf2014-06-12 16:12:31 -06004249 for_each_active_iommu(iommu, drhd)
4250 iommu->iommu_dev = iommu_device_create(NULL, iommu,
4251 intel_iommu_groups,
4252 iommu->name);
4253
Joerg Roedel4236d97d2011-09-06 17:56:07 +02004254 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004255 bus_register_notifier(&pci_bus_type, &device_nb);
Jiang Liu75f05562014-02-19 14:07:37 +08004256 if (si_domain && !hw_pass_through)
4257 register_memory_notifier(&intel_iommu_memory_nb);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004258
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02004259 intel_iommu_enabled = 1;
4260
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004261 return 0;
Jiang Liu9bdc5312014-01-06 14:18:27 +08004262
4263out_free_reserved_range:
4264 put_iova_domain(&reserved_iova_list);
Jiang Liu9bdc5312014-01-06 14:18:27 +08004265out_free_dmar:
4266 intel_iommu_free_dmars();
Jiang Liu3a5670e2014-02-19 14:07:33 +08004267 up_write(&dmar_global_lock);
4268 iommu_exit_mempool();
Jiang Liu9bdc5312014-01-06 14:18:27 +08004269 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004270}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07004271
Alex Williamson579305f2014-07-03 09:51:43 -06004272static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4273{
4274 struct intel_iommu *iommu = opaque;
4275
4276 iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4277 return 0;
4278}
4279
4280/*
4281 * NB - intel-iommu lacks any sort of reference counting for the users of
4282 * dependent devices. If multiple endpoints have intersecting dependent
4283 * devices, unbinding the driver from any one of them will possibly leave
4284 * the others unable to operate.
4285 */
Han, Weidong3199aa62009-02-26 17:31:12 +08004286static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004287 struct device *dev)
Han, Weidong3199aa62009-02-26 17:31:12 +08004288{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004289 if (!iommu || !dev || !dev_is_pci(dev))
Han, Weidong3199aa62009-02-26 17:31:12 +08004290 return;
4291
Alex Williamson579305f2014-07-03 09:51:43 -06004292 pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
Han, Weidong3199aa62009-02-26 17:31:12 +08004293}
4294
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004295static void domain_remove_one_dev_info(struct dmar_domain *domain,
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004296 struct device *dev)
Weidong Hanc7151a82008-12-08 22:51:37 +08004297{
Yijing Wangbca2b912013-10-31 17:26:04 +08004298 struct device_domain_info *info, *tmp;
Weidong Hanc7151a82008-12-08 22:51:37 +08004299 struct intel_iommu *iommu;
4300 unsigned long flags;
Quentin Lambert2f119c72015-02-06 10:59:53 +01004301 bool found = false;
David Woodhouse156baca2014-03-09 14:00:57 -07004302 u8 bus, devfn;
Weidong Hanc7151a82008-12-08 22:51:37 +08004303
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004304 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08004305 if (!iommu)
4306 return;
4307
4308 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wangbca2b912013-10-31 17:26:04 +08004309 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004310 if (info->iommu == iommu && info->bus == bus &&
4311 info->devfn == devfn) {
David Woodhouse109b9b02012-05-25 17:43:02 +01004312 unlink_domain_info(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004313 spin_unlock_irqrestore(&device_domain_lock, flags);
4314
Yu Zhao93a23a72009-05-18 13:51:37 +08004315 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004316 iommu_detach_dev(iommu, info->bus, info->devfn);
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004317 iommu_detach_dependent_devices(iommu, dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08004318 free_devinfo_mem(info);
4319
4320 spin_lock_irqsave(&device_domain_lock, flags);
4321
4322 if (found)
4323 break;
4324 else
4325 continue;
4326 }
4327
4328 		/* if there are no other devices under the same iommu
4329 		 * owned by this domain, clear this iommu in iommu_bmp and
4330 		 * update the iommu count and coherency
4331 */
David Woodhouse8bbc4412014-03-09 13:52:37 -07004332 if (info->iommu == iommu)
Quentin Lambert2f119c72015-02-06 10:59:53 +01004333 found = true;
Weidong Hanc7151a82008-12-08 22:51:37 +08004334 }
4335
Roland Dreier3e7abe22011-07-20 06:22:21 -07004336 spin_unlock_irqrestore(&device_domain_lock, flags);
4337
Weidong Hanc7151a82008-12-08 22:51:37 +08004338	if (!found) {
Jiang Liufb170fb2014-07-11 14:19:28 +08004339 domain_detach_iommu(domain, iommu);
4340 if (!domain_type_is_vm_or_si(domain))
4341 iommu_detach_domain(domain, iommu);
Weidong Hanc7151a82008-12-08 22:51:37 +08004342 }
Weidong Hanc7151a82008-12-08 22:51:37 +08004343}
4344
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004345static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08004346{
4347 int adjust_width;
4348
Robin Murphy0fb5fe82015-01-12 17:51:16 +00004349 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
4350 DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004351 domain_reserve_special_ranges(domain);
4352
4353 /* calculate AGAW */
4354 domain->gaw = guest_width;
4355 adjust_width = guestwidth_to_adjustwidth(guest_width);
4356 domain->agaw = width_to_agaw(adjust_width);
4357
Weidong Han5e98c4b2008-12-08 23:03:27 +08004358 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08004359 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01004360 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004361 domain->max_addr = 0;
Weidong Han5e98c4b2008-12-08 23:03:27 +08004362
4363 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07004364 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004365 if (!domain->pgd)
4366 return -ENOMEM;
4367 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4368 return 0;
4369}
4370
Joerg Roedel00a77de2015-03-26 13:43:08 +01004371static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
Kay, Allen M38717942008-09-09 18:37:29 +03004372{
Joerg Roedel5d450802008-12-03 14:52:32 +01004373 struct dmar_domain *dmar_domain;
Joerg Roedel00a77de2015-03-26 13:43:08 +01004374 struct iommu_domain *domain;
4375
4376 if (type != IOMMU_DOMAIN_UNMANAGED)
4377 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004378
Jiang Liuab8dfe22014-07-11 14:19:27 +08004379 dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
Joerg Roedel5d450802008-12-03 14:52:32 +01004380 if (!dmar_domain) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004381 pr_err("Can't allocate dmar_domain\n");
Joerg Roedel00a77de2015-03-26 13:43:08 +01004382 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004383 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004384 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004385 pr_err("Domain initialization failed\n");
Jiang Liu92d03cc2014-02-19 14:07:28 +08004386 domain_exit(dmar_domain);
Joerg Roedel00a77de2015-03-26 13:43:08 +01004387 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004388 }
Allen Kay8140a952011-10-14 12:32:17 -07004389 domain_update_iommu_cap(dmar_domain);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004390
Joerg Roedel00a77de2015-03-26 13:43:08 +01004391 domain = &dmar_domain->domain;
Joerg Roedel8a0e7152012-01-26 19:40:54 +01004392 domain->geometry.aperture_start = 0;
4393 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4394 domain->geometry.force_aperture = true;
4395
Joerg Roedel00a77de2015-03-26 13:43:08 +01004396 return domain;
Kay, Allen M38717942008-09-09 18:37:29 +03004397}
Kay, Allen M38717942008-09-09 18:37:29 +03004398
Joerg Roedel00a77de2015-03-26 13:43:08 +01004399static void intel_iommu_domain_free(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004400{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004401 domain_exit(to_dmar_domain(domain));
Kay, Allen M38717942008-09-09 18:37:29 +03004402}
Kay, Allen M38717942008-09-09 18:37:29 +03004403
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004404static int intel_iommu_attach_device(struct iommu_domain *domain,
4405 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004406{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004407 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004408 struct intel_iommu *iommu;
4409 int addr_width;
David Woodhouse156baca2014-03-09 14:00:57 -07004410 u8 bus, devfn;
Kay, Allen M38717942008-09-09 18:37:29 +03004411
Alex Williamsonc875d2c2014-07-03 09:57:02 -06004412 if (device_is_rmrr_locked(dev)) {
4413 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
4414 return -EPERM;
4415 }
4416
David Woodhouse7207d8f2014-03-09 16:31:06 -07004417 /* normally dev is not mapped */
4418 if (unlikely(domain_context_mapped(dev))) {
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004419 struct dmar_domain *old_domain;
4420
David Woodhouse1525a292014-03-06 16:19:30 +00004421 old_domain = find_domain(dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004422 if (old_domain) {
Jiang Liuab8dfe22014-07-11 14:19:27 +08004423 if (domain_type_is_vm_or_si(dmar_domain))
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004424 domain_remove_one_dev_info(old_domain, dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004425 else
4426 domain_remove_dev_info(old_domain);
Joerg Roedel62c22162014-12-09 12:56:45 +01004427
4428 if (!domain_type_is_vm_or_si(old_domain) &&
4429 list_empty(&old_domain->devices))
4430 domain_exit(old_domain);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004431 }
4432 }
4433
David Woodhouse156baca2014-03-09 14:00:57 -07004434 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004435 if (!iommu)
4436 return -ENODEV;
4437
4438 /* check if this iommu agaw is sufficient for max mapped address */
4439 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01004440 if (addr_width > cap_mgaw(iommu->cap))
4441 addr_width = cap_mgaw(iommu->cap);
4442
4443 if (dmar_domain->max_addr > (1LL << addr_width)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004444		pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01004446		       __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004447 return -EFAULT;
4448 }
Tom Lyona99c47a2010-05-17 08:20:45 +01004449 dmar_domain->gaw = addr_width;
4450
4451 /*
4452 * Knock out extra levels of page tables if necessary
4453 */
4454 while (iommu->agaw < dmar_domain->agaw) {
4455 struct dma_pte *pte;
4456
4457 pte = dmar_domain->pgd;
4458 if (dma_pte_present(pte)) {
Sheng Yang25cbff12010-06-12 19:21:42 +08004459 dmar_domain->pgd = (struct dma_pte *)
4460 phys_to_virt(dma_pte_addr(pte));
Jan Kiszka7a661012010-11-02 08:05:51 +01004461 free_pgtable_page(pte);
Tom Lyona99c47a2010-05-17 08:20:45 +01004462 }
4463 dmar_domain->agaw--;
4464 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004465
David Woodhouse5913c9b2014-03-09 16:27:31 -07004466 return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004467}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004468
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004469static void intel_iommu_detach_device(struct iommu_domain *domain,
4470 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004471{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004472 domain_remove_one_dev_info(to_dmar_domain(domain), dev);
Kay, Allen M38717942008-09-09 18:37:29 +03004473}
Kay, Allen M38717942008-09-09 18:37:29 +03004474
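/*
 * intel_iommu_map()/intel_iommu_unmap() back the generic IOMMU API for
 * unmanaged domains.  A minimal sketch of how a caller (e.g. VFIO-style
 * code) would reach them through the iommu layer -- illustrative only,
 * error handling omitted:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *
 *	iommu_attach_device(dom, &pdev->dev);
 *	iommu_map(dom, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(dom, iova, size);
 *	iommu_detach_device(dom, &pdev->dev);
 *	iommu_domain_free(dom);
 */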
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004475static int intel_iommu_map(struct iommu_domain *domain,
4476 unsigned long iova, phys_addr_t hpa,
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004477 size_t size, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03004478{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004479 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004480 u64 max_addr;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004481 int prot = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004482 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004483
Joerg Roedeldde57a22008-12-03 15:04:09 +01004484 if (iommu_prot & IOMMU_READ)
4485 prot |= DMA_PTE_READ;
4486 if (iommu_prot & IOMMU_WRITE)
4487 prot |= DMA_PTE_WRITE;
Sheng Yang9cf066972009-03-18 15:33:07 +08004488 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4489 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004490
David Woodhouse163cc522009-06-28 00:51:17 +01004491 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004492 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004493 u64 end;
4494
4495 /* check if minimum agaw is sufficient for mapped address */
Tom Lyon8954da12010-05-17 08:19:52 +01004496 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004497 if (end < max_addr) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004498			pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
Tom Lyon8954da12010-05-17 08:19:52 +01004500			       __func__, dmar_domain->gaw, max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004501 return -EFAULT;
4502 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01004503 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004504 }
David Woodhousead051222009-06-28 14:22:28 +01004505 /* Round up size to next multiple of PAGE_SIZE, if it and
4506 the low bits of hpa would take us onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01004507 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01004508 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4509 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004510 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03004511}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004512
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004513static size_t intel_iommu_unmap(struct iommu_domain *domain,
David Woodhouseea8ea462014-03-05 17:09:32 +00004514 unsigned long iova, size_t size)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004515{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004516 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
David Woodhouseea8ea462014-03-05 17:09:32 +00004517 struct page *freelist = NULL;
4518 struct intel_iommu *iommu;
4519 unsigned long start_pfn, last_pfn;
4520 unsigned int npages;
4521 int iommu_id, num, ndomains, level = 0;
Sheng Yang4b99d352009-07-08 11:52:52 +01004522
David Woodhouse5cf0a762014-03-19 16:07:49 +00004523 /* Cope with horrid API which requires us to unmap more than the
4524 size argument if it happens to be a large-page mapping. */
4525 if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
4526 BUG();
4527
4528 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
4529 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
4530
David Woodhouseea8ea462014-03-05 17:09:32 +00004531 start_pfn = iova >> VTD_PAGE_SHIFT;
4532 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
4533
4534 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
4535
4536 npages = last_pfn - start_pfn + 1;
4537
4538 for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
4539 iommu = g_iommus[iommu_id];
4540
4541 /*
4542 * find bit position of dmar_domain
4543 */
4544 ndomains = cap_ndoms(iommu->cap);
4545 for_each_set_bit(num, iommu->domain_ids, ndomains) {
4546 if (iommu->domains[num] == dmar_domain)
4547 iommu_flush_iotlb_psi(iommu, num, start_pfn,
4548 npages, !freelist, 0);
4549 }
4550
4551 }
4552
4553 dma_free_pagelist(freelist);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004554
David Woodhouse163cc522009-06-28 00:51:17 +01004555 if (dmar_domain->max_addr == iova + size)
4556 dmar_domain->max_addr = iova;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004557
David Woodhouse5cf0a762014-03-19 16:07:49 +00004558 return size;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004559}
Kay, Allen M38717942008-09-09 18:37:29 +03004560
Joerg Roedeld14d6572008-12-03 15:06:57 +01004561static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
Varun Sethibb5547ac2013-03-29 01:23:58 +05304562 dma_addr_t iova)
Kay, Allen M38717942008-09-09 18:37:29 +03004563{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004564 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Kay, Allen M38717942008-09-09 18:37:29 +03004565 struct dma_pte *pte;
David Woodhouse5cf0a762014-03-19 16:07:49 +00004566 int level = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004567 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004568
David Woodhouse5cf0a762014-03-19 16:07:49 +00004569 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
Kay, Allen M38717942008-09-09 18:37:29 +03004570 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004571 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03004572
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004573 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03004574}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004575
Joerg Roedel5d587b82014-09-05 10:50:45 +02004576static bool intel_iommu_capable(enum iommu_cap cap)
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004577{
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004578 if (cap == IOMMU_CAP_CACHE_COHERENCY)
Joerg Roedel5d587b82014-09-05 10:50:45 +02004579 return domain_update_iommu_snooping(NULL) == 1;
Tom Lyon323f99c2010-07-02 16:56:14 -04004580 if (cap == IOMMU_CAP_INTR_REMAP)
Joerg Roedel5d587b82014-09-05 10:50:45 +02004581 return irq_remapping_enabled == 1;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004582
Joerg Roedel5d587b82014-09-05 10:50:45 +02004583 return false;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004584}
4585
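/*
 * intel_iommu_add_device() / intel_iommu_remove_device() tie newly probed
 * devices to their IOMMU: the device is linked to the DMAR unit's sysfs
 * node and placed into (or removed from) an iommu_group, which generic
 * code such as VFIO uses for isolation decisions.
 */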
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004586static int intel_iommu_add_device(struct device *dev)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004587{
Alex Williamsona5459cf2014-06-12 16:12:31 -06004588 struct intel_iommu *iommu;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004589 struct iommu_group *group;
David Woodhouse156baca2014-03-09 14:00:57 -07004590 u8 bus, devfn;
Alex Williamson70ae6f02011-10-21 15:56:11 -04004591
Alex Williamsona5459cf2014-06-12 16:12:31 -06004592 iommu = device_to_iommu(dev, &bus, &devfn);
4593 if (!iommu)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004594 return -ENODEV;
4595
Alex Williamsona5459cf2014-06-12 16:12:31 -06004596 iommu_device_link(iommu->iommu_dev, dev);
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004597
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004598 group = iommu_group_get_for_dev(dev);
Alex Williamson783f1572012-05-30 14:19:43 -06004599
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004600 if (IS_ERR(group))
4601 return PTR_ERR(group);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004602
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004603 iommu_group_put(group);
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004604 return 0;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004605}
4606
4607static void intel_iommu_remove_device(struct device *dev)
4608{
Alex Williamsona5459cf2014-06-12 16:12:31 -06004609 struct intel_iommu *iommu;
4610 u8 bus, devfn;
4611
4612 iommu = device_to_iommu(dev, &bus, &devfn);
4613 if (!iommu)
4614 return;
4615
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004616 iommu_group_remove_device(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004617
4618 iommu_device_unlink(iommu->iommu_dev, dev);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004619}
4620
Thierry Redingb22f6432014-06-27 09:03:12 +02004621static const struct iommu_ops intel_iommu_ops = {
Joerg Roedel5d587b82014-09-05 10:50:45 +02004622 .capable = intel_iommu_capable,
Joerg Roedel00a77de2015-03-26 13:43:08 +01004623 .domain_alloc = intel_iommu_domain_alloc,
4624 .domain_free = intel_iommu_domain_free,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004625 .attach_dev = intel_iommu_attach_device,
4626 .detach_dev = intel_iommu_detach_device,
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004627 .map = intel_iommu_map,
4628 .unmap = intel_iommu_unmap,
Olav Haugan315786e2014-10-25 09:55:16 -07004629 .map_sg = default_iommu_map_sg,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004630 .iova_to_phys = intel_iommu_iova_to_phys,
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004631 .add_device = intel_iommu_add_device,
4632 .remove_device = intel_iommu_remove_device,
Ohad Ben-Cohen6d1c56a2011-11-10 11:32:30 +02004633 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004634};
David Woodhouse9af88142009-02-13 23:18:03 +00004635
Daniel Vetter94526182013-01-20 23:50:13 +01004636static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4637{
4638 /* G4x/GM45 integrated gfx dmar support is totally busted. */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004639 pr_info("Disabling IOMMU for graphics on this chipset\n");
Daniel Vetter94526182013-01-20 23:50:13 +01004640 dmar_map_gfx = 0;
4641}
4642
4643DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4644DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4645DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4646DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4647DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4648DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4649DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4650
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004651static void quirk_iommu_rwbf(struct pci_dev *dev)
David Woodhouse9af88142009-02-13 23:18:03 +00004652{
4653 /*
4654 * Mobile 4 Series Chipset neglects to set RWBF capability,
Daniel Vetter210561f2013-01-21 19:48:59 +01004655 * but needs it. Same seems to hold for the desktop versions.
David Woodhouse9af88142009-02-13 23:18:03 +00004656 */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004657 pr_info("Forcing write-buffer flush capability\n");
David Woodhouse9af88142009-02-13 23:18:03 +00004658 rwbf_quirk = 1;
4659}
4660
4661DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
Daniel Vetter210561f2013-01-21 19:48:59 +01004662DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4663DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4664DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4665DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4666DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4667DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
David Woodhousee0fc7e02009-09-30 09:12:17 -07004668
Adam Jacksoneecfd572010-08-25 21:17:34 +01004669#define GGC 0x52
4670#define GGC_MEMORY_SIZE_MASK (0xf << 8)
4671#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4672#define GGC_MEMORY_SIZE_1M (0x1 << 8)
4673#define GGC_MEMORY_SIZE_2M (0x3 << 8)
4674#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4675#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4676#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4677#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
4678
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004679static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
David Woodhouse9eecabc2010-09-21 22:28:23 +01004680{
4681 unsigned short ggc;
4682
Adam Jacksoneecfd572010-08-25 21:17:34 +01004683 if (pci_read_config_word(dev, GGC, &ggc))
David Woodhouse9eecabc2010-09-21 22:28:23 +01004684 return;
4685
Adam Jacksoneecfd572010-08-25 21:17:34 +01004686 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004687 pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
David Woodhouse9eecabc2010-09-21 22:28:23 +01004688 dmar_map_gfx = 0;
David Woodhouse6fbcfb32011-09-25 19:11:14 -07004689 } else if (dmar_map_gfx) {
4690 /* we have to ensure the gfx device is idle before we flush */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004691 pr_info("Disabling batched IOTLB flush on Ironlake\n");
David Woodhouse6fbcfb32011-09-25 19:11:14 -07004692 intel_iommu_strict = 1;
4693 }
David Woodhouse9eecabc2010-09-21 22:28:23 +01004694}
4695DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4696DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4697DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4698DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4699
David Woodhousee0fc7e02009-09-30 09:12:17 -07004700/* On Tylersburg chipsets, some BIOSes have been known to enable the
4701 ISOCH DMAR unit for the Azalia sound device, but not give it any
4702 TLB entries, which causes it to deadlock. Check for that. We do
4703 this in a function called from init_dmars(), instead of in a PCI
4704 quirk, because we don't want to print the obnoxious "BIOS broken"
4705 message if VT-d is actually disabled.
4706*/
4707static void __init check_tylersburg_isoch(void)
4708{
4709 struct pci_dev *pdev;
4710 uint32_t vtisochctrl;
4711
4712 /* If there's no Azalia in the system anyway, forget it. */
4713 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4714 if (!pdev)
4715 return;
4716 pci_dev_put(pdev);
4717
4718 /* System Management Registers. Might be hidden, in which case
4719 we can't do the sanity check. But that's OK, because the
4720 known-broken BIOSes _don't_ actually hide it, so far. */
4721 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4722 if (!pdev)
4723 return;
4724
4725 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4726 pci_dev_put(pdev);
4727 return;
4728 }
4729
4730 pci_dev_put(pdev);
4731
4732 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4733 if (vtisochctrl & 1)
4734 return;
4735
4736 /* Drop all bits other than the number of TLB entries */
4737 vtisochctrl &= 0x1c;
4738
4739 /* If we have the recommended number of TLB entries (16), fine. */
4740 if (vtisochctrl == 0x10)
4741 return;
4742
4743 /* Zero TLB entries? You get to ride the short bus to school. */
4744 if (!vtisochctrl) {
4745 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4746 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4747 dmi_get_system_info(DMI_BIOS_VENDOR),
4748 dmi_get_system_info(DMI_BIOS_VERSION),
4749 dmi_get_system_info(DMI_PRODUCT_VERSION));
4750 iommu_identity_mapping |= IDENTMAP_AZALIA;
4751 return;
4752 }
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004753
4754 pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
David Woodhousee0fc7e02009-09-30 09:12:17 -07004755 vtisochctrl);
4756}