/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 *          Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"DMAR: " fmt

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/crash_dump.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

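/*
 * Worked example for DOMAIN_MAX_PFN(): with gaw == 48 and 4KiB VT-d pages,
 * __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1, which already fits in an
 * unsigned long on 64-bit; on 32-bit builds the min_t() clamps it to
 * ULONG_MAX instead.
 */
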
/* IO virtual address start page frame number */
#define IOVA_START_PFN		(1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)

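/*
 * Adjusted guest address width (AGAW) helpers. The AGAW encodes how many
 * page-table levels a domain uses: width = 30 + agaw * LEVEL_STRIDE and
 * level = agaw + 2, so e.g. agaw 1 is a 39-bit/3-level table and agaw 2
 * is a 48-bit/4-level table.
 */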
static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

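/*
 * Number of 4KiB pages covered by one PTE at level 'lvl': 1 at level 1,
 * 512 at level 2 (a 2MiB superpage), 512 * 512 at level 3 (1GiB), capped
 * at MAX_AGAW_PFN_WIDTH.
 */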
static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d can't be successfully enabled
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	lo;
	u64	hi;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

/*
 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
 * if marked present.
 */
static phys_addr_t root_entry_lctp(struct root_entry *re)
{
	if (!(re->lo & 1))
		return 0;

	return re->lo & VTD_PAGE_MASK;
}

/*
 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
 * if marked present.
 */
static phys_addr_t root_entry_uctp(struct root_entry *re)
{
	if (!(re->hi & 1))
		return 0;

	return re->hi & VTD_PAGE_MASK;
}
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline void context_clear_pasid_enable(struct context_entry *context)
{
	context->lo &= ~(1ULL << 11);
}

static inline bool context_pasid_enabled(struct context_entry *context)
{
	return !!(context->lo & (1ULL << 11));
}

static inline void context_set_copied(struct context_entry *context)
{
	context->hi |= (1ull << 3);
}

static inline bool context_copied(struct context_entry *context)
{
	return !!(context->hi & (1ULL << 3));
}

static inline bool __context_present(struct context_entry *context)
{
	return (context->lo & 1);
}

static inline bool context_present(struct context_entry *context)
{
	return context_pasid_enabled(context) ?
	     __context_present(context) :
	     __context_present(context) && !context_copied(context);
}

static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline int context_domain_id(struct context_entry *c)
{
	return((c->hi >> 8) & 0xffff);
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

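/*
 * PTEs are packed 512 to a 4KiB table page, so a pointer that is aligned
 * to VTD_PAGE_SIZE is the first entry of its page table.
 */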
static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/*
 * Domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)

#define for_each_domain_iommu(idx, domain)			\
	for (idx = 0; idx < g_num_of_iommus; idx++)		\
		if (domain->iommu_refcnt[idx])

struct dmar_domain {
	int	nid;			/* node id */

	unsigned	iommu_refcnt[DMAR_UNITS_SUPPORTED];
					/* Refcount of devices per iommu */


	u16		iommu_did[DMAR_UNITS_SUPPORTED];
					/* Domain ids per IOMMU. Use u16 since
					 * domain ids are 16 bit wide according
					 * to VT-d spec, section 9.3 */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	u64		max_addr;	/* maximum mapped address */

	struct iommu_domain domain;	/* generic domain data structure for
					   iommu core */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	u8 pasid_supported:3;
	u8 pasid_enabled:1;
	u8 pri_supported:1;
	u8 pri_enabled:1;
	u8 ats_supported:1;
	u8 ats_enabled:1;
	u8 ats_qdep;
	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units	*/
	struct acpi_dmar_header *hdr;	/* ACPI header		*/
	u64	base_address;		/* reserved base address*/
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
	struct page *freelist[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct dmar_domain *domain,
				     struct device *dev);
static void __dmar_remove_one_dev_info(struct device_domain_info *info);
static void domain_context_clear(struct intel_iommu *iommu,
				 struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int intel_iommu_ecs = 1;
static int intel_iommu_pasid28;
static int iommu_identity_mapping;

#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

/* Broadwell and Skylake have broken ECS support — normal so-called "second
 * level" translation of DMA requests-without-PASID doesn't actually happen
 * unless you also set the NESTE bit in an extended context-entry. Which of
 * course means that SVM doesn't work because it's trying to do nested
 * translation of the physical addresses it finds in the process page tables,
 * through the IOVA->phys mapping found in the "second level" page tables.
 *
 * The VT-d specification was retroactively changed to change the definition
 * of the capability bits and pretend that Broadwell/Skylake never happened...
 * but unfortunately the wrong bit was changed. It's ECS which is broken, but
 * for some reason it was the PASID capability bit which was redefined (from
 * bit 28 on BDW/SKL to bit 40 in future).
 *
 * So our test for ECS needs to eschew those implementations which set the old
 * PASID capability bit 28, since those are the ones on which ECS is broken.
 * Unless we are working around the 'pasid28' limitations, that is, by putting
 * the device into passthrough mode for normal DMA and thus masking the bug.
 */
#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
			    (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
/* PASID support is thus enabled if ECS is enabled and *either* of the old
 * or new capability bits are set. */
#define pasid_enabled(iommu) (ecs_enabled(iommu) &&			\
			      (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static const struct iommu_ops intel_iommu_ops;

static bool translation_pre_enabled(struct intel_iommu *iommu)
{
	return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
}

static void clear_translation_pre_enabled(struct intel_iommu *iommu)
{
	iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct intel_iommu *iommu)
{
	u32 gsts;

	gsts = readl(iommu->reg + DMAR_GSTS_REG);
	if (gsts & DMA_GSTS_TES)
		iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
}

/* Convert generic 'struct iommu_domain' to private struct dmar_domain */
static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct dmar_domain, domain);
}

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			pr_info("IOMMU enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			pr_info("IOMMU disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			pr_info("Disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			pr_info("Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			pr_info("Disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			pr_info("Disable supported super page\n");
			intel_iommu_superpage = 0;
		} else if (!strncmp(str, "ecs_off", 7)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable extended context table support\n");
			intel_iommu_ecs = 0;
		} else if (!strncmp(str, "pasid28", 7)) {
			printk(KERN_INFO
				"Intel-IOMMU: enable pre-production PASID support\n");
			intel_iommu_pasid28 = 1;
			iommu_identity_mapping |= IDENTMAP_GFX;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;

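/*
 * Per-IOMMU lookup from domain id to struct dmar_domain: a lazily
 * allocated two-level table, indexed first by the high byte (did >> 8)
 * and then by the low byte (did & 0xff) of the 16-bit domain id.
 */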
static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
{
	struct dmar_domain **domains;
	int idx = did >> 8;

	domains = iommu->domains[idx];
	if (!domains)
		return NULL;

	return domains[did & 0xff];
}

static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
			     struct dmar_domain *domain)
{
	struct dmar_domain **domains;
	int idx = did >> 8;

	if (!iommu->domains[idx]) {
		size_t size = 256 * sizeof(struct dmar_domain *);
		iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
	}

	domains = iommu->domains[idx];
	if (WARN_ON(!domains))
		return;
	else
		domains[did & 0xff] = domain;
}

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void * alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

static inline int domain_type_is_vm(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}

static inline int domain_type_is_si(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
}

static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
	return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
				DOMAIN_FLAG_STATIC_IDENTITY);
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
				       unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain_type_is_vm_or_si(domain));
	for_each_domain_iommu(iommu_id, domain)
		break;

	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool found = false;
	int i;

	domain->iommu_coherency = 1;

	for_each_domain_iommu(i, domain) {
		found = true;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	if (found)
		return;

	/* No hardware attached; use lowest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_coherent(iommu->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	rcu_read_unlock();
}

static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ret = 1;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			if (!ecap_sc_support(iommu->ecap)) {
				ret = 0;
				break;
			}
		}
	}
	rcu_read_unlock();

	return ret;
}

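/*
 * Compute the highest superpage level that every active IOMMU (other than
 * 'skip') can handle: each bit in cap_super_page_val() advertises one
 * superpage size, so fls() on the common mask yields the supported level
 * (0 = 4KiB only, 1 = 2MiB, 2 = 1GiB, ...).
 */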
static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		return 0;
	}

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			mask &= cap_super_page_val(iommu->cap);
			if (!mask)
				break;
		}
	}
	rcu_read_unlock();

	return fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain->iommu_snooping = domain_update_iommu_snooping(NULL);
	domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}

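/*
 * Return the context entry for (bus, devfn), optionally allocating the
 * context table on demand. With extended context support (ECS) each root
 * entry holds two context-table pointers (lo for devfn < 0x80, hi for the
 * upper half) and each function's extended entry occupies two regular-sized
 * slots, hence the devfn adjustment and doubling below.
 */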
static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
						       u8 bus, u8 devfn, int alloc)
{
	struct root_entry *root = &iommu->root_entry[bus];
	struct context_entry *context;
	u64 *entry;

	entry = &root->lo;
	if (ecs_enabled(iommu)) {
		if (devfn >= 0x80) {
			devfn -= 0x80;
			entry = &root->hi;
		}
		devfn *= 2;
	}
	if (*entry & 1)
		context = phys_to_virt(*entry & VTD_PAGE_MASK);
	else {
		unsigned long phy_addr;
		if (!alloc)
			return NULL;

		context = alloc_pgtable_page(iommu->node);
		if (!context)
			return NULL;

		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		*entry = phy_addr | 1;
		__iommu_flush_cache(iommu, entry, sizeof(*entry));
	}
	return &context[devfn];
}

static int iommu_dummy(struct device *dev)
{
	return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

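/*
 * Find the DRHD unit responsible for a device and return its IOMMU,
 * filling in the bus/devfn to use for it: the scoped entry's bus/devfn
 * on an exact device-scope match, or the device's own bus/devfn when it
 * sits behind a scoped PCI bridge or the unit is INCLUDE_ALL.
 */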
static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	struct pci_dev *ptmp, *pdev = NULL;
	u16 segment = 0;
	int i;

	if (iommu_dummy(dev))
		return NULL;

	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		segment = pci_domain_nr(pdev->bus);
	} else if (has_acpi_companion(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				*bus = drhd->devices[i].bus;
				*devfn = drhd->devices[i].devfn;
				goto out;
			}

			if (!pdev || !dev_is_pci(tmp))
				continue;

			ptmp = to_pci_dev(tmp);
			if (ptmp->subordinate &&
			    ptmp->subordinate->number <= pdev->bus->number &&
			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
		got_pdev:
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;
			goto out;
		}
	}
	iommu = NULL;
 out:
	rcu_read_unlock();

	return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (context)
		ret = context_present(context);
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (context) {
		context_clear_entry(context);
		__iommu_flush_cache(iommu, context, sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		context = iommu_context_addr(iommu, i, 0, 0);
		if (context)
			free_pgtable_page(context);

		if (!ecs_enabled(iommu))
			continue;

		context = iommu_context_addr(iommu, i, 0x80, 0);
		if (context)
			free_pgtable_page(context);

	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

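/*
 * Walk (and, if needed, build) the page table down to the PTE covering
 * 'pfn'. When *target_level is 0 the walk stops as soon as it hits a
 * superpage or a non-present entry and *target_level reports the level
 * where it stopped; otherwise intermediate table pages are allocated as
 * needed until *target_level is reached.
 */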
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (!domain_pfn_supported(domain, pfn))
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}


/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (dma_pte_superpage(pte)) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte, a TLB flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}

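/*
 * Recursively free page-table pages whose entire range falls inside
 * [start_pfn, last_pfn]. The leaf PTEs themselves are expected to have
 * been cleared already (see dma_pte_free_pagetable() below).
 */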
Alex Williamson3269ee02013-06-15 10:27:19 -06001113static void dma_pte_free_level(struct dmar_domain *domain, int level,
1114 struct dma_pte *pte, unsigned long pfn,
1115 unsigned long start_pfn, unsigned long last_pfn)
1116{
1117 pfn = max(start_pfn, pfn);
1118 pte = &pte[pfn_level_offset(pfn, level)];
1119
1120 do {
1121 unsigned long level_pfn;
1122 struct dma_pte *level_pte;
1123
1124 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1125 goto next;
1126
1127 level_pfn = pfn & level_mask(level - 1);
1128 level_pte = phys_to_virt(dma_pte_addr(pte));
1129
1130 if (level > 2)
1131 dma_pte_free_level(domain, level - 1, level_pte,
1132 level_pfn, start_pfn, last_pfn);
1133
1134 /* If range covers entire pagetable, free it */
1135 if (!(start_pfn > level_pfn ||
Alex Williamson08336fd2014-01-21 15:48:18 -08001136 last_pfn < level_pfn + level_size(level) - 1)) {
Alex Williamson3269ee02013-06-15 10:27:19 -06001137 dma_clear_pte(pte);
1138 domain_flush_cache(domain, pte, sizeof(*pte));
1139 free_pgtable_page(level_pte);
1140 }
1141next:
1142 pfn += level_size(level);
1143 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1144}
1145
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001146/* free page table pages. last level pte should already be cleared */
1147static void dma_pte_free_pagetable(struct dmar_domain *domain,
David Woodhoused794dc92009-06-28 00:27:49 +01001148 unsigned long start_pfn,
1149 unsigned long last_pfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001150{
Jiang Liu162d1b12014-07-11 14:19:35 +08001151 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1152 BUG_ON(!domain_pfn_supported(domain, last_pfn));
David Woodhouse59c36282009-09-19 07:36:28 -07001153 BUG_ON(start_pfn > last_pfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001154
Jiang Liud41a4ad2014-07-11 14:19:34 +08001155 dma_pte_clear_range(domain, start_pfn, last_pfn);
1156
David Woodhousef3a0a522009-06-30 03:40:07 +01001157 /* We don't need lock here; nobody else touches the iova range */
Alex Williamson3269ee02013-06-15 10:27:19 -06001158 dma_pte_free_level(domain, agaw_to_level(domain->agaw),
1159 domain->pgd, 0, start_pfn, last_pfn);
David Woodhouse6660c632009-06-27 22:41:00 +01001160
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001161 /* free pgd */
David Woodhoused794dc92009-06-28 00:27:49 +01001162 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001163 free_pgtable_page(domain->pgd);
1164 domain->pgd = NULL;
1165 }
1166}
1167
David Woodhouseea8ea462014-03-05 17:09:32 +00001168/* When a page at a given level is being unlinked from its parent, we don't
1169 need to *modify* it at all. All we need to do is make a list of all the
1170 pages which can be freed just as soon as we've flushed the IOTLB and we
1171 know the hardware page-walk will no longer touch them.
1172 The 'pte' argument is the *parent* PTE, pointing to the page that is to
1173 be freed. */
1174static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1175 int level, struct dma_pte *pte,
1176 struct page *freelist)
1177{
1178 struct page *pg;
1179
1180 pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1181 pg->freelist = freelist;
1182 freelist = pg;
1183
1184 if (level == 1)
1185 return freelist;
1186
Jiang Liuadeb2592014-04-09 10:20:39 +08001187 pte = page_address(pg);
1188 do {
David Woodhouseea8ea462014-03-05 17:09:32 +00001189 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1190 freelist = dma_pte_list_pagetables(domain, level - 1,
1191 pte, freelist);
Jiang Liuadeb2592014-04-09 10:20:39 +08001192 pte++;
1193 } while (!first_pte_in_page(pte));
David Woodhouseea8ea462014-03-05 17:09:32 +00001194
1195 return freelist;
1196}
1197
1198static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1199 struct dma_pte *pte, unsigned long pfn,
1200 unsigned long start_pfn,
1201 unsigned long last_pfn,
1202 struct page *freelist)
1203{
1204 struct dma_pte *first_pte = NULL, *last_pte = NULL;
1205
1206 pfn = max(start_pfn, pfn);
1207 pte = &pte[pfn_level_offset(pfn, level)];
1208
1209 do {
1210 unsigned long level_pfn;
1211
1212 if (!dma_pte_present(pte))
1213 goto next;
1214
1215 level_pfn = pfn & level_mask(level);
1216
1217 /* If range covers entire pagetable, free it */
1218 if (start_pfn <= level_pfn &&
1219 last_pfn >= level_pfn + level_size(level) - 1) {
1220 /* These suborbinate page tables are going away entirely. Don't
1221 bother to clear them; we're just going to *free* them. */
1222 if (level > 1 && !dma_pte_superpage(pte))
1223 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1224
1225 dma_clear_pte(pte);
1226 if (!first_pte)
1227 first_pte = pte;
1228 last_pte = pte;
1229 } else if (level > 1) {
1230 /* Recurse down into a level that isn't *entirely* obsolete */
1231 freelist = dma_pte_clear_level(domain, level - 1,
1232 phys_to_virt(dma_pte_addr(pte)),
1233 level_pfn, start_pfn, last_pfn,
1234 freelist);
1235 }
1236next:
1237 pfn += level_size(level);
1238 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1239
1240 if (first_pte)
1241 domain_flush_cache(domain, first_pte,
1242 (void *)++last_pte - (void *)first_pte);
1243
1244 return freelist;
1245}
1246
1247/* We can't just free the pages because the IOMMU may still be walking
1248 the page tables, and may have cached the intermediate levels. The
1249 pages can only be freed after the IOTLB flush has been done. */
Joerg Roedelb6904202015-08-13 11:32:18 +02001250static struct page *domain_unmap(struct dmar_domain *domain,
1251 unsigned long start_pfn,
1252 unsigned long last_pfn)
David Woodhouseea8ea462014-03-05 17:09:32 +00001253{
David Woodhouseea8ea462014-03-05 17:09:32 +00001254 struct page *freelist = NULL;
1255
Jiang Liu162d1b12014-07-11 14:19:35 +08001256 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1257 BUG_ON(!domain_pfn_supported(domain, last_pfn));
David Woodhouseea8ea462014-03-05 17:09:32 +00001258 BUG_ON(start_pfn > last_pfn);
1259
1260 /* we don't need lock here; nobody else touches the iova range */
1261 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1262 domain->pgd, 0, start_pfn, last_pfn, NULL);
1263
1264 /* free pgd */
1265 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1266 struct page *pgd_page = virt_to_page(domain->pgd);
1267 pgd_page->freelist = freelist;
1268 freelist = pgd_page;
1269
1270 domain->pgd = NULL;
1271 }
1272
1273 return freelist;
1274}
1275
Joerg Roedelb6904202015-08-13 11:32:18 +02001276static void dma_free_pagelist(struct page *freelist)
David Woodhouseea8ea462014-03-05 17:09:32 +00001277{
1278 struct page *pg;
1279
1280 while ((pg = freelist)) {
1281 freelist = pg->freelist;
1282 free_pgtable_page(page_address(pg));
1283 }
1284}
1285
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001286/* iommu handling */
1287static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1288{
1289 struct root_entry *root;
1290 unsigned long flags;
1291
Suresh Siddha4c923d42009-10-02 11:01:24 -07001292 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
Jiang Liuffebeb42014-11-09 22:48:02 +08001293 if (!root) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001294 pr_err("Allocating root entry for %s failed\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08001295 iommu->name);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001296 return -ENOMEM;
Jiang Liuffebeb42014-11-09 22:48:02 +08001297 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001298
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001299 __iommu_flush_cache(iommu, root, ROOT_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001300
1301 spin_lock_irqsave(&iommu->lock, flags);
1302 iommu->root_entry = root;
1303 spin_unlock_irqrestore(&iommu->lock, flags);
1304
1305 return 0;
1306}
1307
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001308static void iommu_set_root_entry(struct intel_iommu *iommu)
1309{
David Woodhouse03ecc322015-02-13 14:35:21 +00001310 u64 addr;
David Woodhousec416daa2009-05-10 20:30:58 +01001311 u32 sts;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001312 unsigned long flag;
1313
David Woodhouse03ecc322015-02-13 14:35:21 +00001314 addr = virt_to_phys(iommu->root_entry);
David Woodhousec83b2f22015-06-12 10:15:49 +01001315 if (ecs_enabled(iommu))
David Woodhouse03ecc322015-02-13 14:35:21 +00001316 addr |= DMA_RTADDR_RTT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001317
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001318 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse03ecc322015-02-13 14:35:21 +00001319 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001320
David Woodhousec416daa2009-05-10 20:30:58 +01001321 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001322
1323	/* Make sure the hardware has completed it */
1324 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001325 readl, (sts & DMA_GSTS_RTPS), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001326
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001327 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001328}
1329
1330static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1331{
1332 u32 val;
1333 unsigned long flag;
1334
David Woodhouse9af88142009-02-13 23:18:03 +00001335 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001336 return;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001337
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001338 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse462b60f2009-05-10 20:18:18 +01001339 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001340
1341	/* Make sure the hardware has completed it */
1342 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001343 readl, (!(val & DMA_GSTS_WBFS)), val);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001344
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001345 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001346}
1347
1348/* return value determines whether we need a write buffer flush */
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001349static void __iommu_flush_context(struct intel_iommu *iommu,
1350 u16 did, u16 source_id, u8 function_mask,
1351 u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001352{
1353 u64 val = 0;
1354 unsigned long flag;
1355
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001356 switch (type) {
1357 case DMA_CCMD_GLOBAL_INVL:
1358 val = DMA_CCMD_GLOBAL_INVL;
1359 break;
1360 case DMA_CCMD_DOMAIN_INVL:
1361 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1362 break;
1363 case DMA_CCMD_DEVICE_INVL:
1364 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1365 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1366 break;
1367 default:
1368 BUG();
1369 }
1370 val |= DMA_CCMD_ICC;
1371
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001372 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001373 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1374
1375	/* Make sure the hardware has completed it */
1376 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1377 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1378
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001379 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001380}
1381
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001382/* return value determines whether we need a write buffer flush */
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001383static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1384 u64 addr, unsigned int size_order, u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001385{
1386 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1387 u64 val = 0, val_iva = 0;
1388 unsigned long flag;
1389
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001390 switch (type) {
1391 case DMA_TLB_GLOBAL_FLUSH:
1392		/* a global flush doesn't need to set IVA_REG */
1393 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1394 break;
1395 case DMA_TLB_DSI_FLUSH:
1396 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1397 break;
1398 case DMA_TLB_PSI_FLUSH:
1399 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
David Woodhouseea8ea462014-03-05 17:09:32 +00001400 /* IH bit is passed in as part of address */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001401 val_iva = size_order | addr;
1402 break;
1403 default:
1404 BUG();
1405 }
1406 /* Note: set drain read/write */
1407#if 0
1408 /*
1409	 * This is probably meant to be super secure. It looks like we can
1410	 * ignore it without any impact.
1411 */
1412 if (cap_read_drain(iommu->cap))
1413 val |= DMA_TLB_READ_DRAIN;
1414#endif
1415 if (cap_write_drain(iommu->cap))
1416 val |= DMA_TLB_WRITE_DRAIN;
1417
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001418 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001419 /* Note: Only uses first TLB reg currently */
1420 if (val_iva)
1421 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1422 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1423
1424	/* Make sure the hardware has completed it */
1425 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1426 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1427
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001428 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001429
1430 /* check IOTLB invalidation granularity */
1431 if (DMA_TLB_IAIG(val) == 0)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001432 pr_err("Flush IOTLB failed\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001433 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001434 pr_debug("TLB flush request %Lx, actual %Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001435 (unsigned long long)DMA_TLB_IIRG(type),
1436 (unsigned long long)DMA_TLB_IAIG(val));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001437}
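/*
 * For page-selective invalidation the address and size share one register
 * write: size_order is the log2 of the number of VT-d pages to invalidate
 * and lands in the low bits of the IVA value, while the (page-aligned) base
 * address occupies the high bits. E.g. size_order == 4 with a suitably
 * aligned address invalidates a naturally aligned 16-page (64KiB) window,
 * assuming 4KiB VT-d pages.
 */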
1438
David Woodhouse64ae8922014-03-09 12:52:30 -07001439static struct device_domain_info *
1440iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
1441 u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001442{
Yu Zhao93a23a72009-05-18 13:51:37 +08001443 struct device_domain_info *info;
Yu Zhao93a23a72009-05-18 13:51:37 +08001444
Joerg Roedel55d94042015-07-22 16:50:40 +02001445 assert_spin_locked(&device_domain_lock);
1446
Yu Zhao93a23a72009-05-18 13:51:37 +08001447 if (!iommu->qi)
1448 return NULL;
1449
Yu Zhao93a23a72009-05-18 13:51:37 +08001450 list_for_each_entry(info, &domain->devices, link)
Jiang Liuc3b497c2014-07-11 14:19:25 +08001451 if (info->iommu == iommu && info->bus == bus &&
1452 info->devfn == devfn) {
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001453 if (info->ats_supported && info->dev)
1454 return info;
Yu Zhao93a23a72009-05-18 13:51:37 +08001455 break;
1456 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001457
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001458 return NULL;
Yu Zhao93a23a72009-05-18 13:51:37 +08001459}
1460
1461static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1462{
Bjorn Helgaasfb0cc3a2015-07-20 09:10:36 -05001463 struct pci_dev *pdev;
1464
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001465 if (!info || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001466 return;
1467
Bjorn Helgaasfb0cc3a2015-07-20 09:10:36 -05001468 pdev = to_pci_dev(info->dev);
Bjorn Helgaasfb0cc3a2015-07-20 09:10:36 -05001469
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001470#ifdef CONFIG_INTEL_IOMMU_SVM
1471	/* The PCIe spec, in its wisdom, declares that the behaviour of
1472	   the device is undefined if you enable PASID support after ATS
1473	   support. So always enable PASID support on devices which have
1474	   it, even if we can't yet know whether we're ever going to
1475	   use it. */
1476 if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
1477 info->pasid_enabled = 1;
1478
1479 if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
1480 info->pri_enabled = 1;
1481#endif
1482 if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
1483 info->ats_enabled = 1;
1484 info->ats_qdep = pci_ats_queue_depth(pdev);
1485 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001486}
1487
1488static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1489{
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001490 struct pci_dev *pdev;
1491
Jeremy McNicollda972fb2016-01-14 21:33:06 -08001492 if (!dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001493 return;
1494
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001495 pdev = to_pci_dev(info->dev);
1496
1497 if (info->ats_enabled) {
1498 pci_disable_ats(pdev);
1499 info->ats_enabled = 0;
1500 }
1501#ifdef CONFIG_INTEL_IOMMU_SVM
1502 if (info->pri_enabled) {
1503 pci_disable_pri(pdev);
1504 info->pri_enabled = 0;
1505 }
1506 if (info->pasid_enabled) {
1507 pci_disable_pasid(pdev);
1508 info->pasid_enabled = 0;
1509 }
1510#endif
Yu Zhao93a23a72009-05-18 13:51:37 +08001511}
1512
1513static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1514 u64 addr, unsigned mask)
1515{
1516 u16 sid, qdep;
1517 unsigned long flags;
1518 struct device_domain_info *info;
1519
1520 spin_lock_irqsave(&device_domain_lock, flags);
1521 list_for_each_entry(info, &domain->devices, link) {
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001522 if (!info->ats_enabled)
Yu Zhao93a23a72009-05-18 13:51:37 +08001523 continue;
1524
1525 sid = info->bus << 8 | info->devfn;
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001526 qdep = info->ats_qdep;
Yu Zhao93a23a72009-05-18 13:51:37 +08001527 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1528 }
1529 spin_unlock_irqrestore(&device_domain_lock, flags);
1530}
1531
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02001532static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1533 struct dmar_domain *domain,
1534 unsigned long pfn, unsigned int pages,
1535 int ih, int map)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001536{
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001537 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
David Woodhouse03d6a242009-06-28 15:33:46 +01001538 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02001539 u16 did = domain->iommu_did[iommu->seq_id];
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001540
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001541 BUG_ON(pages == 0);
1542
David Woodhouseea8ea462014-03-05 17:09:32 +00001543 if (ih)
1544 ih = 1 << 6;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001545 /*
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001546	 * Fall back to domain-selective flush if there is no PSI support or
 1547	 * the size is too big.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001548	 * PSI requires the size to be 2 ^ x pages and the base address to be
 1549	 * naturally aligned to that size
1550 */
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001551 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1552 iommu->flush.flush_iotlb(iommu, did, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001553 DMA_TLB_DSI_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001554 else
David Woodhouseea8ea462014-03-05 17:09:32 +00001555 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001556 DMA_TLB_PSI_FLUSH);
Yu Zhaobf92df32009-06-29 11:31:45 +08001557
1558 /*
Nadav Amit82653632010-04-01 13:24:40 +03001559	 * In caching mode, changes of pages from non-present to present require
 1560	 * a flush. However, the device IOTLB doesn't need to be flushed in this case.
Yu Zhaobf92df32009-06-29 11:31:45 +08001561 */
Nadav Amit82653632010-04-01 13:24:40 +03001562 if (!cap_caching_mode(iommu->cap) || !map)
Joerg Roedel9452d5b2015-07-21 10:00:56 +02001563 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1564 addr, mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001565}
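/*
 * Worked example of the mask computation above: a request covering 9 pages
 * is rounded up to 16, giving mask == 4, so the hardware invalidates a
 * naturally aligned 16-page window containing the range. If that mask
 * exceeds cap_max_amask_val() we fall back to the domain-selective flush.
 */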
1566
mark grossf8bab732008-02-08 04:18:38 -08001567static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1568{
1569 u32 pmen;
1570 unsigned long flags;
1571
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001572 raw_spin_lock_irqsave(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001573 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1574 pmen &= ~DMA_PMEN_EPM;
1575 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1576
1577 /* wait for the protected region status bit to clear */
1578 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1579 readl, !(pmen & DMA_PMEN_PRS), pmen);
1580
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001581 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001582}
1583
Jiang Liu2a41cce2014-07-11 14:19:33 +08001584static void iommu_enable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001585{
1586 u32 sts;
1587 unsigned long flags;
1588
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001589 raw_spin_lock_irqsave(&iommu->register_lock, flags);
David Woodhousec416daa2009-05-10 20:30:58 +01001590 iommu->gcmd |= DMA_GCMD_TE;
1591 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001592
1593	/* Make sure the hardware has completed it */
1594 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001595 readl, (sts & DMA_GSTS_TES), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001596
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001597 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001598}
1599
Jiang Liu2a41cce2014-07-11 14:19:33 +08001600static void iommu_disable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001601{
1602 u32 sts;
1603 unsigned long flag;
1604
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001605 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001606 iommu->gcmd &= ~DMA_GCMD_TE;
1607 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1608
1609	/* Make sure the hardware has completed it */
1610 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001611 readl, (!(sts & DMA_GSTS_TES)), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001612
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001613 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001614}
1615
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001616
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001617static int iommu_init_domains(struct intel_iommu *iommu)
1618{
Joerg Roedel8bf47812015-07-21 10:41:21 +02001619 u32 ndomains, nlongs;
1620 size_t size;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001621
1622 ndomains = cap_ndoms(iommu->cap);
Joerg Roedel8bf47812015-07-21 10:41:21 +02001623 pr_debug("%s: Number of Domains supported <%d>\n",
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001624 iommu->name, ndomains);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001625 nlongs = BITS_TO_LONGS(ndomains);
1626
Donald Dutile94a91b52009-08-20 16:51:34 -04001627 spin_lock_init(&iommu->lock);
1628
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001629 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1630 if (!iommu->domain_ids) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001631 pr_err("%s: Allocating domain id array failed\n",
1632 iommu->name);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001633 return -ENOMEM;
1634 }
Joerg Roedel8bf47812015-07-21 10:41:21 +02001635
1636 size = ((ndomains >> 8) + 1) * sizeof(struct dmar_domain **);
1637 iommu->domains = kzalloc(size, GFP_KERNEL);
1638
1639 if (iommu->domains) {
1640 size = 256 * sizeof(struct dmar_domain *);
1641 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1642 }
1643
1644 if (!iommu->domains || !iommu->domains[0]) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001645 pr_err("%s: Allocating domain array failed\n",
1646 iommu->name);
Jiang Liu852bdb02014-01-06 14:18:11 +08001647 kfree(iommu->domain_ids);
Joerg Roedel8bf47812015-07-21 10:41:21 +02001648 kfree(iommu->domains);
Jiang Liu852bdb02014-01-06 14:18:11 +08001649 iommu->domain_ids = NULL;
Joerg Roedel8bf47812015-07-21 10:41:21 +02001650 iommu->domains = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001651 return -ENOMEM;
1652 }
1653
Joerg Roedel8bf47812015-07-21 10:41:21 +02001654
1655
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001656 /*
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001657 * If Caching mode is set, then invalid translations are tagged
1658 * with domain-id 0, hence we need to pre-allocate it. We also
1659 * use domain-id 0 as a marker for non-allocated domain-id, so
1660 * make sure it is not used for a real domain.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001661 */
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001662 set_bit(0, iommu->domain_ids);
1663
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001664 return 0;
1665}
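/*
 * The domain pointer table allocated above is two-level: iommu->domains
 * holds up to ((ndomains >> 8) + 1) chunk pointers and each chunk covers
 * 256 domain-ids, so a given did is presumably resolved as
 * domains[did >> 8][did & 0xff], with chunks beyond the first allocated
 * on demand elsewhere.
 */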
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001666
Jiang Liuffebeb42014-11-09 22:48:02 +08001667static void disable_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001668{
Joerg Roedel29a27712015-07-21 17:17:12 +02001669 struct device_domain_info *info, *tmp;
Joerg Roedel55d94042015-07-22 16:50:40 +02001670 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001671
Joerg Roedel29a27712015-07-21 17:17:12 +02001672 if (!iommu->domains || !iommu->domain_ids)
1673 return;
Jiang Liua4eaa862014-02-19 14:07:30 +08001674
Joerg Roedel55d94042015-07-22 16:50:40 +02001675 spin_lock_irqsave(&device_domain_lock, flags);
Joerg Roedel29a27712015-07-21 17:17:12 +02001676 list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1677 struct dmar_domain *domain;
1678
1679 if (info->iommu != iommu)
1680 continue;
1681
1682 if (!info->dev || !info->domain)
1683 continue;
1684
1685 domain = info->domain;
1686
Joerg Roedele6de0f82015-07-22 16:30:36 +02001687 dmar_remove_one_dev_info(domain, info->dev);
Joerg Roedel29a27712015-07-21 17:17:12 +02001688
1689 if (!domain_type_is_vm_or_si(domain))
1690 domain_exit(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001691 }
Joerg Roedel55d94042015-07-22 16:50:40 +02001692 spin_unlock_irqrestore(&device_domain_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001693
1694 if (iommu->gcmd & DMA_GCMD_TE)
1695 iommu_disable_translation(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08001696}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001697
Jiang Liuffebeb42014-11-09 22:48:02 +08001698static void free_dmar_iommu(struct intel_iommu *iommu)
1699{
1700 if ((iommu->domains) && (iommu->domain_ids)) {
Joerg Roedel8bf47812015-07-21 10:41:21 +02001701 int elems = (cap_ndoms(iommu->cap) >> 8) + 1;
1702 int i;
1703
1704 for (i = 0; i < elems; i++)
1705 kfree(iommu->domains[i]);
Jiang Liuffebeb42014-11-09 22:48:02 +08001706 kfree(iommu->domains);
1707 kfree(iommu->domain_ids);
1708 iommu->domains = NULL;
1709 iommu->domain_ids = NULL;
1710 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001711
Weidong Hand9630fe2008-12-08 11:06:32 +08001712 g_iommus[iommu->seq_id] = NULL;
1713
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001714 /* free context mapping */
1715 free_context_table(iommu);
David Woodhouse8a94ade2015-03-24 14:54:56 +00001716
1717#ifdef CONFIG_INTEL_IOMMU_SVM
David Woodhousea222a7f2015-10-07 23:35:18 +01001718 if (pasid_enabled(iommu)) {
1719 if (ecap_prs(iommu->ecap))
1720 intel_svm_finish_prq(iommu);
David Woodhouse8a94ade2015-03-24 14:54:56 +00001721 intel_svm_free_pasid_tables(iommu);
David Woodhousea222a7f2015-10-07 23:35:18 +01001722 }
David Woodhouse8a94ade2015-03-24 14:54:56 +00001723#endif
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001724}
1725
Jiang Liuab8dfe22014-07-11 14:19:27 +08001726static struct dmar_domain *alloc_domain(int flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001727{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001728 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001729
1730 domain = alloc_domain_mem();
1731 if (!domain)
1732 return NULL;
1733
Jiang Liuab8dfe22014-07-11 14:19:27 +08001734 memset(domain, 0, sizeof(*domain));
Suresh Siddha4c923d42009-10-02 11:01:24 -07001735 domain->nid = -1;
Jiang Liuab8dfe22014-07-11 14:19:27 +08001736 domain->flags = flags;
Jiang Liu92d03cc2014-02-19 14:07:28 +08001737 INIT_LIST_HEAD(&domain->devices);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001738
1739 return domain;
1740}
1741
Joerg Roedeld160aca2015-07-22 11:52:53 +02001742/* Must be called with iommu->lock */
1743static int domain_attach_iommu(struct dmar_domain *domain,
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001744 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001745{
Jiang Liu44bde612014-07-11 14:19:29 +08001746 unsigned long ndomains;
Joerg Roedel55d94042015-07-22 16:50:40 +02001747 int num;
Jiang Liu44bde612014-07-11 14:19:29 +08001748
Joerg Roedel55d94042015-07-22 16:50:40 +02001749 assert_spin_locked(&device_domain_lock);
Joerg Roedeld160aca2015-07-22 11:52:53 +02001750 assert_spin_locked(&iommu->lock);
Jiang Liu44bde612014-07-11 14:19:29 +08001751
Joerg Roedel29a27712015-07-21 17:17:12 +02001752 domain->iommu_refcnt[iommu->seq_id] += 1;
1753 domain->iommu_count += 1;
1754 if (domain->iommu_refcnt[iommu->seq_id] == 1) {
Jiang Liufb170fb2014-07-11 14:19:28 +08001755 ndomains = cap_ndoms(iommu->cap);
Joerg Roedeld160aca2015-07-22 11:52:53 +02001756 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1757
1758 if (num >= ndomains) {
1759 pr_err("%s: No free domain ids\n", iommu->name);
1760 domain->iommu_refcnt[iommu->seq_id] -= 1;
1761 domain->iommu_count -= 1;
Joerg Roedel55d94042015-07-22 16:50:40 +02001762 return -ENOSPC;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001763 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001764
Joerg Roedeld160aca2015-07-22 11:52:53 +02001765 set_bit(num, iommu->domain_ids);
1766 set_iommu_domain(iommu, num, domain);
Jiang Liufb170fb2014-07-11 14:19:28 +08001767
Joerg Roedeld160aca2015-07-22 11:52:53 +02001768 domain->iommu_did[iommu->seq_id] = num;
1769 domain->nid = iommu->node;
1770
Jiang Liufb170fb2014-07-11 14:19:28 +08001771 domain_update_iommu_cap(domain);
1772 }
Joerg Roedeld160aca2015-07-22 11:52:53 +02001773
Joerg Roedel55d94042015-07-22 16:50:40 +02001774 return 0;
Jiang Liufb170fb2014-07-11 14:19:28 +08001775}
1776
1777static int domain_detach_iommu(struct dmar_domain *domain,
1778 struct intel_iommu *iommu)
1779{
Joerg Roedeld160aca2015-07-22 11:52:53 +02001780 int num, count = INT_MAX;
Jiang Liufb170fb2014-07-11 14:19:28 +08001781
Joerg Roedel55d94042015-07-22 16:50:40 +02001782 assert_spin_locked(&device_domain_lock);
Joerg Roedeld160aca2015-07-22 11:52:53 +02001783 assert_spin_locked(&iommu->lock);
Jiang Liufb170fb2014-07-11 14:19:28 +08001784
Joerg Roedel29a27712015-07-21 17:17:12 +02001785 domain->iommu_refcnt[iommu->seq_id] -= 1;
1786 count = --domain->iommu_count;
1787 if (domain->iommu_refcnt[iommu->seq_id] == 0) {
Joerg Roedeld160aca2015-07-22 11:52:53 +02001788 num = domain->iommu_did[iommu->seq_id];
1789 clear_bit(num, iommu->domain_ids);
1790 set_iommu_domain(iommu, num, NULL);
1791
Jiang Liufb170fb2014-07-11 14:19:28 +08001792 domain_update_iommu_cap(domain);
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001793 domain->iommu_did[iommu->seq_id] = 0;
Jiang Liufb170fb2014-07-11 14:19:28 +08001794 }
Jiang Liufb170fb2014-07-11 14:19:28 +08001795
1796 return count;
1797}
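/*
 * Together, domain_attach_iommu() and domain_detach_iommu() reference-count
 * a domain per IOMMU: the domain-id in iommu->domain_ids is claimed when the
 * first device behind that IOMMU attaches and released when the last one
 * detaches, at which point iommu_did[] falls back to 0, the "unused" marker
 * reserved in iommu_init_domains().
 */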
1798
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001799static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001800static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001801
Joseph Cihula51a63e62011-03-21 11:04:24 -07001802static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001803{
1804 struct pci_dev *pdev = NULL;
1805 struct iova *iova;
1806 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001807
Robin Murphy0fb5fe82015-01-12 17:51:16 +00001808 init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
1809 DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001810
Mark Gross8a443df2008-03-04 14:59:31 -08001811 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1812 &reserved_rbtree_key);
1813
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001814 /* IOAPIC ranges shouldn't be accessed by DMA */
1815 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1816 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001817 if (!iova) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001818 pr_err("Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001819 return -ENODEV;
1820 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001821
1822 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1823 for_each_pci_dev(pdev) {
1824 struct resource *r;
1825
1826 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1827 r = &pdev->resource[i];
1828 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1829 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001830 iova = reserve_iova(&reserved_iova_list,
1831 IOVA_PFN(r->start),
1832 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001833 if (!iova) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001834 pr_err("Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001835 return -ENODEV;
1836 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001837 }
1838 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001839 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001840}
1841
1842static void domain_reserve_special_ranges(struct dmar_domain *domain)
1843{
1844 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1845}
1846
1847static inline int guestwidth_to_adjustwidth(int gaw)
1848{
1849 int agaw;
1850 int r = (gaw - 12) % 9;
1851
1852 if (r == 0)
1853 agaw = gaw;
1854 else
1855 agaw = gaw + 9 - r;
1856 if (agaw > 64)
1857 agaw = 64;
1858 return agaw;
1859}
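/*
 * Example: gaw 48 gives r == 0 and is kept as agaw 48 (a 4-level page
 * table), while gaw 40 gives r == 1 and is rounded up to the next
 * supported width, 48; the result is capped at 64.
 */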
1860
Joerg Roedeldc534b22015-07-22 12:44:02 +02001861static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
1862 int guest_width)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001863{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001864 int adjust_width, agaw;
1865 unsigned long sagaw;
1866
Robin Murphy0fb5fe82015-01-12 17:51:16 +00001867 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
1868 DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001869 domain_reserve_special_ranges(domain);
1870
1871 /* calculate AGAW */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001872 if (guest_width > cap_mgaw(iommu->cap))
1873 guest_width = cap_mgaw(iommu->cap);
1874 domain->gaw = guest_width;
1875 adjust_width = guestwidth_to_adjustwidth(guest_width);
1876 agaw = width_to_agaw(adjust_width);
1877 sagaw = cap_sagaw(iommu->cap);
1878 if (!test_bit(agaw, &sagaw)) {
1879 /* hardware doesn't support it, choose a bigger one */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001880 pr_debug("Hardware doesn't support agaw %d\n", agaw);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001881 agaw = find_next_bit(&sagaw, 5, agaw);
1882 if (agaw >= 5)
1883 return -ENODEV;
1884 }
1885 domain->agaw = agaw;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001886
Weidong Han8e6040972008-12-08 15:49:06 +08001887 if (ecap_coherent(iommu->ecap))
1888 domain->iommu_coherency = 1;
1889 else
1890 domain->iommu_coherency = 0;
1891
Sheng Yang58c610b2009-03-18 15:33:05 +08001892 if (ecap_sc_support(iommu->ecap))
1893 domain->iommu_snooping = 1;
1894 else
1895 domain->iommu_snooping = 0;
1896
David Woodhouse214e39a2014-03-19 10:38:49 +00001897 if (intel_iommu_superpage)
1898 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1899 else
1900 domain->iommu_superpage = 0;
1901
Suresh Siddha4c923d42009-10-02 11:01:24 -07001902 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001903
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001904 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001905 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001906 if (!domain->pgd)
1907 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001908 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001909 return 0;
1910}
1911
1912static void domain_exit(struct dmar_domain *domain)
1913{
David Woodhouseea8ea462014-03-05 17:09:32 +00001914 struct page *freelist = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001915
1916	/* Domain 0 is reserved, so don't process it */
1917 if (!domain)
1918 return;
1919
Alex Williamson7b668352011-05-24 12:02:41 +01001920 /* Flush any lazy unmaps that may reference this domain */
1921 if (!intel_iommu_strict)
1922 flush_unmaps_timeout(0);
1923
Joerg Roedeld160aca2015-07-22 11:52:53 +02001924 /* Remove associated devices and clear attached or cached domains */
1925 rcu_read_lock();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001926 domain_remove_dev_info(domain);
Joerg Roedeld160aca2015-07-22 11:52:53 +02001927 rcu_read_unlock();
Jiang Liu92d03cc2014-02-19 14:07:28 +08001928
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001929 /* destroy iovas */
1930 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001931
David Woodhouseea8ea462014-03-05 17:09:32 +00001932 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001933
David Woodhouseea8ea462014-03-05 17:09:32 +00001934 dma_free_pagelist(freelist);
1935
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001936 free_domain_mem(domain);
1937}
1938
David Woodhouse64ae8922014-03-09 12:52:30 -07001939static int domain_context_mapping_one(struct dmar_domain *domain,
1940 struct intel_iommu *iommu,
Joerg Roedel28ccce02015-07-21 14:45:31 +02001941 u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001942{
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02001943 u16 did = domain->iommu_did[iommu->seq_id];
Joerg Roedel28ccce02015-07-21 14:45:31 +02001944 int translation = CONTEXT_TT_MULTI_LEVEL;
1945 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001946 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001947 unsigned long flags;
Weidong Hanea6606b2008-12-08 23:08:15 +08001948 struct dma_pte *pgd;
Joerg Roedel55d94042015-07-22 16:50:40 +02001949 int ret, agaw;
Joerg Roedel28ccce02015-07-21 14:45:31 +02001950
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02001951 WARN_ON(did == 0);
1952
Joerg Roedel28ccce02015-07-21 14:45:31 +02001953 if (hw_pass_through && domain_type_is_si(domain))
1954 translation = CONTEXT_TT_PASS_THROUGH;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001955
1956 pr_debug("Set context mapping for %02x:%02x.%d\n",
1957 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001958
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001959 BUG_ON(!domain->pgd);
Weidong Han5331fe62008-12-08 23:00:00 +08001960
Joerg Roedel55d94042015-07-22 16:50:40 +02001961 spin_lock_irqsave(&device_domain_lock, flags);
1962 spin_lock(&iommu->lock);
1963
1964 ret = -ENOMEM;
David Woodhouse03ecc322015-02-13 14:35:21 +00001965 context = iommu_context_addr(iommu, bus, devfn, 1);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001966 if (!context)
Joerg Roedel55d94042015-07-22 16:50:40 +02001967 goto out_unlock;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001968
Joerg Roedel55d94042015-07-22 16:50:40 +02001969 ret = 0;
1970 if (context_present(context))
1971 goto out_unlock;
Joerg Roedelcf484d02015-06-12 12:21:46 +02001972
Weidong Hanea6606b2008-12-08 23:08:15 +08001973 pgd = domain->pgd;
1974
Joerg Roedelde24e552015-07-21 14:53:04 +02001975 context_clear_entry(context);
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02001976 context_set_domain_id(context, did);
Weidong Hanea6606b2008-12-08 23:08:15 +08001977
Joerg Roedelde24e552015-07-21 14:53:04 +02001978 /*
1979	 * Skip top levels of page tables for an iommu which has less agaw
1980	 * than the default. Unnecessary for PT mode.
1981 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001982 if (translation != CONTEXT_TT_PASS_THROUGH) {
Joerg Roedelde24e552015-07-21 14:53:04 +02001983 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
Joerg Roedel55d94042015-07-22 16:50:40 +02001984 ret = -ENOMEM;
Joerg Roedelde24e552015-07-21 14:53:04 +02001985 pgd = phys_to_virt(dma_pte_addr(pgd));
Joerg Roedel55d94042015-07-22 16:50:40 +02001986 if (!dma_pte_present(pgd))
1987 goto out_unlock;
Joerg Roedelde24e552015-07-21 14:53:04 +02001988 }
1989
David Woodhouse64ae8922014-03-09 12:52:30 -07001990 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001991 if (info && info->ats_supported)
1992 translation = CONTEXT_TT_DEV_IOTLB;
1993 else
1994 translation = CONTEXT_TT_MULTI_LEVEL;
Joerg Roedelde24e552015-07-21 14:53:04 +02001995
Yu Zhao93a23a72009-05-18 13:51:37 +08001996 context_set_address_root(context, virt_to_phys(pgd));
1997 context_set_address_width(context, iommu->agaw);
Joerg Roedelde24e552015-07-21 14:53:04 +02001998 } else {
1999 /*
2000 * In pass through mode, AW must be programmed to
2001 * indicate the largest AGAW value supported by
2002 * hardware. And ASR is ignored by hardware.
2003 */
2004 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08002005 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002006
2007 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00002008 context_set_fault_enable(context);
2009 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08002010 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002011
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002012 /*
2013	 * It's a non-present to present mapping. If the hardware doesn't
2014	 * cache non-present entries, we only need to flush the write-buffer.
2015	 * If it _does_ cache non-present entries, then it does so in the special
2016 * domain #0, which we have to flush:
2017 */
2018 if (cap_caching_mode(iommu->cap)) {
2019 iommu->flush.flush_context(iommu, 0,
2020 (((u16)bus) << 8) | devfn,
2021 DMA_CCMD_MASK_NOBIT,
2022 DMA_CCMD_DEVICE_INVL);
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002023 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002024 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002025 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002026 }
Yu Zhao93a23a72009-05-18 13:51:37 +08002027 iommu_enable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08002028
Joerg Roedel55d94042015-07-22 16:50:40 +02002029 ret = 0;
2030
2031out_unlock:
2032 spin_unlock(&iommu->lock);
2033 spin_unlock_irqrestore(&device_domain_lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08002034
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002035	return ret;
2036}
2037
Alex Williamson579305f2014-07-03 09:51:43 -06002038struct domain_context_mapping_data {
2039 struct dmar_domain *domain;
2040 struct intel_iommu *iommu;
Alex Williamson579305f2014-07-03 09:51:43 -06002041};
2042
2043static int domain_context_mapping_cb(struct pci_dev *pdev,
2044 u16 alias, void *opaque)
2045{
2046 struct domain_context_mapping_data *data = opaque;
2047
2048 return domain_context_mapping_one(data->domain, data->iommu,
Joerg Roedel28ccce02015-07-21 14:45:31 +02002049 PCI_BUS_NUM(alias), alias & 0xff);
Alex Williamson579305f2014-07-03 09:51:43 -06002050}
2051
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002052static int
Joerg Roedel28ccce02015-07-21 14:45:31 +02002053domain_context_mapping(struct dmar_domain *domain, struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002054{
David Woodhouse64ae8922014-03-09 12:52:30 -07002055 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002056 u8 bus, devfn;
Alex Williamson579305f2014-07-03 09:51:43 -06002057 struct domain_context_mapping_data data;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002058
David Woodhousee1f167f2014-03-09 15:24:46 -07002059 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse64ae8922014-03-09 12:52:30 -07002060 if (!iommu)
2061 return -ENODEV;
2062
Alex Williamson579305f2014-07-03 09:51:43 -06002063 if (!dev_is_pci(dev))
Joerg Roedel28ccce02015-07-21 14:45:31 +02002064 return domain_context_mapping_one(domain, iommu, bus, devfn);
Alex Williamson579305f2014-07-03 09:51:43 -06002065
2066 data.domain = domain;
2067 data.iommu = iommu;
Alex Williamson579305f2014-07-03 09:51:43 -06002068
2069 return pci_for_each_dma_alias(to_pci_dev(dev),
2070 &domain_context_mapping_cb, &data);
2071}
2072
2073static int domain_context_mapped_cb(struct pci_dev *pdev,
2074 u16 alias, void *opaque)
2075{
2076 struct intel_iommu *iommu = opaque;
2077
2078 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002079}
2080
David Woodhousee1f167f2014-03-09 15:24:46 -07002081static int domain_context_mapped(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002082{
Weidong Han5331fe62008-12-08 23:00:00 +08002083 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002084 u8 bus, devfn;
Weidong Han5331fe62008-12-08 23:00:00 +08002085
David Woodhousee1f167f2014-03-09 15:24:46 -07002086 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08002087 if (!iommu)
2088 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002089
Alex Williamson579305f2014-07-03 09:51:43 -06002090 if (!dev_is_pci(dev))
2091 return device_context_mapped(iommu, bus, devfn);
David Woodhousee1f167f2014-03-09 15:24:46 -07002092
Alex Williamson579305f2014-07-03 09:51:43 -06002093 return !pci_for_each_dma_alias(to_pci_dev(dev),
2094 domain_context_mapped_cb, iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002095}
2096
Fenghua Yuf5329592009-08-04 15:09:37 -07002097/* Returns a number of VTD pages, but aligned to MM page size */
2098static inline unsigned long aligned_nrpages(unsigned long host_addr,
2099 size_t size)
2100{
2101 host_addr &= ~PAGE_MASK;
2102 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
2103}
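/*
 * Example (with 4KiB pages): host_addr 0x1234 and size 0x2000 leave an
 * in-page offset of 0x234, so the buffer occupies 0x2234 bytes measured
 * from its page start; PAGE_ALIGN rounds that to 0x3000, i.e. 3 VT-d pages.
 */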
2104
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002105/* Return largest possible superpage level for a given mapping */
2106static inline int hardware_largepage_caps(struct dmar_domain *domain,
2107 unsigned long iov_pfn,
2108 unsigned long phy_pfn,
2109 unsigned long pages)
2110{
2111 int support, level = 1;
2112 unsigned long pfnmerge;
2113
2114 support = domain->iommu_superpage;
2115
2116 /* To use a large page, the virtual *and* physical addresses
2117 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2118 of them will mean we have to use smaller pages. So just
2119 merge them and check both at once. */
2120 pfnmerge = iov_pfn | phy_pfn;
2121
2122 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2123 pages >>= VTD_STRIDE_SHIFT;
2124 if (!pages)
2125 break;
2126 pfnmerge >>= VTD_STRIDE_SHIFT;
2127 level++;
2128 support--;
2129 }
2130 return level;
2131}
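/*
 * Example: with domain->iommu_superpage == 2 (2MiB and 1GiB supported), an
 * IOVA pfn and physical pfn that are both 2MiB aligned (low 9 bits clear)
 * and a run of at least 512 pages give level 2; any misalignment or a
 * shorter run leaves us at level 1, i.e. ordinary 4KiB mappings.
 */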
2132
David Woodhouse9051aa02009-06-29 12:30:54 +01002133static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2134 struct scatterlist *sg, unsigned long phys_pfn,
2135 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01002136{
2137 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01002138 phys_addr_t uninitialized_var(pteval);
Jiang Liucc4f14a2014-11-26 09:42:10 +08002139 unsigned long sg_res = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002140 unsigned int largepage_lvl = 0;
2141 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01002142
Jiang Liu162d1b12014-07-11 14:19:35 +08002143 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
David Woodhousee1605492009-06-29 11:17:38 +01002144
2145 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2146 return -EINVAL;
2147
2148 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2149
Jiang Liucc4f14a2014-11-26 09:42:10 +08002150 if (!sg) {
2151 sg_res = nr_pages;
David Woodhouse9051aa02009-06-29 12:30:54 +01002152 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2153 }
2154
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002155 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01002156 uint64_t tmp;
2157
David Woodhousee1605492009-06-29 11:17:38 +01002158 if (!sg_res) {
Fenghua Yuf5329592009-08-04 15:09:37 -07002159 sg_res = aligned_nrpages(sg->offset, sg->length);
David Woodhousee1605492009-06-29 11:17:38 +01002160 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2161 sg->dma_length = sg->length;
Dan Williams3e6110f2015-12-15 12:54:06 -08002162 pteval = page_to_phys(sg_page(sg)) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002163 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01002164 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002165
David Woodhousee1605492009-06-29 11:17:38 +01002166 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002167 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2168
David Woodhouse5cf0a762014-03-19 16:07:49 +00002169 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01002170 if (!pte)
2171 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002172			/* It is a large page */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002173 if (largepage_lvl > 1) {
Christian Zanderba2374f2015-06-10 09:41:45 -07002174 unsigned long nr_superpages, end_pfn;
2175
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002176 pteval |= DMA_PTE_LARGE_PAGE;
Jiang Liud41a4ad2014-07-11 14:19:34 +08002177 lvl_pages = lvl_to_nr_pages(largepage_lvl);
Christian Zanderba2374f2015-06-10 09:41:45 -07002178
2179 nr_superpages = sg_res / lvl_pages;
2180 end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
2181
Jiang Liud41a4ad2014-07-11 14:19:34 +08002182 /*
2183 * Ensure that old small page tables are
Christian Zanderba2374f2015-06-10 09:41:45 -07002184 * removed to make room for superpage(s).
Jiang Liud41a4ad2014-07-11 14:19:34 +08002185 */
Christian Zanderba2374f2015-06-10 09:41:45 -07002186 dma_pte_free_pagetable(domain, iov_pfn, end_pfn);
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002187 } else {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002188 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002189 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002190
David Woodhousee1605492009-06-29 11:17:38 +01002191 }
2192		/* We don't need a lock here; nobody else
2193		 * touches the iova range
2194 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01002195 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01002196 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01002197 static int dumps = 5;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002198 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2199 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01002200 if (dumps) {
2201 dumps--;
2202 debug_dma_dump_mappings(NULL);
2203 }
2204 WARN_ON(1);
2205 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002206
2207 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2208
2209 BUG_ON(nr_pages < lvl_pages);
2210 BUG_ON(sg_res < lvl_pages);
2211
2212 nr_pages -= lvl_pages;
2213 iov_pfn += lvl_pages;
2214 phys_pfn += lvl_pages;
2215 pteval += lvl_pages * VTD_PAGE_SIZE;
2216 sg_res -= lvl_pages;
2217
2218 /* If the next PTE would be the first in a new page, then we
2219 need to flush the cache on the entries we've just written.
2220 And then we'll need to recalculate 'pte', so clear it and
2221 let it get set again in the if (!pte) block above.
2222
2223 If we're done (!nr_pages) we need to flush the cache too.
2224
2225 Also if we've been setting superpages, we may need to
2226 recalculate 'pte' and switch back to smaller pages for the
2227 end of the mapping, if the trailing size is not enough to
2228 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01002229 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002230 if (!nr_pages || first_pte_in_page(pte) ||
2231 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01002232 domain_flush_cache(domain, first_pte,
2233 (void *)pte - (void *)first_pte);
2234 pte = NULL;
2235 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002236
2237 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01002238 sg = sg_next(sg);
2239 }
2240 return 0;
2241}
2242
David Woodhouse9051aa02009-06-29 12:30:54 +01002243static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2244 struct scatterlist *sg, unsigned long nr_pages,
2245 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002246{
David Woodhouse9051aa02009-06-29 12:30:54 +01002247 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2248}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002249
David Woodhouse9051aa02009-06-29 12:30:54 +01002250static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2251 unsigned long phys_pfn, unsigned long nr_pages,
2252 int prot)
2253{
2254 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002255}
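/*
 * domain_pfn_mapping() is the linear flavour of __domain_mapping(), used for
 * instance by iommu_domain_identity_map() below to map an RMRR 1:1, while
 * domain_sg_mapping() walks a scatterlist instead of a contiguous pfn range.
 */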
2256
Joerg Roedel2452d9d2015-07-23 16:20:14 +02002257static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002258{
Weidong Hanc7151a82008-12-08 22:51:37 +08002259 if (!iommu)
2260 return;
Weidong Han8c11e792008-12-08 15:29:22 +08002261
2262 clear_context_table(iommu, bus, devfn);
2263 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002264 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002265 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002266}
2267
David Woodhouse109b9b02012-05-25 17:43:02 +01002268static inline void unlink_domain_info(struct device_domain_info *info)
2269{
2270 assert_spin_locked(&device_domain_lock);
2271 list_del(&info->link);
2272 list_del(&info->global);
2273 if (info->dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002274 info->dev->archdata.iommu = NULL;
David Woodhouse109b9b02012-05-25 17:43:02 +01002275}
2276
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002277static void domain_remove_dev_info(struct dmar_domain *domain)
2278{
Yijing Wang3a74ca02014-05-20 20:37:47 +08002279 struct device_domain_info *info, *tmp;
Jiang Liufb170fb2014-07-11 14:19:28 +08002280 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002281
2282 spin_lock_irqsave(&device_domain_lock, flags);
Joerg Roedel76f45fe2015-07-21 18:25:11 +02002283 list_for_each_entry_safe(info, tmp, &domain->devices, link)
Joerg Roedel127c7612015-07-23 17:44:46 +02002284 __dmar_remove_one_dev_info(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002285 spin_unlock_irqrestore(&device_domain_lock, flags);
2286}
2287
2288/*
2289 * find_domain
David Woodhouse1525a292014-03-06 16:19:30 +00002290 * Note: struct device->archdata.iommu stores the device_domain_info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002291 */
David Woodhouse1525a292014-03-06 16:19:30 +00002292static struct dmar_domain *find_domain(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002293{
2294 struct device_domain_info *info;
2295
2296 /* No lock here, assumes no domain exit in normal case */
David Woodhouse1525a292014-03-06 16:19:30 +00002297 info = dev->archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002298 if (info)
2299 return info->domain;
2300 return NULL;
2301}
2302
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002303static inline struct device_domain_info *
Jiang Liu745f2582014-02-19 14:07:26 +08002304dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2305{
2306 struct device_domain_info *info;
2307
2308 list_for_each_entry(info, &device_domain_list, global)
David Woodhouse41e80dca2014-03-09 13:55:54 -07002309 if (info->iommu->segment == segment && info->bus == bus &&
Jiang Liu745f2582014-02-19 14:07:26 +08002310 info->devfn == devfn)
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002311 return info;
Jiang Liu745f2582014-02-19 14:07:26 +08002312
2313 return NULL;
2314}
2315
Joerg Roedel5db31562015-07-22 12:40:43 +02002316static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2317 int bus, int devfn,
2318 struct device *dev,
2319 struct dmar_domain *domain)
Jiang Liu745f2582014-02-19 14:07:26 +08002320{
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002321 struct dmar_domain *found = NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002322 struct device_domain_info *info;
2323 unsigned long flags;
Joerg Roedeld160aca2015-07-22 11:52:53 +02002324 int ret;
Jiang Liu745f2582014-02-19 14:07:26 +08002325
2326 info = alloc_devinfo_mem();
2327 if (!info)
David Woodhouseb718cd32014-03-09 13:11:33 -07002328 return NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002329
Jiang Liu745f2582014-02-19 14:07:26 +08002330 info->bus = bus;
2331 info->devfn = devfn;
David Woodhouseb16d0cb2015-10-12 14:17:37 +01002332 info->ats_supported = info->pasid_supported = info->pri_supported = 0;
2333 info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
2334 info->ats_qdep = 0;
Jiang Liu745f2582014-02-19 14:07:26 +08002335 info->dev = dev;
2336 info->domain = domain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002337 info->iommu = iommu;
Jiang Liu745f2582014-02-19 14:07:26 +08002338
David Woodhouseb16d0cb2015-10-12 14:17:37 +01002339 if (dev && dev_is_pci(dev)) {
2340 struct pci_dev *pdev = to_pci_dev(info->dev);
2341
2342 if (ecap_dev_iotlb_support(iommu->ecap) &&
2343 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
2344 dmar_find_matched_atsr_unit(pdev))
2345 info->ats_supported = 1;
2346
2347 if (ecs_enabled(iommu)) {
2348 if (pasid_enabled(iommu)) {
2349 int features = pci_pasid_features(pdev);
2350 if (features >= 0)
2351 info->pasid_supported = features | 1;
2352 }
2353
2354 if (info->ats_supported && ecap_prs(iommu->ecap) &&
2355 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
2356 info->pri_supported = 1;
2357 }
2358 }
2359
Jiang Liu745f2582014-02-19 14:07:26 +08002360 spin_lock_irqsave(&device_domain_lock, flags);
2361 if (dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002362 found = find_domain(dev);
Joerg Roedelf303e502015-07-23 18:37:13 +02002363
2364 if (!found) {
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002365 struct device_domain_info *info2;
David Woodhouse41e80dca2014-03-09 13:55:54 -07002366 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
Joerg Roedelf303e502015-07-23 18:37:13 +02002367 if (info2) {
2368 found = info2->domain;
2369 info2->dev = dev;
2370 }
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002371 }
Joerg Roedelf303e502015-07-23 18:37:13 +02002372
Jiang Liu745f2582014-02-19 14:07:26 +08002373 if (found) {
2374 spin_unlock_irqrestore(&device_domain_lock, flags);
2375 free_devinfo_mem(info);
David Woodhouseb718cd32014-03-09 13:11:33 -07002376 /* Caller must free the original domain */
2377 return found;
Jiang Liu745f2582014-02-19 14:07:26 +08002378 }
2379
Joerg Roedeld160aca2015-07-22 11:52:53 +02002380 spin_lock(&iommu->lock);
2381 ret = domain_attach_iommu(domain, iommu);
2382 spin_unlock(&iommu->lock);
2383
2384 if (ret) {
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002385 spin_unlock_irqrestore(&device_domain_lock, flags);
Sudip Mukherjee499f3aa2015-09-18 16:27:07 +05302386 free_devinfo_mem(info);
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002387 return NULL;
2388 }
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002389
David Woodhouseb718cd32014-03-09 13:11:33 -07002390 list_add(&info->link, &domain->devices);
2391 list_add(&info->global, &device_domain_list);
2392 if (dev)
2393 dev->archdata.iommu = info;
2394 spin_unlock_irqrestore(&device_domain_lock, flags);
2395
Joerg Roedelcc4e2572015-07-22 10:04:36 +02002396 if (dev && domain_context_mapping(domain, dev)) {
2397 pr_err("Domain context map for %s failed\n", dev_name(dev));
Joerg Roedele6de0f82015-07-22 16:30:36 +02002398 dmar_remove_one_dev_info(domain, dev);
Joerg Roedelcc4e2572015-07-22 10:04:36 +02002399 return NULL;
2400 }
2401
David Woodhouseb718cd32014-03-09 13:11:33 -07002402 return domain;
Jiang Liu745f2582014-02-19 14:07:26 +08002403}
2404
Alex Williamson579305f2014-07-03 09:51:43 -06002405static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2406{
2407 *(u16 *)opaque = alias;
2408 return 0;
2409}
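/*
 * get_last_alias() just keeps overwriting its argument, so once
 * pci_for_each_dma_alias() returns, *opaque holds whichever alias was
 * visited last -- typically the requester ID the IOMMU actually sees for
 * devices behind (or quirked by) bridges.
 */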
2410
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002411/* domain is initialized */
David Woodhouse146922e2014-03-09 15:44:17 -07002412static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002413{
Joerg Roedelcc4e2572015-07-22 10:04:36 +02002414 struct device_domain_info *info = NULL;
Alex Williamson579305f2014-07-03 09:51:43 -06002415 struct dmar_domain *domain, *tmp;
2416 struct intel_iommu *iommu;
Joerg Roedel08a7f452015-07-23 18:09:11 +02002417 u16 req_id, dma_alias;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002418 unsigned long flags;
Yijing Wangaa4d0662014-05-26 20:14:06 +08002419 u8 bus, devfn;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002420
David Woodhouse146922e2014-03-09 15:44:17 -07002421 domain = find_domain(dev);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002422 if (domain)
2423 return domain;
2424
David Woodhouse146922e2014-03-09 15:44:17 -07002425 iommu = device_to_iommu(dev, &bus, &devfn);
2426 if (!iommu)
Alex Williamson579305f2014-07-03 09:51:43 -06002427 return NULL;
2428
Joerg Roedel08a7f452015-07-23 18:09:11 +02002429 req_id = ((u16)bus << 8) | devfn;
2430
Alex Williamson579305f2014-07-03 09:51:43 -06002431 if (dev_is_pci(dev)) {
2432 struct pci_dev *pdev = to_pci_dev(dev);
2433
2434 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2435
2436 spin_lock_irqsave(&device_domain_lock, flags);
2437 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2438 PCI_BUS_NUM(dma_alias),
2439 dma_alias & 0xff);
2440 if (info) {
2441 iommu = info->iommu;
2442 domain = info->domain;
2443 }
2444 spin_unlock_irqrestore(&device_domain_lock, flags);
2445
2446		/* The DMA alias already has a domain; use it */
2447 if (info)
2448 goto found_domain;
2449 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002450
David Woodhouse146922e2014-03-09 15:44:17 -07002451 /* Allocate and initialize new domain for the device */
Jiang Liuab8dfe22014-07-11 14:19:27 +08002452 domain = alloc_domain(0);
Jiang Liu745f2582014-02-19 14:07:26 +08002453 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002454 return NULL;
Joerg Roedeldc534b22015-07-22 12:44:02 +02002455 if (domain_init(domain, iommu, gaw)) {
Alex Williamson579305f2014-07-03 09:51:43 -06002456 domain_exit(domain);
2457 return NULL;
2458 }
2459
2460 /* register PCI DMA alias device */
Joerg Roedel08a7f452015-07-23 18:09:11 +02002461 if (req_id != dma_alias && dev_is_pci(dev)) {
Joerg Roedel5db31562015-07-22 12:40:43 +02002462 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2463 dma_alias & 0xff, NULL, domain);
Alex Williamson579305f2014-07-03 09:51:43 -06002464
2465 if (!tmp || tmp != domain) {
2466 domain_exit(domain);
2467 domain = tmp;
2468 }
2469
David Woodhouseb718cd32014-03-09 13:11:33 -07002470 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002471 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002472 }
2473
2474found_domain:
Joerg Roedel5db31562015-07-22 12:40:43 +02002475 tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
Alex Williamson579305f2014-07-03 09:51:43 -06002476
2477 if (!tmp || tmp != domain) {
2478 domain_exit(domain);
2479 domain = tmp;
2480 }
David Woodhouseb718cd32014-03-09 13:11:33 -07002481
2482 return domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002483}
2484
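/*
 * Reserve the IOVA range covering [start, end] and install a 1:1
 * (identity) mapping for it in the given domain.
 */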
David Woodhouseb2132032009-06-26 18:50:28 +01002485static int iommu_domain_identity_map(struct dmar_domain *domain,
2486 unsigned long long start,
2487 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002488{
David Woodhousec5395d52009-06-28 16:35:56 +01002489 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2490 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002491
David Woodhousec5395d52009-06-28 16:35:56 +01002492 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2493 dma_to_mm_pfn(last_vpfn))) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002494 pr_err("Reserving iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002495 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002496 }
2497
Joerg Roedelaf1089c2015-07-21 15:45:19 +02002498 pr_debug("Mapping reserved region %llx-%llx\n", start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002499 /*
2500	 * The RMRR range might overlap with the physical memory range,
2501	 * so clear any existing mapping first
2502 */
David Woodhousec5395d52009-06-28 16:35:56 +01002503 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002504
David Woodhousec5395d52009-06-28 16:35:56 +01002505 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2506 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002507 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002508}
2509
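/*
 * Sanity-check an identity-map request (typically an RMRR) against the
 * domain's address width and known broken-BIOS cases, then create the
 * 1:1 mapping.  Hardware passthrough on si_domain needs no page tables
 * and is skipped entirely.
 */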
Joerg Roedeld66ce542015-09-23 19:00:10 +02002510static int domain_prepare_identity_map(struct device *dev,
2511 struct dmar_domain *domain,
2512 unsigned long long start,
2513 unsigned long long end)
David Woodhouseb2132032009-06-26 18:50:28 +01002514{
David Woodhouse19943b02009-08-04 16:19:20 +01002515	/* For _hardware_ passthrough, don't bother. But for software
2516	   passthrough, we do it anyway -- it may indicate a memory
2517	   range which is reserved in E820 and therefore didn't get
2518	   set up in si_domain to start with */
2519 if (domain == si_domain && hw_pass_through) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002520 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2521 dev_name(dev), start, end);
David Woodhouse19943b02009-08-04 16:19:20 +01002522 return 0;
2523 }
2524
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002525 pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2526 dev_name(dev), start, end);
2527
David Woodhouse5595b522009-12-02 09:21:55 +00002528 if (end < start) {
2529 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2530 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2531 dmi_get_system_info(DMI_BIOS_VENDOR),
2532 dmi_get_system_info(DMI_BIOS_VERSION),
2533 dmi_get_system_info(DMI_PRODUCT_VERSION));
Joerg Roedeld66ce542015-09-23 19:00:10 +02002534 return -EIO;
David Woodhouse5595b522009-12-02 09:21:55 +00002535 }
2536
David Woodhouse2ff729f2009-08-26 14:25:41 +01002537 if (end >> agaw_to_width(domain->agaw)) {
2538 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2539 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2540 agaw_to_width(domain->agaw),
2541 dmi_get_system_info(DMI_BIOS_VENDOR),
2542 dmi_get_system_info(DMI_BIOS_VERSION),
2543 dmi_get_system_info(DMI_PRODUCT_VERSION));
Joerg Roedeld66ce542015-09-23 19:00:10 +02002544 return -EIO;
David Woodhouse2ff729f2009-08-26 14:25:41 +01002545 }
David Woodhouse19943b02009-08-04 16:19:20 +01002546
Joerg Roedeld66ce542015-09-23 19:00:10 +02002547 return iommu_domain_identity_map(domain, start, end);
2548}
2549
2550static int iommu_prepare_identity_map(struct device *dev,
2551 unsigned long long start,
2552 unsigned long long end)
2553{
2554 struct dmar_domain *domain;
2555 int ret;
2556
2557 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2558 if (!domain)
2559 return -ENOMEM;
2560
2561 ret = domain_prepare_identity_map(dev, domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002562 if (ret)
Joerg Roedeld66ce542015-09-23 19:00:10 +02002563 domain_exit(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002564
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002565 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002566}
2567
2568static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
David Woodhouse0b9d9752014-03-09 15:48:15 -07002569 struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002570{
David Woodhouse0b9d9752014-03-09 15:48:15 -07002571 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002572 return 0;
David Woodhouse0b9d9752014-03-09 15:48:15 -07002573 return iommu_prepare_identity_map(dev, rmrr->base_address,
2574 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002575}
2576
Suresh Siddhad3f13812011-08-23 17:05:25 -07002577#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002578static inline void iommu_prepare_isa(void)
2579{
2580 struct pci_dev *pdev;
2581 int ret;
2582
2583 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2584 if (!pdev)
2585 return;
2586
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002587 pr_info("Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse0b9d9752014-03-09 15:48:15 -07002588 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002589
2590 if (ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002591 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002592
Yijing Wang9b27e822014-05-20 20:37:52 +08002593 pci_dev_put(pdev);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002594}
2595#else
2596static inline void iommu_prepare_isa(void)
2597{
2598 return;
2599}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002600#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002601
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002602static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002603
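/*
 * Allocate and initialize the static identity (si) domain.  With
 * hardware passthrough no page tables are needed; otherwise every
 * usable memory range of every online node is identity-mapped.
 */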
Matt Kraai071e1372009-08-23 22:30:22 -07002604static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002605{
David Woodhousec7ab48d2009-06-26 19:10:36 +01002606 int nid, ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002607
Jiang Liuab8dfe22014-07-11 14:19:27 +08002608 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002609 if (!si_domain)
2610 return -EFAULT;
2611
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002612 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2613 domain_exit(si_domain);
2614 return -EFAULT;
2615 }
2616
Joerg Roedel0dc79712015-07-21 15:40:06 +02002617 pr_debug("Identity mapping domain allocated\n");
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002618
David Woodhouse19943b02009-08-04 16:19:20 +01002619 if (hw)
2620 return 0;
2621
David Woodhousec7ab48d2009-06-26 19:10:36 +01002622 for_each_online_node(nid) {
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002623 unsigned long start_pfn, end_pfn;
2624 int i;
2625
2626 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2627 ret = iommu_domain_identity_map(si_domain,
2628 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2629 if (ret)
2630 return ret;
2631 }
David Woodhousec7ab48d2009-06-26 19:10:36 +01002632 }
2633
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002634 return 0;
2635}
2636
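/* Return non-zero if @dev is currently attached to the si (identity) domain */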
David Woodhouse9b226622014-03-09 14:03:28 -07002637static int identity_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002638{
2639 struct device_domain_info *info;
2640
2641 if (likely(!iommu_identity_mapping))
2642 return 0;
2643
David Woodhouse9b226622014-03-09 14:03:28 -07002644 info = dev->archdata.iommu;
Mike Traviscb452a42011-05-28 13:15:03 -05002645 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2646 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002647
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002648 return 0;
2649}
2650
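/*
 * Attach @dev to @domain.  Fails with -EBUSY if the device already
 * belongs to a different domain.
 */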
Joerg Roedel28ccce02015-07-21 14:45:31 +02002651static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002652{
David Woodhouse0ac72662014-03-09 13:19:22 -07002653 struct dmar_domain *ndomain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002654 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002655 u8 bus, devfn;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002656
David Woodhouse5913c9b2014-03-09 16:27:31 -07002657 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002658 if (!iommu)
2659 return -ENODEV;
2660
Joerg Roedel5db31562015-07-22 12:40:43 +02002661 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
David Woodhouse0ac72662014-03-09 13:19:22 -07002662 if (ndomain != domain)
2663 return -EBUSY;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002664
2665 return 0;
2666}
2667
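/* Check whether any RMRR in the DMAR table covers this device */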
David Woodhouse0b9d9752014-03-09 15:48:15 -07002668static bool device_has_rmrr(struct device *dev)
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002669{
2670 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002671 struct device *tmp;
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002672 int i;
2673
Jiang Liu0e242612014-02-19 14:07:34 +08002674 rcu_read_lock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002675 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002676 /*
2677 * Return TRUE if this RMRR contains the device that
2678 * is passed in.
2679 */
2680 for_each_active_dev_scope(rmrr->devices,
2681 rmrr->devices_cnt, i, tmp)
David Woodhouse0b9d9752014-03-09 15:48:15 -07002682 if (tmp == dev) {
Jiang Liu0e242612014-02-19 14:07:34 +08002683 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002684 return true;
Jiang Liub683b232014-02-19 14:07:32 +08002685 }
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002686 }
Jiang Liu0e242612014-02-19 14:07:34 +08002687 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002688 return false;
2689}
2690
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002691/*
2692 * There are a couple cases where we need to restrict the functionality of
2693 * devices associated with RMRRs. The first is when evaluating a device for
2694 * identity mapping because problems exist when devices are moved in and out
2695 * of domains and their respective RMRR information is lost. This means that
2696 * a device with associated RMRRs will never be in a "passthrough" domain.
2697 * The second is use of the device through the IOMMU API. This interface
2698 * expects to have full control of the IOVA space for the device. We cannot
2699 * satisfy both the requirement that RMRR access is maintained and have an
2700 * unencumbered IOVA space. We also have no ability to quiesce the device's
2701 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2702 * We therefore prevent devices associated with an RMRR from participating in
2703 * the IOMMU API, which eliminates them from device assignment.
2704 *
2705 * In both cases we assume that PCI USB devices with RMRRs have them largely
2706 * for historical reasons and that the RMRR space is not actively used post
2707 * boot. This exclusion may change if vendors begin to abuse it.
David Woodhouse18436af2015-03-25 15:05:47 +00002708 *
2709 * The same exception is made for graphics devices, with the requirement that
2710 * any use of the RMRR regions will be torn down before assigning the device
2711 * to a guest.
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002712 */
2713static bool device_is_rmrr_locked(struct device *dev)
2714{
2715 if (!device_has_rmrr(dev))
2716 return false;
2717
2718 if (dev_is_pci(dev)) {
2719 struct pci_dev *pdev = to_pci_dev(dev);
2720
David Woodhouse18436af2015-03-25 15:05:47 +00002721 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002722 return false;
2723 }
2724
2725 return true;
2726}
2727
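/*
 * Decide whether @dev should be placed in the static 1:1 (identity)
 * domain.  RMRR-locked devices never are; conventional-PCI devices
 * behind a bridge are excluded because they share a source-id with
 * their siblings.  After boot (!startup), devices whose DMA mask
 * cannot reach all of memory are excluded as well.
 */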
David Woodhouse3bdb2592014-03-09 16:03:08 -07002728static int iommu_should_identity_map(struct device *dev, int startup)
David Woodhouse6941af22009-07-04 18:24:27 +01002729{
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002730
David Woodhouse3bdb2592014-03-09 16:03:08 -07002731 if (dev_is_pci(dev)) {
2732 struct pci_dev *pdev = to_pci_dev(dev);
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002733
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002734 if (device_is_rmrr_locked(dev))
David Woodhouse3bdb2592014-03-09 16:03:08 -07002735 return 0;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002736
David Woodhouse3bdb2592014-03-09 16:03:08 -07002737 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2738 return 1;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002739
David Woodhouse3bdb2592014-03-09 16:03:08 -07002740 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2741 return 1;
2742
2743 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2744 return 0;
2745
2746 /*
2747 * We want to start off with all devices in the 1:1 domain, and
2748 * take them out later if we find they can't access all of memory.
2749 *
2750 * However, we can't do this for PCI devices behind bridges,
2751 * because all PCI devices behind the same bridge will end up
2752 * with the same source-id on their transactions.
2753 *
2754 * Practically speaking, we can't change things around for these
2755 * devices at run-time, because we can't be sure there'll be no
2756 * DMA transactions in flight for any of their siblings.
2757 *
2758 * So PCI devices (unless they're on the root bus) as well as
2759 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2760 * the 1:1 domain, just in _case_ one of their siblings turns out
2761 * not to be able to map all of memory.
2762 */
2763 if (!pci_is_pcie(pdev)) {
2764 if (!pci_is_root_bus(pdev->bus))
2765 return 0;
2766 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2767 return 0;
2768 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2769 return 0;
2770 } else {
2771 if (device_has_rmrr(dev))
2772 return 0;
2773 }
David Woodhouse6941af22009-07-04 18:24:27 +01002774
David Woodhouse3dfc8132009-07-04 19:11:08 +01002775 /*
David Woodhouse3dfc8132009-07-04 19:11:08 +01002776 * At boot time, we don't yet know if devices will be 64-bit capable.
David Woodhouse3bdb2592014-03-09 16:03:08 -07002777 * Assume that they will — if they turn out not to be, then we can
David Woodhouse3dfc8132009-07-04 19:11:08 +01002778 * take them out of the 1:1 domain later.
2779 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002780 if (!startup) {
2781 /*
2782 * If the device's dma_mask is less than the system's memory
2783 * size then this is not a candidate for identity mapping.
2784 */
David Woodhouse3bdb2592014-03-09 16:03:08 -07002785 u64 dma_mask = *dev->dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002786
David Woodhouse3bdb2592014-03-09 16:03:08 -07002787 if (dev->coherent_dma_mask &&
2788 dev->coherent_dma_mask < dma_mask)
2789 dma_mask = dev->coherent_dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002790
David Woodhouse3bdb2592014-03-09 16:03:08 -07002791 return dma_mask >= dma_get_required_mask(dev);
Chris Wright8fcc5372011-05-28 13:15:02 -05002792 }
David Woodhouse6941af22009-07-04 18:24:27 +01002793
2794 return 1;
2795}
2796
David Woodhousecf04eee2014-03-21 16:49:04 +00002797static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2798{
2799 int ret;
2800
2801 if (!iommu_should_identity_map(dev, 1))
2802 return 0;
2803
Joerg Roedel28ccce02015-07-21 14:45:31 +02002804 ret = domain_add_dev_info(si_domain, dev);
David Woodhousecf04eee2014-03-21 16:49:04 +00002805 if (!ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002806 pr_info("%s identity mapping for device %s\n",
2807 hw ? "Hardware" : "Software", dev_name(dev));
David Woodhousecf04eee2014-03-21 16:49:04 +00002808 else if (ret == -ENODEV)
2809 /* device not associated with an iommu */
2810 ret = 0;
2811
2812 return ret;
2813}
2814
2815
Matt Kraai071e1372009-08-23 22:30:22 -07002816static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002817{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002818 struct pci_dev *pdev = NULL;
David Woodhousecf04eee2014-03-21 16:49:04 +00002819 struct dmar_drhd_unit *drhd;
2820 struct intel_iommu *iommu;
2821 struct device *dev;
2822 int i;
2823 int ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002824
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002825 for_each_pci_dev(pdev) {
David Woodhousecf04eee2014-03-21 16:49:04 +00002826 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2827 if (ret)
2828 return ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002829 }
2830
David Woodhousecf04eee2014-03-21 16:49:04 +00002831 for_each_active_iommu(iommu, drhd)
2832 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2833 struct acpi_device_physical_node *pn;
2834 struct acpi_device *adev;
2835
2836 if (dev->bus != &acpi_bus_type)
2837 continue;
Joerg Roedel86080cc2015-06-12 12:27:16 +02002838
David Woodhousecf04eee2014-03-21 16:49:04 +00002839		adev = to_acpi_device(dev);
2840 mutex_lock(&adev->physical_node_lock);
2841 list_for_each_entry(pn, &adev->physical_node_list, node) {
2842 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2843 if (ret)
2844 break;
2845 }
2846 mutex_unlock(&adev->physical_node_lock);
2847 if (ret)
2848 return ret;
2849 }
2850
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002851 return 0;
2852}
2853
Jiang Liuffebeb42014-11-09 22:48:02 +08002854static void intel_iommu_init_qi(struct intel_iommu *iommu)
2855{
2856 /*
2857 * Start from the sane iommu hardware state.
2858 * If the queued invalidation is already initialized by us
2859 * (for example, while enabling interrupt-remapping) then
2860 * we got the things already rolling from a sane state.
2861 */
2862 if (!iommu->qi) {
2863 /*
2864 * Clear any previous faults.
2865 */
2866 dmar_fault(-1, iommu);
2867 /*
2868 * Disable queued invalidation if supported and already enabled
2869 * before OS handover.
2870 */
2871 dmar_disable_qi(iommu);
2872 }
2873
2874 if (dmar_enable_qi(iommu)) {
2875 /*
2876 * Queued Invalidate not enabled, use Register Based Invalidate
2877 */
2878 iommu->flush.flush_context = __iommu_flush_context;
2879 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002880 pr_info("%s: Using Register based invalidation\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08002881 iommu->name);
2882 } else {
2883 iommu->flush.flush_context = qi_flush_context;
2884 iommu->flush.flush_iotlb = qi_flush_iotlb;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002885 pr_info("%s: Using Queued invalidation\n", iommu->name);
Jiang Liuffebeb42014-11-09 22:48:02 +08002886 }
2887}
2888
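/*
 * Copy one bus worth of context entries from the previous kernel's
 * tables (kdump case) into newly allocated pages, reserving each
 * referenced domain ID and marking every entry as copied.
 */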
Joerg Roedel091d42e2015-06-12 11:56:10 +02002889static int copy_context_table(struct intel_iommu *iommu,
Dan Williamsdfddb962015-10-09 18:16:46 -04002890 struct root_entry *old_re,
Joerg Roedel091d42e2015-06-12 11:56:10 +02002891 struct context_entry **tbl,
2892 int bus, bool ext)
2893{
Joerg Roedeldbcd8612015-06-12 12:02:09 +02002894 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
Joerg Roedel543c8dc2015-08-13 11:56:59 +02002895 struct context_entry *new_ce = NULL, ce;
Dan Williamsdfddb962015-10-09 18:16:46 -04002896 struct context_entry *old_ce = NULL;
Joerg Roedel543c8dc2015-08-13 11:56:59 +02002897 struct root_entry re;
Joerg Roedel091d42e2015-06-12 11:56:10 +02002898 phys_addr_t old_ce_phys;
2899
2900 tbl_idx = ext ? bus * 2 : bus;
Dan Williamsdfddb962015-10-09 18:16:46 -04002901 memcpy(&re, old_re, sizeof(re));
Joerg Roedel091d42e2015-06-12 11:56:10 +02002902
2903 for (devfn = 0; devfn < 256; devfn++) {
2904 /* First calculate the correct index */
2905 idx = (ext ? devfn * 2 : devfn) % 256;
2906
2907 if (idx == 0) {
2908 /* First save what we may have and clean up */
2909 if (new_ce) {
2910 tbl[tbl_idx] = new_ce;
2911 __iommu_flush_cache(iommu, new_ce,
2912 VTD_PAGE_SIZE);
2913 pos = 1;
2914 }
2915
2916 if (old_ce)
2917 iounmap(old_ce);
2918
2919 ret = 0;
2920 if (devfn < 0x80)
Joerg Roedel543c8dc2015-08-13 11:56:59 +02002921 old_ce_phys = root_entry_lctp(&re);
Joerg Roedel091d42e2015-06-12 11:56:10 +02002922 else
Joerg Roedel543c8dc2015-08-13 11:56:59 +02002923 old_ce_phys = root_entry_uctp(&re);
Joerg Roedel091d42e2015-06-12 11:56:10 +02002924
2925 if (!old_ce_phys) {
2926 if (ext && devfn == 0) {
2927 /* No LCTP, try UCTP */
2928 devfn = 0x7f;
2929 continue;
2930 } else {
2931 goto out;
2932 }
2933 }
2934
2935 ret = -ENOMEM;
Dan Williamsdfddb962015-10-09 18:16:46 -04002936 old_ce = memremap(old_ce_phys, PAGE_SIZE,
2937 MEMREMAP_WB);
Joerg Roedel091d42e2015-06-12 11:56:10 +02002938 if (!old_ce)
2939 goto out;
2940
2941 new_ce = alloc_pgtable_page(iommu->node);
2942 if (!new_ce)
2943 goto out_unmap;
2944
2945 ret = 0;
2946 }
2947
2948 /* Now copy the context entry */
Dan Williamsdfddb962015-10-09 18:16:46 -04002949 memcpy(&ce, old_ce + idx, sizeof(ce));
Joerg Roedel091d42e2015-06-12 11:56:10 +02002950
Joerg Roedelcf484d02015-06-12 12:21:46 +02002951 if (!__context_present(&ce))
Joerg Roedel091d42e2015-06-12 11:56:10 +02002952 continue;
2953
Joerg Roedeldbcd8612015-06-12 12:02:09 +02002954 did = context_domain_id(&ce);
2955 if (did >= 0 && did < cap_ndoms(iommu->cap))
2956 set_bit(did, iommu->domain_ids);
2957
Joerg Roedelcf484d02015-06-12 12:21:46 +02002958 /*
2959 * We need a marker for copied context entries. This
2960 * marker needs to work for the old format as well as
2961 * for extended context entries.
2962 *
2963 * Bit 67 of the context entry is used. In the old
2964 * format this bit is available to software, in the
2965 * extended format it is the PGE bit, but PGE is ignored
2966 * by HW if PASIDs are disabled (and thus still
2967 * available).
2968 *
2969 * So disable PASIDs first and then mark the entry
2970 * copied. This means that we don't copy PASID
2971 * translations from the old kernel, but this is fine as
2972 * faults there are not fatal.
2973 */
2974 context_clear_pasid_enable(&ce);
2975 context_set_copied(&ce);
2976
Joerg Roedel091d42e2015-06-12 11:56:10 +02002977 new_ce[idx] = ce;
2978 }
2979
2980 tbl[tbl_idx + pos] = new_ce;
2981
2982 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
2983
2984out_unmap:
Dan Williamsdfddb962015-10-09 18:16:46 -04002985 memunmap(old_ce);
Joerg Roedel091d42e2015-06-12 11:56:10 +02002986
2987out:
2988 return ret;
2989}
2990
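/*
 * In a kdump kernel, take over the root/context tables left behind by
 * the crashed kernel so that in-flight DMA keeps working while this
 * kernel boots.
 */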
2991static int copy_translation_tables(struct intel_iommu *iommu)
2992{
2993 struct context_entry **ctxt_tbls;
Dan Williamsdfddb962015-10-09 18:16:46 -04002994 struct root_entry *old_rt;
Joerg Roedel091d42e2015-06-12 11:56:10 +02002995 phys_addr_t old_rt_phys;
2996 int ctxt_table_entries;
2997 unsigned long flags;
2998 u64 rtaddr_reg;
2999 int bus, ret;
Joerg Roedelc3361f22015-06-12 12:39:25 +02003000 bool new_ext, ext;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003001
3002 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3003 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
Joerg Roedelc3361f22015-06-12 12:39:25 +02003004 new_ext = !!ecap_ecs(iommu->ecap);
3005
3006 /*
3007 * The RTT bit can only be changed when translation is disabled,
3008 * but disabling translation means to open a window for data
3009 * corruption. So bail out and don't copy anything if we would
3010 * have to change the bit.
3011 */
3012 if (new_ext != ext)
3013 return -EINVAL;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003014
3015 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
3016 if (!old_rt_phys)
3017 return -EINVAL;
3018
Dan Williamsdfddb962015-10-09 18:16:46 -04003019 old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003020 if (!old_rt)
3021 return -ENOMEM;
3022
3023 /* This is too big for the stack - allocate it from slab */
3024 ctxt_table_entries = ext ? 512 : 256;
3025 ret = -ENOMEM;
3026 ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
3027 if (!ctxt_tbls)
3028 goto out_unmap;
3029
3030 for (bus = 0; bus < 256; bus++) {
3031 ret = copy_context_table(iommu, &old_rt[bus],
3032 ctxt_tbls, bus, ext);
3033 if (ret) {
3034 pr_err("%s: Failed to copy context table for bus %d\n",
3035 iommu->name, bus);
3036 continue;
3037 }
3038 }
3039
3040 spin_lock_irqsave(&iommu->lock, flags);
3041
3042 /* Context tables are copied, now write them to the root_entry table */
3043 for (bus = 0; bus < 256; bus++) {
3044 int idx = ext ? bus * 2 : bus;
3045 u64 val;
3046
3047 if (ctxt_tbls[idx]) {
3048 val = virt_to_phys(ctxt_tbls[idx]) | 1;
3049 iommu->root_entry[bus].lo = val;
3050 }
3051
3052 if (!ext || !ctxt_tbls[idx + 1])
3053 continue;
3054
3055 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3056 iommu->root_entry[bus].hi = val;
3057 }
3058
3059 spin_unlock_irqrestore(&iommu->lock, flags);
3060
3061 kfree(ctxt_tbls);
3062
3063 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
3064
3065 ret = 0;
3066
3067out_unmap:
Dan Williamsdfddb962015-10-09 18:16:46 -04003068 memunmap(old_rt);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003069
3070 return ret;
3071}
3072
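/*
 * One-time DMAR initialization: allocate per-IOMMU state and root
 * entries, set up the identity domain and RMRR/ISA unity maps, then
 * enable queued invalidation, fault reporting and translation.
 */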
Joseph Cihulab7792602011-05-03 00:08:37 -07003073static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003074{
3075 struct dmar_drhd_unit *drhd;
3076 struct dmar_rmrr_unit *rmrr;
Joerg Roedela87f4912015-06-12 12:32:54 +02003077 bool copied_tables = false;
David Woodhouse832bd852014-03-07 15:08:36 +00003078 struct device *dev;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003079 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07003080 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003081
3082 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003083 * for each drhd
3084 * allocate root
3085 * initialize and program root entry to not present
3086 * endfor
3087 */
3088 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08003089 /*
3090		 * lock not needed as this is only incremented in the single
3091		 * threaded kernel __init code path; all other accesses are
3092		 * read only
3093 */
Jiang Liu78d8e702014-11-09 22:47:57 +08003094 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
Mike Travis1b198bb2012-03-05 15:05:16 -08003095 g_num_of_iommus++;
3096 continue;
3097 }
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003098 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08003099 }
3100
Jiang Liuffebeb42014-11-09 22:48:02 +08003101 /* Preallocate enough resources for IOMMU hot-addition */
3102 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3103 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3104
Weidong Hand9630fe2008-12-08 11:06:32 +08003105 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3106 GFP_KERNEL);
3107 if (!g_iommus) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003108 pr_err("Allocating global iommu array failed\n");
Weidong Hand9630fe2008-12-08 11:06:32 +08003109 ret = -ENOMEM;
3110 goto error;
3111 }
3112
mark gross80b20dd2008-04-18 13:53:58 -07003113 deferred_flush = kzalloc(g_num_of_iommus *
3114 sizeof(struct deferred_flush_tables), GFP_KERNEL);
3115 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08003116 ret = -ENOMEM;
Jiang Liu989d51f2014-02-19 14:07:21 +08003117 goto free_g_iommus;
mark gross5e0d2a62008-03-04 15:22:08 -08003118 }
3119
Jiang Liu7c919772014-01-06 14:18:18 +08003120 for_each_active_iommu(iommu, drhd) {
Weidong Hand9630fe2008-12-08 11:06:32 +08003121 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003122
Joerg Roedelb63d80d2015-06-12 09:14:34 +02003123 intel_iommu_init_qi(iommu);
3124
Suresh Siddhae61d98d2008-07-10 11:16:35 -07003125 ret = iommu_init_domains(iommu);
3126 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003127 goto free_iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07003128
Joerg Roedel4158c2e2015-06-12 10:14:02 +02003129 init_translation_status(iommu);
3130
Joerg Roedel091d42e2015-06-12 11:56:10 +02003131 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3132 iommu_disable_translation(iommu);
3133 clear_translation_pre_enabled(iommu);
3134 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3135 iommu->name);
3136 }
Joerg Roedel4158c2e2015-06-12 10:14:02 +02003137
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003138 /*
3139 * TBD:
3140 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003141		 * among all IOMMUs. Need to split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003142 */
3143 ret = iommu_alloc_root_entry(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08003144 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003145 goto free_iommu;
Joerg Roedel5f0a7f72015-06-12 09:18:53 +02003146
Joerg Roedel091d42e2015-06-12 11:56:10 +02003147 if (translation_pre_enabled(iommu)) {
3148 pr_info("Translation already enabled - trying to copy translation structures\n");
3149
3150 ret = copy_translation_tables(iommu);
3151 if (ret) {
3152 /*
3153 * We found the IOMMU with translation
3154 * enabled - but failed to copy over the
3155 * old root-entry table. Try to proceed
3156 * by disabling translation now and
3157 * allocating a clean root-entry table.
3158 * This might cause DMAR faults, but
3159 * probably the dump will still succeed.
3160 */
3161 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3162 iommu->name);
3163 iommu_disable_translation(iommu);
3164 clear_translation_pre_enabled(iommu);
3165 } else {
3166 pr_info("Copied translation tables from previous kernel for %s\n",
3167 iommu->name);
Joerg Roedela87f4912015-06-12 12:32:54 +02003168 copied_tables = true;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003169 }
3170 }
3171
Joerg Roedel5f0a7f72015-06-12 09:18:53 +02003172 iommu_flush_write_buffer(iommu);
3173 iommu_set_root_entry(iommu);
3174 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3175 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3176
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003177 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01003178 hw_pass_through = 0;
David Woodhouse8a94ade2015-03-24 14:54:56 +00003179#ifdef CONFIG_INTEL_IOMMU_SVM
3180 if (pasid_enabled(iommu))
3181 intel_svm_alloc_pasid_tables(iommu);
3182#endif
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003183 }
3184
David Woodhouse19943b02009-08-04 16:19:20 +01003185 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07003186 iommu_identity_mapping |= IDENTMAP_ALL;
3187
Suresh Siddhad3f13812011-08-23 17:05:25 -07003188#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07003189 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01003190#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07003191
Joerg Roedel86080cc2015-06-12 12:27:16 +02003192 if (iommu_identity_mapping) {
3193 ret = si_domain_init(hw_pass_through);
3194 if (ret)
3195 goto free_iommu;
3196 }
3197
David Woodhousee0fc7e02009-09-30 09:12:17 -07003198 check_tylersburg_isoch();
3199
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003200 /*
Joerg Roedela87f4912015-06-12 12:32:54 +02003201 * If we copied translations from a previous kernel in the kdump
3202	 * case, we cannot assign the devices to domains now, as that
3203 * would eliminate the old mappings. So skip this part and defer
3204 * the assignment to device driver initialization time.
3205 */
3206 if (copied_tables)
3207 goto domains_done;
3208
3209 /*
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003210	 * If pass through is not set or not enabled, set up context entries
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003211	 * for identity mappings for RMRR, GFX and ISA, and possibly fall back
3212	 * to static identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003213 */
David Woodhouse19943b02009-08-04 16:19:20 +01003214 if (iommu_identity_mapping) {
3215 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3216 if (ret) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003217 pr_crit("Failed to setup IOMMU pass-through\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08003218 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003219 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003220 }
David Woodhouse19943b02009-08-04 16:19:20 +01003221 /*
3222 * For each rmrr
3223 * for each dev attached to rmrr
3224 * do
3225 * locate drhd for dev, alloc domain for dev
3226 * allocate free domain
3227 * allocate page table entries for rmrr
3228 * if context not allocated for bus
3229 * allocate and init context
3230 * set present in root table for this bus
3231 * init context with domain, translation etc
3232 * endfor
3233 * endfor
3234 */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003235 pr_info("Setting RMRR:\n");
David Woodhouse19943b02009-08-04 16:19:20 +01003236 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08003237 /* some BIOS lists non-exist devices in DMAR table. */
3238 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
David Woodhouse832bd852014-03-07 15:08:36 +00003239 i, dev) {
David Woodhouse0b9d9752014-03-09 15:48:15 -07003240 ret = iommu_prepare_rmrr_dev(rmrr, dev);
David Woodhouse19943b02009-08-04 16:19:20 +01003241 if (ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003242 pr_err("Mapping reserved region failed\n");
David Woodhouse19943b02009-08-04 16:19:20 +01003243 }
3244 }
3245
3246 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07003247
Joerg Roedela87f4912015-06-12 12:32:54 +02003248domains_done:
3249
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003250 /*
3251 * for each drhd
3252 * enable fault log
3253 * global invalidate context cache
3254 * global invalidate iotlb
3255 * enable translation
3256 */
Jiang Liu7c919772014-01-06 14:18:18 +08003257 for_each_iommu(iommu, drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07003258 if (drhd->ignored) {
3259 /*
3260 * we always have to disable PMRs or DMA may fail on
3261 * this device
3262 */
3263 if (force_on)
Jiang Liu7c919772014-01-06 14:18:18 +08003264 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003265 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07003266 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003267
3268 iommu_flush_write_buffer(iommu);
3269
David Woodhousea222a7f2015-10-07 23:35:18 +01003270#ifdef CONFIG_INTEL_IOMMU_SVM
3271 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
3272 ret = intel_svm_enable_prq(iommu);
3273 if (ret)
3274 goto free_iommu;
3275 }
3276#endif
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07003277 ret = dmar_set_interrupt(iommu);
3278 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003279 goto free_iommu;
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07003280
Joerg Roedel8939ddf2015-06-12 14:40:01 +02003281 if (!translation_pre_enabled(iommu))
3282 iommu_enable_translation(iommu);
3283
David Woodhouseb94996c2009-09-19 15:28:12 -07003284 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003285 }
3286
3287 return 0;
Jiang Liu989d51f2014-02-19 14:07:21 +08003288
3289free_iommu:
Jiang Liuffebeb42014-11-09 22:48:02 +08003290 for_each_active_iommu(iommu, drhd) {
3291 disable_dmar_iommu(iommu);
Jiang Liua868e6b2014-01-06 14:18:20 +08003292 free_dmar_iommu(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08003293 }
Jiang Liu9bdc5312014-01-06 14:18:27 +08003294 kfree(deferred_flush);
Jiang Liu989d51f2014-02-19 14:07:21 +08003295free_g_iommus:
Weidong Hand9630fe2008-12-08 11:06:32 +08003296 kfree(g_iommus);
Jiang Liu989d51f2014-02-19 14:07:21 +08003297error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003298 return ret;
3299}
3300
David Woodhouse5a5e02a2009-07-04 09:35:44 +01003301/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01003302static struct iova *intel_alloc_iova(struct device *dev,
3303 struct dmar_domain *domain,
3304 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003305{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003306 struct iova *iova = NULL;
3307
David Woodhouse875764d2009-06-28 21:20:51 +01003308 /* Restrict dma_mask to the width that the iommu can handle */
3309 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
Robin Murphy8f6429c2015-07-16 19:40:12 +01003310 /* Ensure we reserve the whole size-aligned region */
3311 nrpages = __roundup_pow_of_two(nrpages);
David Woodhouse875764d2009-06-28 21:20:51 +01003312
3313 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003314 /*
3315 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07003316 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08003317 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003318 */
David Woodhouse875764d2009-06-28 21:20:51 +01003319 iova = alloc_iova(&domain->iovad, nrpages,
3320 IOVA_PFN(DMA_BIT_MASK(32)), 1);
3321 if (iova)
3322 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003323 }
David Woodhouse875764d2009-06-28 21:20:51 +01003324 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
3325 if (unlikely(!iova)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003326		pr_err("Allocating %ld-page iova for %s failed\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003327 nrpages, dev_name(dev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003328 return NULL;
3329 }
3330
3331 return iova;
3332}
3333
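/*
 * Slow path of get_valid_domain_for_dev(): allocate a DMA domain for
 * @dev and replay any RMRR identity mappings that apply to it.
 */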
David Woodhoused4b709f2014-03-09 16:07:40 -07003334static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003335{
Joerg Roedelb1ce5b72015-09-23 19:16:01 +02003336 struct dmar_rmrr_unit *rmrr;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003337 struct dmar_domain *domain;
Joerg Roedelb1ce5b72015-09-23 19:16:01 +02003338 struct device *i_dev;
3339 int i, ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003340
David Woodhoused4b709f2014-03-09 16:07:40 -07003341 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003342 if (!domain) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003343 pr_err("Allocating domain for %s failed\n",
David Woodhoused4b709f2014-03-09 16:07:40 -07003344 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00003345 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003346 }
3347
Joerg Roedelb1ce5b72015-09-23 19:16:01 +02003348 /* We have a new domain - setup possible RMRRs for the device */
3349 rcu_read_lock();
3350 for_each_rmrr_units(rmrr) {
3351 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3352 i, i_dev) {
3353 if (i_dev != dev)
3354 continue;
3355
3356 ret = domain_prepare_identity_map(dev, domain,
3357 rmrr->base_address,
3358 rmrr->end_address);
3359 if (ret)
3360 dev_err(dev, "Mapping reserved region failed\n");
3361 }
3362 }
3363 rcu_read_unlock();
3364
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003365 return domain;
3366}
3367
David Woodhoused4b709f2014-03-09 16:07:40 -07003368static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
David Woodhouse147202a2009-07-07 19:43:20 +01003369{
3370 struct device_domain_info *info;
3371
3372 /* No lock here, assumes no domain exit in normal case */
David Woodhoused4b709f2014-03-09 16:07:40 -07003373 info = dev->archdata.iommu;
David Woodhouse147202a2009-07-07 19:43:20 +01003374 if (likely(info))
3375 return info->domain;
3376
3377 return __get_valid_domain_for_dev(dev);
3378}
3379
David Woodhouseecb509e2014-03-09 16:29:55 -07003380/* Check if the dev needs to go through the non-identity map and unmap process. */
David Woodhouse73676832009-07-04 14:08:36 +01003381static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003382{
3383 int found;
3384
David Woodhouse3d891942014-03-06 15:59:26 +00003385 if (iommu_dummy(dev))
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003386 return 1;
3387
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003388 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003389 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003390
David Woodhouse9b226622014-03-09 14:03:28 -07003391 found = identity_mapping(dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003392 if (found) {
David Woodhouseecb509e2014-03-09 16:29:55 -07003393 if (iommu_should_identity_map(dev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003394 return 1;
3395 else {
3396 /*
3397			 * The 32 bit DMA device is removed from si_domain and falls
3398			 * back to non-identity mapping.
3399 */
Joerg Roedele6de0f82015-07-22 16:30:36 +02003400 dmar_remove_one_dev_info(si_domain, dev);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003401 pr_info("32bit %s uses non-identity mapping\n",
3402 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003403 return 0;
3404 }
3405 } else {
3406 /*
3407		 * In case a 64 bit DMA device is detached from a VM, the device
3408		 * is put into si_domain for identity mapping.
3409 */
David Woodhouseecb509e2014-03-09 16:29:55 -07003410 if (iommu_should_identity_map(dev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003411 int ret;
Joerg Roedel28ccce02015-07-21 14:45:31 +02003412 ret = domain_add_dev_info(si_domain, dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003413 if (!ret) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003414 pr_info("64bit %s uses identity mapping\n",
3415 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003416 return 1;
3417 }
3418 }
3419 }
3420
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003421 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003422}
3423
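/*
 * Core map path for the DMA API: allocate an IOVA, create the page
 * table entries and flush the IOTLB (or write buffer) as required.
 */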
David Woodhouse5040a912014-03-09 16:14:00 -07003424static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003425 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003426{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003427 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003428 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003429 struct iova *iova;
3430 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003431 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08003432 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07003433 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003434
3435 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003436
David Woodhouse5040a912014-03-09 16:14:00 -07003437 if (iommu_no_mapping(dev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003438 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003439
David Woodhouse5040a912014-03-09 16:14:00 -07003440 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003441 if (!domain)
3442 return 0;
3443
Weidong Han8c11e792008-12-08 15:29:22 +08003444 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01003445 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003446
David Woodhouse5040a912014-03-09 16:14:00 -07003447 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003448 if (!iova)
3449 goto error;
3450
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003451 /*
3452 * Check if DMAR supports zero-length reads on write only
3453	 * mappings.
3454 */
3455 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003456 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003457 prot |= DMA_PTE_READ;
3458 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3459 prot |= DMA_PTE_WRITE;
3460 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003461	 * paddr - (paddr + size) might span a partial page, so map the whole
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003462	 * page.  Note: if two parts of one page are separately mapped, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003463	 * might have two guest addresses mapping to the same host paddr, but
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003464	 * this is not a big problem
3465 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01003466 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07003467 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003468 if (ret)
3469 goto error;
3470
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003471 /* it's a non-present to present mapping. Only flush if caching mode */
3472 if (cap_caching_mode(iommu->cap))
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02003473 iommu_flush_iotlb_psi(iommu, domain,
3474 mm_to_dma_pfn(iova->pfn_lo),
3475 size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003476 else
Weidong Han8c11e792008-12-08 15:29:22 +08003477 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003478
David Woodhouse03d6a242009-06-28 15:33:46 +01003479 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3480 start_paddr += paddr & ~PAGE_MASK;
3481 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003482
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003483error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003484 if (iova)
3485 __free_iova(&domain->iovad, iova);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003486 pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
David Woodhouse5040a912014-03-09 16:14:00 -07003487 dev_name(dev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003488 return 0;
3489}
3490
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003491static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3492 unsigned long offset, size_t size,
3493 enum dma_data_direction dir,
3494 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003495{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003496 return __intel_map_single(dev, page_to_phys(page) + offset, size,
David Woodhouse46333e32014-03-10 20:01:21 -07003497 dir, *dev->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003498}
3499
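/*
 * Deferred-unmap machinery: freed IOVAs are batched per IOMMU and the
 * IOTLB flushes are drained either from a timer or once the list hits
 * HIGH_WATER_MARK.  Called with async_umap_flush_lock held.
 */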
mark gross5e0d2a62008-03-04 15:22:08 -08003500static void flush_unmaps(void)
3501{
mark gross80b20dd2008-04-18 13:53:58 -07003502 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08003503
mark gross5e0d2a62008-03-04 15:22:08 -08003504 timer_on = 0;
3505
3506 /* just flush them all */
3507 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08003508 struct intel_iommu *iommu = g_iommus[i];
3509 if (!iommu)
3510 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003511
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003512 if (!deferred_flush[i].next)
3513 continue;
3514
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003515 /* In caching mode, global flushes turn emulation expensive */
3516 if (!cap_caching_mode(iommu->cap))
3517 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08003518 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003519 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08003520 unsigned long mask;
3521 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003522 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08003523
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003524 /* On real hardware multiple invalidations are expensive */
3525 if (cap_caching_mode(iommu->cap))
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02003526 iommu_flush_iotlb_psi(iommu, domain,
Jiang Liua156ef92014-07-11 14:19:36 +08003527 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00003528 !deferred_flush[i].freelist[j], 0);
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003529 else {
Jiang Liua156ef92014-07-11 14:19:36 +08003530 mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003531 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3532 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3533 }
Yu Zhao93a23a72009-05-18 13:51:37 +08003534 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003535 if (deferred_flush[i].freelist[j])
3536 dma_free_pagelist(deferred_flush[i].freelist[j]);
mark gross80b20dd2008-04-18 13:53:58 -07003537 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003538 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003539 }
3540
mark gross5e0d2a62008-03-04 15:22:08 -08003541 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003542}
3543
3544static void flush_unmaps_timeout(unsigned long data)
3545{
mark gross80b20dd2008-04-18 13:53:58 -07003546 unsigned long flags;
3547
3548 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003549 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07003550 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003551}
3552
David Woodhouseea8ea462014-03-05 17:09:32 +00003553static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
mark gross5e0d2a62008-03-04 15:22:08 -08003554{
3555 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07003556 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08003557 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08003558
3559 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07003560 if (list_size == HIGH_WATER_MARK)
3561 flush_unmaps();
3562
Weidong Han8c11e792008-12-08 15:29:22 +08003563 iommu = domain_get_iommu(dom);
3564 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003565
mark gross80b20dd2008-04-18 13:53:58 -07003566 next = deferred_flush[iommu_id].next;
3567 deferred_flush[iommu_id].domain[next] = dom;
3568 deferred_flush[iommu_id].iova[next] = iova;
David Woodhouseea8ea462014-03-05 17:09:32 +00003569 deferred_flush[iommu_id].freelist[next] = freelist;
mark gross80b20dd2008-04-18 13:53:58 -07003570 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08003571
3572 if (!timer_on) {
3573 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3574 timer_on = 1;
3575 }
3576 list_size++;
3577 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3578}
3579
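/*
 * Tear down the mapping behind @dev_addr: either flush and free it
 * immediately (intel_iommu_strict) or queue it for a deferred flush.
 */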
Jiang Liud41a4ad2014-07-11 14:19:34 +08003580static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003581{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003582 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003583 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003584 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003585 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003586 struct page *freelist;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003587
David Woodhouse73676832009-07-04 14:08:36 +01003588 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003589 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003590
David Woodhouse1525a292014-03-06 16:19:30 +00003591 domain = find_domain(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003592 BUG_ON(!domain);
3593
Weidong Han8c11e792008-12-08 15:29:22 +08003594 iommu = domain_get_iommu(domain);
3595
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003596 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01003597 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3598 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003599 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003600
David Woodhoused794dc92009-06-28 00:27:49 +01003601 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3602 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003603
David Woodhoused794dc92009-06-28 00:27:49 +01003604 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003605 dev_name(dev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003606
David Woodhouseea8ea462014-03-05 17:09:32 +00003607 freelist = domain_unmap(domain, start_pfn, last_pfn);
David Woodhoused794dc92009-06-28 00:27:49 +01003608
mark gross5e0d2a62008-03-04 15:22:08 -08003609 if (intel_iommu_strict) {
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02003610 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
David Woodhouseea8ea462014-03-05 17:09:32 +00003611 last_pfn - start_pfn + 1, !freelist, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08003612 /* free iova */
3613 __free_iova(&domain->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003614 dma_free_pagelist(freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003615 } else {
David Woodhouseea8ea462014-03-05 17:09:32 +00003616 add_unmap(domain, iova, freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003617 /*
3618		 * queue up the release of the unmap to save the roughly 1/6th of
3619		 * the cpu time otherwise used up by the iotlb flush operation...
3620 */
mark gross5e0d2a62008-03-04 15:22:08 -08003621 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003622}
3623
Jiang Liud41a4ad2014-07-11 14:19:34 +08003624static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3625 size_t size, enum dma_data_direction dir,
3626 struct dma_attrs *attrs)
3627{
3628 intel_unmap(dev, dev_addr);
3629}
3630
David Woodhouse5040a912014-03-09 16:14:00 -07003631static void *intel_alloc_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003632 dma_addr_t *dma_handle, gfp_t flags,
3633 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003634{
Akinobu Mita36746432014-06-04 16:06:51 -07003635 struct page *page = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003636 int order;
3637
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003638 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003639 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003640
David Woodhouse5040a912014-03-09 16:14:00 -07003641 if (!iommu_no_mapping(dev))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003642 flags &= ~(GFP_DMA | GFP_DMA32);
David Woodhouse5040a912014-03-09 16:14:00 -07003643 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3644 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003645 flags |= GFP_DMA;
3646 else
3647 flags |= GFP_DMA32;
3648 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003649
Mel Gormand0164ad2015-11-06 16:28:21 -08003650 if (gfpflags_allow_blocking(flags)) {
Akinobu Mita36746432014-06-04 16:06:51 -07003651 unsigned int count = size >> PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003652
Akinobu Mita36746432014-06-04 16:06:51 -07003653 page = dma_alloc_from_contiguous(dev, count, order);
3654 if (page && iommu_no_mapping(dev) &&
3655 page_to_phys(page) + size > dev->coherent_dma_mask) {
3656 dma_release_from_contiguous(dev, page, count);
3657 page = NULL;
3658 }
3659 }
3660
3661 if (!page)
3662 page = alloc_pages(flags, order);
3663 if (!page)
3664 return NULL;
3665 memset(page_address(page), 0, size);
3666
3667 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003668 DMA_BIDIRECTIONAL,
David Woodhouse5040a912014-03-09 16:14:00 -07003669 dev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003670 if (*dma_handle)
Akinobu Mita36746432014-06-04 16:06:51 -07003671 return page_address(page);
3672 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3673 __free_pages(page, order);
3674
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003675 return NULL;
3676}
3677
David Woodhouse5040a912014-03-09 16:14:00 -07003678static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003679 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003680{
3681 int order;
Akinobu Mita36746432014-06-04 16:06:51 -07003682 struct page *page = virt_to_page(vaddr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003683
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003684 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003685 order = get_order(size);
3686
Jiang Liud41a4ad2014-07-11 14:19:34 +08003687 intel_unmap(dev, dma_handle);
Akinobu Mita36746432014-06-04 16:06:51 -07003688 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3689 __free_pages(page, order);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003690}
3691
David Woodhouse5040a912014-03-09 16:14:00 -07003692static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003693 int nelems, enum dma_data_direction dir,
3694 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003695{
Jiang Liud41a4ad2014-07-11 14:19:34 +08003696 intel_unmap(dev, sglist[0].dma_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003697}
3698
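/*
 * For devices that bypass the IOMMU entirely: hand the pages' physical
 * addresses straight back as bus addresses.
 */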
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003699static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003700 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003701{
3702 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003703 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003704
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003705 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003706 BUG_ON(!sg_page(sg));
Dan Williams3e6110f2015-12-15 12:54:06 -08003707 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003708 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003709 }
3710 return nelems;
3711}
3712
David Woodhouse5040a912014-03-09 16:14:00 -07003713static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003714 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003715{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003716 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003717 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003718 size_t size = 0;
3719 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003720 struct iova *iova = NULL;
3721 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003722 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003723 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003724 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003725
3726 BUG_ON(dir == DMA_NONE);
David Woodhouse5040a912014-03-09 16:14:00 -07003727 if (iommu_no_mapping(dev))
3728 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003729
David Woodhouse5040a912014-03-09 16:14:00 -07003730 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003731 if (!domain)
3732 return 0;
3733
Weidong Han8c11e792008-12-08 15:29:22 +08003734 iommu = domain_get_iommu(domain);
3735
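	/*
	 * Work out how many IOMMU pages the whole scatterlist needs, then
	 * allocate a single contiguous IOVA range to cover all of it.
	 */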
David Woodhouseb536d242009-06-28 14:49:31 +01003736 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003737 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003738
David Woodhouse5040a912014-03-09 16:14:00 -07003739 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3740 *dev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003741 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003742 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003743 return 0;
3744 }
3745
3746 /*
 3747	 * Check if DMAR supports zero-length reads on write-only
 3748	 * mappings.
3749 */
 3750	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
Weidong Han8c11e792008-12-08 15:29:22 +08003751 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003752 prot |= DMA_PTE_READ;
3753 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3754 prot |= DMA_PTE_WRITE;
3755
David Woodhouseb536d242009-06-28 14:49:31 +01003756 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003757
Fenghua Yuf5329592009-08-04 15:09:37 -07003758 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003759 if (unlikely(ret)) {
David Woodhousee1605492009-06-29 11:17:38 +01003760 dma_pte_free_pagetable(domain, start_vpfn,
3761 start_vpfn + size - 1);
David Woodhousee1605492009-06-29 11:17:38 +01003762 __free_iova(&domain->iovad, iova);
3763 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003764 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003765
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003766 /* it's a non-present to present mapping. Only flush if caching mode */
3767 if (cap_caching_mode(iommu->cap))
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02003768 iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003769 else
Weidong Han8c11e792008-12-08 15:29:22 +08003770 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003771
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003772 return nelems;
3773}
3774
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003775static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3776{
3777 return !dma_addr;
3778}
3779
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003780struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003781 .alloc = intel_alloc_coherent,
3782 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003783 .map_sg = intel_map_sg,
3784 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003785 .map_page = intel_map_page,
3786 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003787 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003788};
3789
3790static inline int iommu_domain_cache_init(void)
3791{
3792 int ret = 0;
3793
3794 iommu_domain_cache = kmem_cache_create("iommu_domain",
3795 sizeof(struct dmar_domain),
3796 0,
3797 SLAB_HWCACHE_ALIGN,
3798
3799 NULL);
3800 if (!iommu_domain_cache) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003801 pr_err("Couldn't create iommu_domain cache\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003802 ret = -ENOMEM;
3803 }
3804
3805 return ret;
3806}
3807
3808static inline int iommu_devinfo_cache_init(void)
3809{
3810 int ret = 0;
3811
3812 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3813 sizeof(struct device_domain_info),
3814 0,
3815 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003816 NULL);
3817 if (!iommu_devinfo_cache) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003818 pr_err("Couldn't create devinfo cache\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003819 ret = -ENOMEM;
3820 }
3821
3822 return ret;
3823}
3824
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003825static int __init iommu_init_mempool(void)
3826{
3827 int ret;
Sakari Ailusae1ff3d2015-07-13 14:31:28 +03003828 ret = iova_cache_get();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003829 if (ret)
3830 return ret;
3831
3832 ret = iommu_domain_cache_init();
3833 if (ret)
3834 goto domain_error;
3835
3836 ret = iommu_devinfo_cache_init();
3837 if (!ret)
3838 return ret;
3839
3840 kmem_cache_destroy(iommu_domain_cache);
3841domain_error:
Sakari Ailusae1ff3d2015-07-13 14:31:28 +03003842 iova_cache_put();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003843
3844 return -ENOMEM;
3845}
3846
3847static void __init iommu_exit_mempool(void)
3848{
3849 kmem_cache_destroy(iommu_devinfo_cache);
3850 kmem_cache_destroy(iommu_domain_cache);
Sakari Ailusae1ff3d2015-07-13 14:31:28 +03003851 iova_cache_put();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003852}
3853
Dan Williams556ab452010-07-23 15:47:56 -07003854static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3855{
3856 struct dmar_drhd_unit *drhd;
3857 u32 vtbar;
3858 int rc;
3859
3860 /* We know that this device on this chipset has its own IOMMU.
3861 * If we find it under a different IOMMU, then the BIOS is lying
3862 * to us. Hope that the IOMMU for this device is actually
3863 * disabled, and it needs no translation...
3864 */
3865 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3866 if (rc) {
3867 /* "can't" happen */
3868 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3869 return;
3870 }
3871 vtbar &= 0xffff0000;
3872
 3873	/* we know that this IOMMU should be at offset 0xa000 from vtbar */
3874 drhd = dmar_find_matched_drhd_unit(pdev);
3875 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3876 TAINT_FIRMWARE_WORKAROUND,
3877 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3878 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3879}
3880DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3881
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003882static void __init init_no_remapping_devices(void)
3883{
3884 struct dmar_drhd_unit *drhd;
David Woodhouse832bd852014-03-07 15:08:36 +00003885 struct device *dev;
Jiang Liub683b232014-02-19 14:07:32 +08003886 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003887
3888 for_each_drhd_unit(drhd) {
3889 if (!drhd->include_all) {
Jiang Liub683b232014-02-19 14:07:32 +08003890 for_each_active_dev_scope(drhd->devices,
3891 drhd->devices_cnt, i, dev)
3892 break;
David Woodhouse832bd852014-03-07 15:08:36 +00003893 /* ignore DMAR unit if no devices exist */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003894 if (i == drhd->devices_cnt)
3895 drhd->ignored = 1;
3896 }
3897 }
3898
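	/*
	 * Second pass: deal with DMAR units whose device scope contains
	 * nothing but graphics devices.
	 */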
Jiang Liu7c919772014-01-06 14:18:18 +08003899 for_each_active_drhd_unit(drhd) {
Jiang Liu7c919772014-01-06 14:18:18 +08003900 if (drhd->include_all)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003901 continue;
3902
Jiang Liub683b232014-02-19 14:07:32 +08003903 for_each_active_dev_scope(drhd->devices,
3904 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003905 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003906 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003907 if (i < drhd->devices_cnt)
3908 continue;
3909
David Woodhousec0771df2011-10-14 20:59:46 +01003910		/* This IOMMU has *only* gfx devices. Either bypass it or
 3911		 * set the gfx_mapped flag, as appropriate. */
3912 if (dmar_map_gfx) {
3913 intel_iommu_gfx_mapped = 1;
3914 } else {
3915 drhd->ignored = 1;
Jiang Liub683b232014-02-19 14:07:32 +08003916 for_each_active_dev_scope(drhd->devices,
3917 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003918 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003919 }
3920 }
3921}
3922
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003923#ifdef CONFIG_SUSPEND
3924static int init_iommu_hw(void)
3925{
3926 struct dmar_drhd_unit *drhd;
3927 struct intel_iommu *iommu = NULL;
3928
3929 for_each_active_iommu(iommu, drhd)
3930 if (iommu->qi)
3931 dmar_reenable_qi(iommu);
3932
Joseph Cihulab7792602011-05-03 00:08:37 -07003933 for_each_iommu(iommu, drhd) {
3934 if (drhd->ignored) {
3935 /*
3936 * we always have to disable PMRs or DMA may fail on
3937 * this device
3938 */
3939 if (force_on)
3940 iommu_disable_protect_mem_regions(iommu);
3941 continue;
3942 }
3943
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003944 iommu_flush_write_buffer(iommu);
3945
3946 iommu_set_root_entry(iommu);
3947
3948 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003949 DMA_CCMD_GLOBAL_INVL);
Jiang Liu2a41cce2014-07-11 14:19:33 +08003950 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3951 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07003952 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003953 }
3954
3955 return 0;
3956}
3957
3958static void iommu_flush_all(void)
3959{
3960 struct dmar_drhd_unit *drhd;
3961 struct intel_iommu *iommu;
3962
3963 for_each_active_iommu(iommu, drhd) {
3964 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003965 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003966 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003967 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003968 }
3969}
3970
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003971static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003972{
3973 struct dmar_drhd_unit *drhd;
3974 struct intel_iommu *iommu = NULL;
3975 unsigned long flag;
3976
3977 for_each_active_iommu(iommu, drhd) {
3978 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3979 GFP_ATOMIC);
3980 if (!iommu->iommu_state)
3981 goto nomem;
3982 }
3983
3984 iommu_flush_all();
3985
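	/*
	 * Disable translation and save the fault-event registers so that
	 * iommu_resume() can restore them.
	 */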
3986 for_each_active_iommu(iommu, drhd) {
3987 iommu_disable_translation(iommu);
3988
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003989 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003990
3991 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3992 readl(iommu->reg + DMAR_FECTL_REG);
3993 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3994 readl(iommu->reg + DMAR_FEDATA_REG);
3995 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3996 readl(iommu->reg + DMAR_FEADDR_REG);
3997 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3998 readl(iommu->reg + DMAR_FEUADDR_REG);
3999
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004000 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004001 }
4002 return 0;
4003
4004nomem:
4005 for_each_active_iommu(iommu, drhd)
4006 kfree(iommu->iommu_state);
4007
4008 return -ENOMEM;
4009}
4010
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004011static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004012{
4013 struct dmar_drhd_unit *drhd;
4014 struct intel_iommu *iommu = NULL;
4015 unsigned long flag;
4016
4017 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07004018 if (force_on)
4019 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
4020 else
4021 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004022 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004023 }
4024
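	/* Restore the fault-event registers saved by iommu_suspend(). */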
4025 for_each_active_iommu(iommu, drhd) {
4026
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004027 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004028
4029 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
4030 iommu->reg + DMAR_FECTL_REG);
4031 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
4032 iommu->reg + DMAR_FEDATA_REG);
4033 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
4034 iommu->reg + DMAR_FEADDR_REG);
4035 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
4036 iommu->reg + DMAR_FEUADDR_REG);
4037
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004038 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004039 }
4040
4041 for_each_active_iommu(iommu, drhd)
4042 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004043}
4044
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004045static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004046 .resume = iommu_resume,
4047 .suspend = iommu_suspend,
4048};
4049
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004050static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004051{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004052 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004053}
4054
4055#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02004056static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004057#endif /* CONFIG_PM */
4058
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004059
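/*
 * Record one ACPI RMRR entry: the reserved memory range and the device
 * scope it applies to.
 */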
Jiang Liuc2a0b532014-11-09 22:47:56 +08004060int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004061{
4062 struct acpi_dmar_reserved_memory *rmrr;
4063 struct dmar_rmrr_unit *rmrru;
4064
4065 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
4066 if (!rmrru)
4067 return -ENOMEM;
4068
4069 rmrru->hdr = header;
4070 rmrr = (struct acpi_dmar_reserved_memory *)header;
4071 rmrru->base_address = rmrr->base_address;
4072 rmrru->end_address = rmrr->end_address;
Jiang Liu2e455282014-02-19 14:07:36 +08004073 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
4074 ((void *)rmrr) + rmrr->header.length,
4075 &rmrru->devices_cnt);
4076 if (rmrru->devices_cnt && rmrru->devices == NULL) {
4077 kfree(rmrru);
4078 return -ENOMEM;
4079 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004080
Jiang Liu2e455282014-02-19 14:07:36 +08004081 list_add(&rmrru->list, &dmar_rmrr_units);
4082
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004083 return 0;
4084}
4085
Jiang Liu6b197242014-11-09 22:47:58 +08004086static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
4087{
4088 struct dmar_atsr_unit *atsru;
4089 struct acpi_dmar_atsr *tmp;
4090
4091 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4092 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
4093 if (atsr->segment != tmp->segment)
4094 continue;
4095 if (atsr->header.length != tmp->header.length)
4096 continue;
4097 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4098 return atsru;
4099 }
4100
4101 return NULL;
4102}
4103
4104int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004105{
4106 struct acpi_dmar_atsr *atsr;
4107 struct dmar_atsr_unit *atsru;
4108
Jiang Liu6b197242014-11-09 22:47:58 +08004109 if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
4110 return 0;
4111
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004112 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
Jiang Liu6b197242014-11-09 22:47:58 +08004113 atsru = dmar_find_atsr(atsr);
4114 if (atsru)
4115 return 0;
4116
4117 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004118 if (!atsru)
4119 return -ENOMEM;
4120
Jiang Liu6b197242014-11-09 22:47:58 +08004121 /*
4122 * If memory is allocated from slab by ACPI _DSM method, we need to
4123 * copy the memory content because the memory buffer will be freed
4124 * on return.
4125 */
4126 atsru->hdr = (void *)(atsru + 1);
4127 memcpy(atsru->hdr, hdr, hdr->length);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004128 atsru->include_all = atsr->flags & 0x1;
Jiang Liu2e455282014-02-19 14:07:36 +08004129 if (!atsru->include_all) {
4130 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4131 (void *)atsr + atsr->header.length,
4132 &atsru->devices_cnt);
4133 if (atsru->devices_cnt && atsru->devices == NULL) {
4134 kfree(atsru);
4135 return -ENOMEM;
4136 }
4137 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004138
Jiang Liu0e242612014-02-19 14:07:34 +08004139 list_add_rcu(&atsru->list, &dmar_atsr_units);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004140
4141 return 0;
4142}
4143
Jiang Liu9bdc5312014-01-06 14:18:27 +08004144static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4145{
4146 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4147 kfree(atsru);
4148}
4149
Jiang Liu6b197242014-11-09 22:47:58 +08004150int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4151{
4152 struct acpi_dmar_atsr *atsr;
4153 struct dmar_atsr_unit *atsru;
4154
4155 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4156 atsru = dmar_find_atsr(atsr);
4157 if (atsru) {
4158 list_del_rcu(&atsru->list);
4159 synchronize_rcu();
4160 intel_iommu_free_atsr(atsru);
4161 }
4162
4163 return 0;
4164}
4165
4166int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4167{
4168 int i;
4169 struct device *dev;
4170 struct acpi_dmar_atsr *atsr;
4171 struct dmar_atsr_unit *atsru;
4172
4173 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4174 atsru = dmar_find_atsr(atsr);
4175 if (!atsru)
4176 return 0;
4177
4178 if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
4179 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4180 i, dev)
4181 return -EBUSY;
4182
4183 return 0;
4184}
4185
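/*
 * Bring a newly added DMAR unit online: verify its capabilities match what
 * the running system already relies on, then set up domains, the root entry,
 * queued invalidation and the fault interrupt before enabling translation.
 */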
Jiang Liuffebeb42014-11-09 22:48:02 +08004186static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4187{
4188 int sp, ret = 0;
4189 struct intel_iommu *iommu = dmaru->iommu;
4190
4191 if (g_iommus[iommu->seq_id])
4192 return 0;
4193
4194 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004195 pr_warn("%s: Doesn't support hardware pass through.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004196 iommu->name);
4197 return -ENXIO;
4198 }
4199 if (!ecap_sc_support(iommu->ecap) &&
4200 domain_update_iommu_snooping(iommu)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004201 pr_warn("%s: Doesn't support snooping.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004202 iommu->name);
4203 return -ENXIO;
4204 }
4205 sp = domain_update_iommu_superpage(iommu) - 1;
4206 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004207 pr_warn("%s: Doesn't support large page.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004208 iommu->name);
4209 return -ENXIO;
4210 }
4211
4212 /*
4213 * Disable translation if already enabled prior to OS handover.
4214 */
4215 if (iommu->gcmd & DMA_GCMD_TE)
4216 iommu_disable_translation(iommu);
4217
4218 g_iommus[iommu->seq_id] = iommu;
4219 ret = iommu_init_domains(iommu);
4220 if (ret == 0)
4221 ret = iommu_alloc_root_entry(iommu);
4222 if (ret)
4223 goto out;
4224
David Woodhouse8a94ade2015-03-24 14:54:56 +00004225#ifdef CONFIG_INTEL_IOMMU_SVM
4226 if (pasid_enabled(iommu))
4227 intel_svm_alloc_pasid_tables(iommu);
4228#endif
4229
Jiang Liuffebeb42014-11-09 22:48:02 +08004230 if (dmaru->ignored) {
4231 /*
4232 * we always have to disable PMRs or DMA may fail on this device
4233 */
4234 if (force_on)
4235 iommu_disable_protect_mem_regions(iommu);
4236 return 0;
4237 }
4238
4239 intel_iommu_init_qi(iommu);
4240 iommu_flush_write_buffer(iommu);
David Woodhousea222a7f2015-10-07 23:35:18 +01004241
4242#ifdef CONFIG_INTEL_IOMMU_SVM
4243 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
4244 ret = intel_svm_enable_prq(iommu);
4245 if (ret)
4246 goto disable_iommu;
4247 }
4248#endif
Jiang Liuffebeb42014-11-09 22:48:02 +08004249 ret = dmar_set_interrupt(iommu);
4250 if (ret)
4251 goto disable_iommu;
4252
4253 iommu_set_root_entry(iommu);
4254 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4255 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4256 iommu_enable_translation(iommu);
4257
Jiang Liuffebeb42014-11-09 22:48:02 +08004258 iommu_disable_protect_mem_regions(iommu);
4259 return 0;
4260
4261disable_iommu:
4262 disable_dmar_iommu(iommu);
4263out:
4264 free_dmar_iommu(iommu);
4265 return ret;
4266}
4267
Jiang Liu6b197242014-11-09 22:47:58 +08004268int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4269{
Jiang Liuffebeb42014-11-09 22:48:02 +08004270 int ret = 0;
4271 struct intel_iommu *iommu = dmaru->iommu;
4272
4273 if (!intel_iommu_enabled)
4274 return 0;
4275 if (iommu == NULL)
4276 return -EINVAL;
4277
4278 if (insert) {
4279 ret = intel_iommu_add(dmaru);
4280 } else {
4281 disable_dmar_iommu(iommu);
4282 free_dmar_iommu(iommu);
4283 }
4284
4285 return ret;
Jiang Liu6b197242014-11-09 22:47:58 +08004286}
4287
Jiang Liu9bdc5312014-01-06 14:18:27 +08004288static void intel_iommu_free_dmars(void)
4289{
4290 struct dmar_rmrr_unit *rmrru, *rmrr_n;
4291 struct dmar_atsr_unit *atsru, *atsr_n;
4292
4293 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4294 list_del(&rmrru->list);
4295 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
4296 kfree(rmrru);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004297 }
4298
Jiang Liu9bdc5312014-01-06 14:18:27 +08004299 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4300 list_del(&atsru->list);
4301 intel_iommu_free_atsr(atsru);
4302 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004303}
4304
4305int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4306{
Jiang Liub683b232014-02-19 14:07:32 +08004307 int i, ret = 1;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004308 struct pci_bus *bus;
David Woodhouse832bd852014-03-07 15:08:36 +00004309 struct pci_dev *bridge = NULL;
4310 struct device *tmp;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004311 struct acpi_dmar_atsr *atsr;
4312 struct dmar_atsr_unit *atsru;
4313
4314 dev = pci_physfn(dev);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004315 for (bus = dev->bus; bus; bus = bus->parent) {
Jiang Liub5f82dd2014-02-19 14:07:31 +08004316 bridge = bus->self;
David Woodhoused14053b2015-10-15 09:28:06 +01004317 /* If it's an integrated device, allow ATS */
4318 if (!bridge)
4319 return 1;
4320 /* Connected via non-PCIe: no ATS */
4321 if (!pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08004322 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004323 return 0;
David Woodhoused14053b2015-10-15 09:28:06 +01004324 /* If we found the root port, look it up in the ATSR */
Jiang Liub5f82dd2014-02-19 14:07:31 +08004325 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004326 break;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004327 }
4328
Jiang Liu0e242612014-02-19 14:07:34 +08004329 rcu_read_lock();
Jiang Liub5f82dd2014-02-19 14:07:31 +08004330 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4331 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4332 if (atsr->segment != pci_domain_nr(dev->bus))
4333 continue;
4334
Jiang Liub683b232014-02-19 14:07:32 +08004335 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
David Woodhouse832bd852014-03-07 15:08:36 +00004336 if (tmp == &bridge->dev)
Jiang Liub683b232014-02-19 14:07:32 +08004337 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08004338
4339 if (atsru->include_all)
Jiang Liub683b232014-02-19 14:07:32 +08004340 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08004341 }
Jiang Liub683b232014-02-19 14:07:32 +08004342 ret = 0;
4343out:
Jiang Liu0e242612014-02-19 14:07:34 +08004344 rcu_read_unlock();
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004345
Jiang Liub683b232014-02-19 14:07:32 +08004346 return ret;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004347}
4348
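/*
 * Keep the RMRR and ATSR device-scope lists in sync as PCI devices are
 * added to or removed from the system.
 */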
Jiang Liu59ce0512014-02-19 14:07:35 +08004349int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4350{
4351 int ret = 0;
4352 struct dmar_rmrr_unit *rmrru;
4353 struct dmar_atsr_unit *atsru;
4354 struct acpi_dmar_atsr *atsr;
4355 struct acpi_dmar_reserved_memory *rmrr;
4356
4357 if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
4358 return 0;
4359
4360 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4361 rmrr = container_of(rmrru->hdr,
4362 struct acpi_dmar_reserved_memory, header);
4363 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4364 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4365 ((void *)rmrr) + rmrr->header.length,
4366 rmrr->segment, rmrru->devices,
4367 rmrru->devices_cnt);
Jiang Liu27e24952014-06-20 15:08:06 +08004368			if (ret < 0)
Jiang Liu59ce0512014-02-19 14:07:35 +08004369 return ret;
Joerg Roedele6a8c9b2016-02-29 23:49:47 +01004370 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
Jiang Liu27e24952014-06-20 15:08:06 +08004371 dmar_remove_dev_scope(info, rmrr->segment,
4372 rmrru->devices, rmrru->devices_cnt);
Jiang Liu59ce0512014-02-19 14:07:35 +08004373 }
4374 }
4375
4376 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4377 if (atsru->include_all)
4378 continue;
4379
4380 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4381 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4382 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4383 (void *)atsr + atsr->header.length,
4384 atsr->segment, atsru->devices,
4385 atsru->devices_cnt);
4386 if (ret > 0)
4387 break;
 4388			else if (ret < 0)
4389 return ret;
Joerg Roedele6a8c9b2016-02-29 23:49:47 +01004390 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
Jiang Liu59ce0512014-02-19 14:07:35 +08004391 if (dmar_remove_dev_scope(info, atsr->segment,
4392 atsru->devices, atsru->devices_cnt))
4393 break;
4394 }
4395 }
4396
4397 return 0;
4398}
4399
Fenghua Yu99dcade2009-11-11 07:23:06 -08004400/*
 4401 * Here we only respond to a device being unbound from its driver.
 4402 *
 4403 * A newly added device is not attached to its DMAR domain here yet;
 4404 * that happens when the device is first mapped to an IOVA.
4405 */
4406static int device_notifier(struct notifier_block *nb,
4407 unsigned long action, void *data)
4408{
4409 struct device *dev = data;
Fenghua Yu99dcade2009-11-11 07:23:06 -08004410 struct dmar_domain *domain;
4411
David Woodhouse3d891942014-03-06 15:59:26 +00004412 if (iommu_dummy(dev))
David Woodhouse44cd6132009-12-02 10:18:30 +00004413 return 0;
4414
Joerg Roedel1196c2f2014-09-30 13:02:03 +02004415 if (action != BUS_NOTIFY_REMOVED_DEVICE)
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004416 return 0;
4417
David Woodhouse1525a292014-03-06 16:19:30 +00004418 domain = find_domain(dev);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004419 if (!domain)
4420 return 0;
4421
Joerg Roedele6de0f82015-07-22 16:30:36 +02004422 dmar_remove_one_dev_info(domain, dev);
Jiang Liuab8dfe22014-07-11 14:19:27 +08004423 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004424 domain_exit(domain);
Alex Williamsona97590e2011-03-04 14:52:16 -07004425
Fenghua Yu99dcade2009-11-11 07:23:06 -08004426 return 0;
4427}
4428
4429static struct notifier_block device_nb = {
4430 .notifier_call = device_notifier,
4431};
4432
Jiang Liu75f05562014-02-19 14:07:37 +08004433static int intel_iommu_memory_notifier(struct notifier_block *nb,
4434 unsigned long val, void *v)
4435{
4436 struct memory_notify *mhp = v;
4437 unsigned long long start, end;
4438 unsigned long start_vpfn, last_vpfn;
4439
4440 switch (val) {
4441 case MEM_GOING_ONLINE:
4442 start = mhp->start_pfn << PAGE_SHIFT;
4443 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4444 if (iommu_domain_identity_map(si_domain, start, end)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004445 pr_warn("Failed to build identity map for [%llx-%llx]\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004446 start, end);
4447 return NOTIFY_BAD;
4448 }
4449 break;
4450
4451 case MEM_OFFLINE:
4452 case MEM_CANCEL_ONLINE:
4453 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4454 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
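		/*
		 * Walk the IOVAs covering the range going offline: split off
		 * the affected part, unmap it, flush the IOTLBs and free the
		 * page tables behind it.
		 */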
4455 while (start_vpfn <= last_vpfn) {
4456 struct iova *iova;
4457 struct dmar_drhd_unit *drhd;
4458 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00004459 struct page *freelist;
Jiang Liu75f05562014-02-19 14:07:37 +08004460
4461 iova = find_iova(&si_domain->iovad, start_vpfn);
4462 if (iova == NULL) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004463				pr_debug("Failed to get IOVA for PFN %lx\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004464 start_vpfn);
4465 break;
4466 }
4467
4468 iova = split_and_remove_iova(&si_domain->iovad, iova,
4469 start_vpfn, last_vpfn);
4470 if (iova == NULL) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004471 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004472 start_vpfn, last_vpfn);
4473 return NOTIFY_BAD;
4474 }
4475
David Woodhouseea8ea462014-03-05 17:09:32 +00004476 freelist = domain_unmap(si_domain, iova->pfn_lo,
4477 iova->pfn_hi);
4478
Jiang Liu75f05562014-02-19 14:07:37 +08004479 rcu_read_lock();
4480 for_each_active_iommu(iommu, drhd)
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02004481 iommu_flush_iotlb_psi(iommu, si_domain,
Jiang Liua156ef92014-07-11 14:19:36 +08004482 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00004483 !freelist, 0);
Jiang Liu75f05562014-02-19 14:07:37 +08004484 rcu_read_unlock();
David Woodhouseea8ea462014-03-05 17:09:32 +00004485 dma_free_pagelist(freelist);
Jiang Liu75f05562014-02-19 14:07:37 +08004486
4487 start_vpfn = iova->pfn_hi + 1;
4488 free_iova_mem(iova);
4489 }
4490 break;
4491 }
4492
4493 return NOTIFY_OK;
4494}
4495
4496static struct notifier_block intel_iommu_memory_nb = {
4497 .notifier_call = intel_iommu_memory_notifier,
4498 .priority = 0
4499};
4500
Alex Williamsona5459cf2014-06-12 16:12:31 -06004501
4502static ssize_t intel_iommu_show_version(struct device *dev,
4503 struct device_attribute *attr,
4504 char *buf)
4505{
4506 struct intel_iommu *iommu = dev_get_drvdata(dev);
4507 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4508 return sprintf(buf, "%d:%d\n",
4509 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4510}
4511static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4512
4513static ssize_t intel_iommu_show_address(struct device *dev,
4514 struct device_attribute *attr,
4515 char *buf)
4516{
4517 struct intel_iommu *iommu = dev_get_drvdata(dev);
4518 return sprintf(buf, "%llx\n", iommu->reg_phys);
4519}
4520static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4521
4522static ssize_t intel_iommu_show_cap(struct device *dev,
4523 struct device_attribute *attr,
4524 char *buf)
4525{
4526 struct intel_iommu *iommu = dev_get_drvdata(dev);
4527 return sprintf(buf, "%llx\n", iommu->cap);
4528}
4529static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4530
4531static ssize_t intel_iommu_show_ecap(struct device *dev,
4532 struct device_attribute *attr,
4533 char *buf)
4534{
4535 struct intel_iommu *iommu = dev_get_drvdata(dev);
4536 return sprintf(buf, "%llx\n", iommu->ecap);
4537}
4538static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4539
Alex Williamson2238c082015-07-14 15:24:53 -06004540static ssize_t intel_iommu_show_ndoms(struct device *dev,
4541 struct device_attribute *attr,
4542 char *buf)
4543{
4544 struct intel_iommu *iommu = dev_get_drvdata(dev);
4545 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4546}
4547static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4548
4549static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4550 struct device_attribute *attr,
4551 char *buf)
4552{
4553 struct intel_iommu *iommu = dev_get_drvdata(dev);
4554 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4555 cap_ndoms(iommu->cap)));
4556}
4557static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4558
Alex Williamsona5459cf2014-06-12 16:12:31 -06004559static struct attribute *intel_iommu_attrs[] = {
4560 &dev_attr_version.attr,
4561 &dev_attr_address.attr,
4562 &dev_attr_cap.attr,
4563 &dev_attr_ecap.attr,
Alex Williamson2238c082015-07-14 15:24:53 -06004564 &dev_attr_domains_supported.attr,
4565 &dev_attr_domains_used.attr,
Alex Williamsona5459cf2014-06-12 16:12:31 -06004566 NULL,
4567};
4568
4569static struct attribute_group intel_iommu_group = {
4570 .name = "intel-iommu",
4571 .attrs = intel_iommu_attrs,
4572};
4573
4574const struct attribute_group *intel_iommu_groups[] = {
4575 &intel_iommu_group,
4576 NULL,
4577};
4578
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004579int __init intel_iommu_init(void)
4580{
Jiang Liu9bdc5312014-01-06 14:18:27 +08004581 int ret = -ENODEV;
Takao Indoh3a93c842013-04-23 17:35:03 +09004582 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08004583 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004584
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004585 /* VT-d is required for a TXT/tboot launch, so enforce that */
4586 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004587
Jiang Liu3a5670e2014-02-19 14:07:33 +08004588 if (iommu_init_mempool()) {
4589 if (force_on)
4590 panic("tboot: Failed to initialize iommu memory\n");
4591 return -ENOMEM;
4592 }
4593
4594 down_write(&dmar_global_lock);
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004595 if (dmar_table_init()) {
4596 if (force_on)
4597 panic("tboot: Failed to initialize DMAR table\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004598 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004599 }
4600
Suresh Siddhac2c72862011-08-23 17:05:19 -07004601 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004602 if (force_on)
4603 panic("tboot: Failed to initialize DMAR device scope\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004604 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004605 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07004606
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004607 if (no_iommu || dmar_disabled)
Jiang Liu9bdc5312014-01-06 14:18:27 +08004608 goto out_free_dmar;
Suresh Siddha2ae21012008-07-10 11:16:43 -07004609
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004610 if (list_empty(&dmar_rmrr_units))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004611 pr_info("No RMRR found\n");
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004612
4613 if (list_empty(&dmar_atsr_units))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004614 pr_info("No ATSR found\n");
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004615
Joseph Cihula51a63e62011-03-21 11:04:24 -07004616 if (dmar_init_reserved_ranges()) {
4617 if (force_on)
4618 panic("tboot: Failed to reserve iommu ranges\n");
Jiang Liu3a5670e2014-02-19 14:07:33 +08004619 goto out_free_reserved_range;
Joseph Cihula51a63e62011-03-21 11:04:24 -07004620 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004621
4622 init_no_remapping_devices();
4623
Joseph Cihulab7792602011-05-03 00:08:37 -07004624 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004625 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004626 if (force_on)
4627 panic("tboot: Failed to initialize DMARs\n");
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004628 pr_err("Initialization failed\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004629 goto out_free_reserved_range;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004630 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08004631 up_write(&dmar_global_lock);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004632 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004633
mark gross5e0d2a62008-03-04 15:22:08 -08004634 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004635#ifdef CONFIG_SWIOTLB
4636 swiotlb = 0;
4637#endif
David Woodhouse19943b02009-08-04 16:19:20 +01004638 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07004639
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004640 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004641
Alex Williamsona5459cf2014-06-12 16:12:31 -06004642 for_each_active_iommu(iommu, drhd)
4643 iommu->iommu_dev = iommu_device_create(NULL, iommu,
4644 intel_iommu_groups,
Kees Cook2439d4a2015-07-24 16:27:57 -07004645 "%s", iommu->name);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004646
Joerg Roedel4236d97d2011-09-06 17:56:07 +02004647 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004648 bus_register_notifier(&pci_bus_type, &device_nb);
Jiang Liu75f05562014-02-19 14:07:37 +08004649 if (si_domain && !hw_pass_through)
4650 register_memory_notifier(&intel_iommu_memory_nb);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004651
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02004652 intel_iommu_enabled = 1;
4653
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004654 return 0;
Jiang Liu9bdc5312014-01-06 14:18:27 +08004655
4656out_free_reserved_range:
4657 put_iova_domain(&reserved_iova_list);
Jiang Liu9bdc5312014-01-06 14:18:27 +08004658out_free_dmar:
4659 intel_iommu_free_dmars();
Jiang Liu3a5670e2014-02-19 14:07:33 +08004660 up_write(&dmar_global_lock);
4661 iommu_exit_mempool();
Jiang Liu9bdc5312014-01-06 14:18:27 +08004662 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004663}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07004664
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004665static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
Alex Williamson579305f2014-07-03 09:51:43 -06004666{
4667 struct intel_iommu *iommu = opaque;
4668
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004669 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
Alex Williamson579305f2014-07-03 09:51:43 -06004670 return 0;
4671}
4672
4673/*
4674 * NB - intel-iommu lacks any sort of reference counting for the users of
4675 * dependent devices. If multiple endpoints have intersecting dependent
4676 * devices, unbinding the driver from any one of them will possibly leave
4677 * the others unable to operate.
4678 */
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004679static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
Han, Weidong3199aa62009-02-26 17:31:12 +08004680{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004681 if (!iommu || !dev || !dev_is_pci(dev))
Han, Weidong3199aa62009-02-26 17:31:12 +08004682 return;
4683
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004684 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
Han, Weidong3199aa62009-02-26 17:31:12 +08004685}
4686
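/*
 * Tear down a single device's attachment: disable its device-IOTLB, clear
 * its context entries, unlink it and drop the domain's reference on the
 * IOMMU.  Caller must hold device_domain_lock.
 */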
Joerg Roedel127c7612015-07-23 17:44:46 +02004687static void __dmar_remove_one_dev_info(struct device_domain_info *info)
Weidong Hanc7151a82008-12-08 22:51:37 +08004688{
Weidong Hanc7151a82008-12-08 22:51:37 +08004689 struct intel_iommu *iommu;
4690 unsigned long flags;
Weidong Hanc7151a82008-12-08 22:51:37 +08004691
Joerg Roedel55d94042015-07-22 16:50:40 +02004692 assert_spin_locked(&device_domain_lock);
4693
Joerg Roedelb608ac32015-07-21 18:19:08 +02004694 if (WARN_ON(!info))
Weidong Hanc7151a82008-12-08 22:51:37 +08004695 return;
4696
Joerg Roedel127c7612015-07-23 17:44:46 +02004697 iommu = info->iommu;
4698
4699 if (info->dev) {
4700 iommu_disable_dev_iotlb(info);
4701 domain_context_clear(iommu, info->dev);
4702 }
4703
Joerg Roedelb608ac32015-07-21 18:19:08 +02004704 unlink_domain_info(info);
Roland Dreier3e7abe22011-07-20 06:22:21 -07004705
Joerg Roedeld160aca2015-07-22 11:52:53 +02004706 spin_lock_irqsave(&iommu->lock, flags);
Joerg Roedel127c7612015-07-23 17:44:46 +02004707 domain_detach_iommu(info->domain, iommu);
Joerg Roedeld160aca2015-07-22 11:52:53 +02004708 spin_unlock_irqrestore(&iommu->lock, flags);
Joerg Roedel127c7612015-07-23 17:44:46 +02004709
4710 free_devinfo_mem(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004711}
4712
Joerg Roedel55d94042015-07-22 16:50:40 +02004713static void dmar_remove_one_dev_info(struct dmar_domain *domain,
4714 struct device *dev)
4715{
Joerg Roedel127c7612015-07-23 17:44:46 +02004716 struct device_domain_info *info;
Joerg Roedel55d94042015-07-22 16:50:40 +02004717 unsigned long flags;
4718
Weidong Hanc7151a82008-12-08 22:51:37 +08004719 spin_lock_irqsave(&device_domain_lock, flags);
Joerg Roedel127c7612015-07-23 17:44:46 +02004720 info = dev->archdata.iommu;
4721 __dmar_remove_one_dev_info(info);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004722 spin_unlock_irqrestore(&device_domain_lock, flags);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004723}
4724
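/*
 * Initialize a freshly allocated IOMMU-API domain: IOVA allocator,
 * address-width bookkeeping and the top-level page directory.
 */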
4725static int md_domain_init(struct dmar_domain *domain, int guest_width)
4726{
4727 int adjust_width;
4728
Robin Murphy0fb5fe82015-01-12 17:51:16 +00004729 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
4730 DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004731 domain_reserve_special_ranges(domain);
4732
4733 /* calculate AGAW */
4734 domain->gaw = guest_width;
4735 adjust_width = guestwidth_to_adjustwidth(guest_width);
4736 domain->agaw = width_to_agaw(adjust_width);
4737
Weidong Han5e98c4b2008-12-08 23:03:27 +08004738 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08004739 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01004740 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004741 domain->max_addr = 0;
Weidong Han5e98c4b2008-12-08 23:03:27 +08004742
4743 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07004744 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004745 if (!domain->pgd)
4746 return -ENOMEM;
4747 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4748 return 0;
4749}
4750
Joerg Roedel00a77de2015-03-26 13:43:08 +01004751static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
Kay, Allen M38717942008-09-09 18:37:29 +03004752{
Joerg Roedel5d450802008-12-03 14:52:32 +01004753 struct dmar_domain *dmar_domain;
Joerg Roedel00a77de2015-03-26 13:43:08 +01004754 struct iommu_domain *domain;
4755
4756 if (type != IOMMU_DOMAIN_UNMANAGED)
4757 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004758
Jiang Liuab8dfe22014-07-11 14:19:27 +08004759 dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
Joerg Roedel5d450802008-12-03 14:52:32 +01004760 if (!dmar_domain) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004761 pr_err("Can't allocate dmar_domain\n");
Joerg Roedel00a77de2015-03-26 13:43:08 +01004762 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004763 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004764 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004765 pr_err("Domain initialization failed\n");
Jiang Liu92d03cc2014-02-19 14:07:28 +08004766 domain_exit(dmar_domain);
Joerg Roedel00a77de2015-03-26 13:43:08 +01004767 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004768 }
Allen Kay8140a952011-10-14 12:32:17 -07004769 domain_update_iommu_cap(dmar_domain);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004770
Joerg Roedel00a77de2015-03-26 13:43:08 +01004771 domain = &dmar_domain->domain;
Joerg Roedel8a0e7152012-01-26 19:40:54 +01004772 domain->geometry.aperture_start = 0;
4773 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4774 domain->geometry.force_aperture = true;
4775
Joerg Roedel00a77de2015-03-26 13:43:08 +01004776 return domain;
Kay, Allen M38717942008-09-09 18:37:29 +03004777}
Kay, Allen M38717942008-09-09 18:37:29 +03004778
Joerg Roedel00a77de2015-03-26 13:43:08 +01004779static void intel_iommu_domain_free(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004780{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004781 domain_exit(to_dmar_domain(domain));
Kay, Allen M38717942008-09-09 18:37:29 +03004782}
Kay, Allen M38717942008-09-09 18:37:29 +03004783
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004784static int intel_iommu_attach_device(struct iommu_domain *domain,
4785 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004786{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004787 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004788 struct intel_iommu *iommu;
4789 int addr_width;
David Woodhouse156baca2014-03-09 14:00:57 -07004790 u8 bus, devfn;
Kay, Allen M38717942008-09-09 18:37:29 +03004791
Alex Williamsonc875d2c2014-07-03 09:57:02 -06004792 if (device_is_rmrr_locked(dev)) {
4793 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
4794 return -EPERM;
4795 }
4796
David Woodhouse7207d8f2014-03-09 16:31:06 -07004797 /* normally dev is not mapped */
4798 if (unlikely(domain_context_mapped(dev))) {
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004799 struct dmar_domain *old_domain;
4800
David Woodhouse1525a292014-03-06 16:19:30 +00004801 old_domain = find_domain(dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004802 if (old_domain) {
Joerg Roedeld160aca2015-07-22 11:52:53 +02004803 rcu_read_lock();
Joerg Roedelde7e8882015-07-22 11:58:07 +02004804 dmar_remove_one_dev_info(old_domain, dev);
Joerg Roedeld160aca2015-07-22 11:52:53 +02004805 rcu_read_unlock();
Joerg Roedel62c22162014-12-09 12:56:45 +01004806
4807 if (!domain_type_is_vm_or_si(old_domain) &&
4808 list_empty(&old_domain->devices))
4809 domain_exit(old_domain);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004810 }
4811 }
4812
David Woodhouse156baca2014-03-09 14:00:57 -07004813 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004814 if (!iommu)
4815 return -ENODEV;
4816
4817 /* check if this iommu agaw is sufficient for max mapped address */
4818 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01004819 if (addr_width > cap_mgaw(iommu->cap))
4820 addr_width = cap_mgaw(iommu->cap);
4821
4822 if (dmar_domain->max_addr > (1LL << addr_width)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004823		pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01004825 __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004826 return -EFAULT;
4827 }
Tom Lyona99c47a2010-05-17 08:20:45 +01004828 dmar_domain->gaw = addr_width;
4829
4830 /*
4831 * Knock out extra levels of page tables if necessary
4832 */
4833 while (iommu->agaw < dmar_domain->agaw) {
4834 struct dma_pte *pte;
4835
4836 pte = dmar_domain->pgd;
4837 if (dma_pte_present(pte)) {
Sheng Yang25cbff12010-06-12 19:21:42 +08004838 dmar_domain->pgd = (struct dma_pte *)
4839 phys_to_virt(dma_pte_addr(pte));
Jan Kiszka7a661012010-11-02 08:05:51 +01004840 free_pgtable_page(pte);
Tom Lyona99c47a2010-05-17 08:20:45 +01004841 }
4842 dmar_domain->agaw--;
4843 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004844
Joerg Roedel28ccce02015-07-21 14:45:31 +02004845 return domain_add_dev_info(dmar_domain, dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004846}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004847
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004848static void intel_iommu_detach_device(struct iommu_domain *domain,
4849 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004850{
Joerg Roedele6de0f82015-07-22 16:30:36 +02004851 dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
Kay, Allen M38717942008-09-09 18:37:29 +03004852}
Kay, Allen M38717942008-09-09 18:37:29 +03004853
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004854static int intel_iommu_map(struct iommu_domain *domain,
4855 unsigned long iova, phys_addr_t hpa,
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004856 size_t size, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03004857{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004858 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004859 u64 max_addr;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004860 int prot = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004861 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004862
Joerg Roedeldde57a22008-12-03 15:04:09 +01004863 if (iommu_prot & IOMMU_READ)
4864 prot |= DMA_PTE_READ;
4865 if (iommu_prot & IOMMU_WRITE)
4866 prot |= DMA_PTE_WRITE;
Sheng Yang9cf066972009-03-18 15:33:07 +08004867 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4868 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004869
David Woodhouse163cc522009-06-28 00:51:17 +01004870 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004871 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004872 u64 end;
4873
4874 /* check if minimum agaw is sufficient for mapped address */
Tom Lyon8954da12010-05-17 08:19:52 +01004875 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004876 if (end < max_addr) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004877 pr_err("%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004878 "sufficient for the mapped address (%llx)\n",
Tom Lyon8954da12010-05-17 08:19:52 +01004879 __func__, dmar_domain->gaw, max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004880 return -EFAULT;
4881 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01004882 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004883 }
David Woodhousead051222009-06-28 14:22:28 +01004884 /* Round up size to next multiple of PAGE_SIZE, if it and
4885 the low bits of hpa would take us onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01004886 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01004887 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4888 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004889 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03004890}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004891
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004892static size_t intel_iommu_unmap(struct iommu_domain *domain,
David Woodhouseea8ea462014-03-05 17:09:32 +00004893 unsigned long iova, size_t size)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004894{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004895 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
David Woodhouseea8ea462014-03-05 17:09:32 +00004896 struct page *freelist = NULL;
4897 struct intel_iommu *iommu;
4898 unsigned long start_pfn, last_pfn;
4899 unsigned int npages;
Joerg Roedel42e8c182015-07-21 15:50:02 +02004900 int iommu_id, level = 0;
Sheng Yang4b99d352009-07-08 11:52:52 +01004901
David Woodhouse5cf0a762014-03-19 16:07:49 +00004902 /* Cope with horrid API which requires us to unmap more than the
4903 size argument if it happens to be a large-page mapping. */
Joerg Roedeldc02e462015-08-13 11:15:13 +02004904 BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
David Woodhouse5cf0a762014-03-19 16:07:49 +00004905
4906 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
4907 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
4908
David Woodhouseea8ea462014-03-05 17:09:32 +00004909 start_pfn = iova >> VTD_PAGE_SHIFT;
4910 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
4911
4912 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
4913
4914 npages = last_pfn - start_pfn + 1;
4915
Joerg Roedel29a27712015-07-21 17:17:12 +02004916 for_each_domain_iommu(iommu_id, dmar_domain) {
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02004917 iommu = g_iommus[iommu_id];
David Woodhouseea8ea462014-03-05 17:09:32 +00004918
Joerg Roedel42e8c182015-07-21 15:50:02 +02004919 iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
4920 start_pfn, npages, !freelist, 0);
David Woodhouseea8ea462014-03-05 17:09:32 +00004921 }
4922
4923 dma_free_pagelist(freelist);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004924
David Woodhouse163cc522009-06-28 00:51:17 +01004925 if (dmar_domain->max_addr == iova + size)
4926 dmar_domain->max_addr = iova;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004927
David Woodhouse5cf0a762014-03-19 16:07:49 +00004928 return size;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004929}
Kay, Allen M38717942008-09-09 18:37:29 +03004930
Joerg Roedeld14d6572008-12-03 15:06:57 +01004931static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
Varun Sethibb5547ac2013-03-29 01:23:58 +05304932 dma_addr_t iova)
Kay, Allen M38717942008-09-09 18:37:29 +03004933{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004934 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Kay, Allen M38717942008-09-09 18:37:29 +03004935 struct dma_pte *pte;
David Woodhouse5cf0a762014-03-19 16:07:49 +00004936 int level = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004937 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004938
David Woodhouse5cf0a762014-03-19 16:07:49 +00004939 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
Kay, Allen M38717942008-09-09 18:37:29 +03004940 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004941 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03004942
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004943 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03004944}

static bool intel_iommu_capable(enum iommu_cap cap)
{
	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return domain_update_iommu_snooping(NULL) == 1;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled == 1;

	return false;
}
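
/*
 * Consumers query these capabilities through the generic helper, e.g.
 * (a sketch) VFIO-style code deciding whether DMA is cache-coherent:
 *
 *	if (iommu_capable(&pci_bus_type, IOMMU_CAP_CACHE_COHERENCY))
 *		// no explicit cache maintenance needed for DMA buffers
 */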

static int intel_iommu_add_device(struct device *dev)
{
	struct intel_iommu *iommu;
	struct iommu_group *group;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	iommu_device_link(iommu->iommu_dev, dev);

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void intel_iommu_remove_device(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	iommu_group_remove_device(dev);

	iommu_device_unlink(iommu->iommu_dev, dev);
}
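
/*
 * These two callbacks are not called by drivers directly; the IOMMU core
 * invokes them from its bus notifier as devices appear on and disappear
 * from the PCI bus, so every device ends up in an iommu_group and is
 * linked to its IOMMU in sysfs. Roughly (a sketch of the core's flow, not
 * code from this file):
 *
 *	device added   -> iommu core notifier -> ops->add_device(dev)
 *	device removed -> iommu core notifier -> ops->remove_device(dev)
 */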

#ifdef CONFIG_INTEL_IOMMU_SVM
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
{
	struct device_domain_info *info;
	struct context_entry *context;
	struct dmar_domain *domain;
	unsigned long flags;
	u64 ctx_lo;
	int ret;

	domain = get_valid_domain_for_dev(sdev->dev);
	if (!domain)
		return -EINVAL;

	spin_lock_irqsave(&device_domain_lock, flags);
	spin_lock(&iommu->lock);

	ret = -EINVAL;
	info = sdev->dev->archdata.iommu;
	if (!info || !info->pasid_supported)
		goto out;

	context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
	if (WARN_ON(!context))
		goto out;

	ctx_lo = context[0].lo;

	sdev->did = domain->iommu_did[iommu->seq_id];
	sdev->sid = PCI_DEVID(info->bus, info->devfn);

	if (!(ctx_lo & CONTEXT_PASIDE)) {
		context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
		context[1].lo = (u64)virt_to_phys(iommu->pasid_table) | ecap_pss(iommu->ecap);
		wmb();
		/* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
		 * extended to permit requests-with-PASID if the PASIDE bit
		 * is set, which makes sense. For CONTEXT_TT_PASS_THROUGH,
		 * however, the PASIDE bit is ignored and requests-with-PASID
		 * are unconditionally blocked, which makes less sense.
		 * So convert from CONTEXT_TT_PASS_THROUGH to one of the new
		 * "guest mode" translation types, depending on whether ATS
		 * is available or not. Annoyingly, we can't use the new
		 * modes *unless* PASIDE is set. */
		if ((ctx_lo & CONTEXT_TT_MASK) == (CONTEXT_TT_PASS_THROUGH << 2)) {
			ctx_lo &= ~CONTEXT_TT_MASK;
			if (info->ats_supported)
				ctx_lo |= CONTEXT_TT_PT_PASID_DEV_IOTLB << 2;
			else
				ctx_lo |= CONTEXT_TT_PT_PASID << 2;
		}
		ctx_lo |= CONTEXT_PASIDE;
		if (iommu->pasid_state_table)
			ctx_lo |= CONTEXT_DINVE;
		if (info->pri_supported)
			ctx_lo |= CONTEXT_PRS;
		context[0].lo = ctx_lo;
		wmb();
		iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
	}

	/* Enable PASID support in the device, if it wasn't already */
	if (!info->pasid_enabled)
		iommu_enable_dev_iotlb(info);

	if (info->ats_enabled) {
		sdev->dev_iotlb = 1;
		sdev->qdep = info->ats_qdep;
		if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
			sdev->qdep = 0;
	}
	ret = 0;

 out:
	spin_unlock(&iommu->lock);
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return ret;
}
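
/*
 * Summary of what the function above writes into the extended context
 * entry the first time PASID support is enabled for a device (an informal
 * reading of this file's code, not a spec excerpt):
 *
 *	context[1].hi  = physical address of the PASID state table
 *	context[1].lo  = physical address of the PASID table | table size
 *	context[0].lo |= CONTEXT_PASIDE (plus DINVE/PRS when supported)
 *
 * followed by a device-selective context-cache flush so the hardware
 * picks up the new translation type before any request-with-PASID.
 */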

struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	if (iommu_dummy(dev)) {
		dev_warn(dev,
			 "No IOMMU translation for device; cannot enable SVM\n");
		return NULL;
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu) {
		dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
		return NULL;
	}

	if (!iommu->pasid_table) {
		dev_err(dev, "PASID not enabled on IOMMU; cannot enable SVM\n");
		return NULL;
	}

	return iommu;
}
#endif /* CONFIG_INTEL_IOMMU_SVM */
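
/*
 * Rough usage from the SVM side (a sketch of how the SVM code is expected
 * to combine the two helpers above when binding a process address space;
 * error value chosen for illustration):
 *
 *	iommu = intel_svm_device_to_iommu(dev);
 *	if (!iommu)
 *		return -EINVAL;
 *	...
 *	ret = intel_iommu_enable_pasid(iommu, sdev);
 */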

static const struct iommu_ops intel_iommu_ops = {
	.capable	= intel_iommu_capable,
	.domain_alloc	= intel_iommu_domain_alloc,
	.domain_free	= intel_iommu_domain_free,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.map_sg		= default_iommu_map_sg,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.add_device	= intel_iommu_add_device,
	.remove_device	= intel_iommu_remove_device,
	.device_group	= pci_device_group,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};
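
/*
 * This ops table is what hooks the driver into the generic IOMMU API: it
 * is registered for all PCI devices from intel_iommu_init(), roughly:
 *
 *	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
 *
 * After that, iommu_map()/iommu_unmap() and friends on a domain attached
 * to a PCI device dispatch to the intel_iommu_* callbacks above.
 */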

static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	pr_info("Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	pr_info("Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
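
/*
 * Note on the mechanism: DECLARE_PCI_FIXUP_HEADER() registers a fixup that
 * the PCI core runs during device enumeration, right after the config
 * header is read and long before any driver binds, so these flags are set
 * before the DMAR code decides how to treat graphics. Covering another
 * affected device would just mean one more line of the same shape, e.g.
 * (hypothetical device ID):
 *
 *	DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e50, quirk_iommu_rwbf);
 */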

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		pr_info("Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
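
/*
 * Worked example of the GGC decode above (register values assumed for
 * illustration): if bits 11:8 of the word at config offset 0x52 read back
 * as 0xb (GGC_MEMORY_SIZE_4M_VT), they include the GGC_MEMORY_VT_ENABLED
 * bit, so a shadow GTT exists and the quirk only switches to strict IOTLB
 * flushing. If they read back as 0x1 (GGC_MEMORY_SIZE_1M, VT bit clear),
 * no shadow GTT was allocated and DMA remapping for graphics is disabled
 * entirely.
 */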

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that. We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}
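
/*
 * Summary of the register check above (an informal reading of this code,
 * not a chipset-spec quote): bit 0 of the value at offset 0x188 set means
 * Azalia DMA is routed to the normal DMAR unit and nothing more is needed.
 * Otherwise bits 4:2 carry the TLB-entry allocation for the isoch unit:
 * 0x10 (16 entries) is the expected value, zero triggers the "broken BIOS"
 * warning plus identity-mapping of the sound device, and any other value
 * only gets the informational warning at the end.
 */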