/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 *          Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"DMAR: " fmt

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/cpu.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/crash_dump.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
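
/*
 * For example, with the default 48-bit guest address width,
 * __DOMAIN_MAX_PFN(48) is (1ULL << 36) - 1, so DOMAIN_MAX_PFN(48) fits an
 * unsigned long as-is on 64-bit builds and is clamped to ULONG_MAX on
 * 32-bit ones.
 */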

/* IO virtual address start page frame number */
#define IOVA_START_PFN		(1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
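
/*
 * For example, with LEVEL_STRIDE == 9 each page-table level indexes nine
 * bits of the DMA pfn: a level-1 (leaf) entry maps 4KiB, a level-2
 * superpage maps 2MiB (512 pages) and a level-3 superpage maps 1GiB,
 * which is what lvl_to_nr_pages() returns, capped at MAX_AGAW_PFN_WIDTH.
 */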

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;
int intel_iommu_tboot_noforce;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	lo;
	u64	hi;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

/*
 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
 * if marked present.
 */
static phys_addr_t root_entry_lctp(struct root_entry *re)
{
	if (!(re->lo & 1))
		return 0;

	return re->lo & VTD_PAGE_MASK;
}

/*
 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
 * if marked present.
 */
static phys_addr_t root_entry_uctp(struct root_entry *re)
{
	if (!(re->hi & 1))
		return 0;

	return re->hi & VTD_PAGE_MASK;
}
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline void context_clear_pasid_enable(struct context_entry *context)
{
	context->lo &= ~(1ULL << 11);
}

static inline bool context_pasid_enabled(struct context_entry *context)
{
	return !!(context->lo & (1ULL << 11));
}

static inline void context_set_copied(struct context_entry *context)
{
	context->hi |= (1ull << 3);
}

static inline bool context_copied(struct context_entry *context)
{
	return !!(context->hi & (1ULL << 3));
}

static inline bool __context_present(struct context_entry *context)
{
	return (context->lo & 1);
}

static inline bool context_present(struct context_entry *context)
{
	return context_pasid_enabled(context) ?
	     __context_present(context) :
	     __context_present(context) && !context_copied(context);
}

static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline int context_domain_id(struct context_entry *c)
{
	return((c->hi >> 8) & 0xffff);
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
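
/*
 * Illustrative sketch (not code from this file): a present context entry
 * is typically built by clearing it and then filling in the domain id,
 * address width, page-table root and present bit with the helpers above,
 * roughly:
 *
 *	context_clear_entry(ctx);
 *	context_set_domain_id(ctx, did);
 *	context_set_address_width(ctx, agaw);
 *	context_set_address_root(ctx, virt_to_phys(pgd));
 *	context_set_translation_type(ctx, CONTEXT_TT_MULTI_LEVEL);
 *	context_set_fault_enable(ctx);
 *	context_set_present(ctx);
 */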

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

/*
 * This domain is a static identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/*
 * Domain represents a virtual machine; more than one device across
 * iommus may be owned by one domain, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)

#define for_each_domain_iommu(idx, domain)			\
	for (idx = 0; idx < g_num_of_iommus; idx++)		\
		if (domain->iommu_refcnt[idx])

struct dmar_domain {
	int	nid;			/* node id */

	unsigned	iommu_refcnt[DMAR_UNITS_SUPPORTED];
					/* Refcount of devices per iommu */


	u16		iommu_did[DMAR_UNITS_SUPPORTED];
					/* Domain ids per IOMMU. Use u16 since
					 * domain ids are 16 bit wide according
					 * to VT-d spec, section 9.3 */

	bool has_iotlb_device;
	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	u64		max_addr;	/* maximum mapped address */

	struct iommu_domain domain;	/* generic domain data structure for
					   iommu core */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	u8 pasid_supported:3;
	u8 pasid_enabled:1;
	u8 pri_supported:1;
	u8 pri_enabled:1;
	u8 ats_supported:1;
	u8 ats_enabled:1;
	u8 ats_qdep;
	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units	*/
	struct acpi_dmar_header *hdr;	/* ACPI header		*/
	u64	base_address;		/* reserved base address*/
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
	struct iommu_resv_region *resv;	/* reserved region handle */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct dmar_domain *domain,
				     struct device *dev);
static void __dmar_remove_one_dev_info(struct device_domain_info *info);
static void domain_context_clear(struct intel_iommu *iommu,
				 struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int intel_iommu_ecs = 1;
static int intel_iommu_pasid28;
static int iommu_identity_mapping;

#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

/* Broadwell and Skylake have broken ECS support — normal so-called "second
 * level" translation of DMA requests-without-PASID doesn't actually happen
 * unless you also set the NESTE bit in an extended context-entry. Which of
 * course means that SVM doesn't work because it's trying to do nested
 * translation of the physical addresses it finds in the process page tables,
 * through the IOVA->phys mapping found in the "second level" page tables.
 *
 * The VT-d specification was retroactively changed to change the definition
 * of the capability bits and pretend that Broadwell/Skylake never happened...
 * but unfortunately the wrong bit was changed. It's ECS which is broken, but
 * for some reason it was the PASID capability bit which was redefined (from
 * bit 28 on BDW/SKL to bit 40 in future).
 *
 * So our test for ECS needs to eschew those implementations which set the old
 * PASID capability bit 28, since those are the ones on which ECS is broken.
 * Unless we are working around the 'pasid28' limitations, that is, by putting
 * the device into passthrough mode for normal DMA and thus masking the bug.
 */
#define ecs_enabled(iommu)	(intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
				 (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
/* PASID support is thus enabled if ECS is enabled and *either* of the old
 * or new capability bits are set. */
#define pasid_enabled(iommu)	(ecs_enabled(iommu) &&			\
				 (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

const struct iommu_ops intel_iommu_ops;

static bool translation_pre_enabled(struct intel_iommu *iommu)
{
	return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
}

static void clear_translation_pre_enabled(struct intel_iommu *iommu)
{
	iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct intel_iommu *iommu)
{
	u32 gsts;

	gsts = readl(iommu->reg + DMAR_GSTS_REG);
	if (gsts & DMA_GSTS_TES)
		iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
}

/* Convert a generic struct iommu_domain to the private struct dmar_domain */
static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct dmar_domain, domain);
}

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			pr_info("IOMMU enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			pr_info("IOMMU disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			pr_info("Disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			pr_info("Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			pr_info("Disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			pr_info("Disable supported super page\n");
			intel_iommu_superpage = 0;
		} else if (!strncmp(str, "ecs_off", 7)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable extended context table support\n");
			intel_iommu_ecs = 0;
		} else if (!strncmp(str, "pasid28", 7)) {
			printk(KERN_INFO
				"Intel-IOMMU: enable pre-production PASID support\n");
			intel_iommu_pasid28 = 1;
			iommu_identity_mapping |= IDENTMAP_GFX;
		} else if (!strncmp(str, "tboot_noforce", 13)) {
			printk(KERN_INFO
				"Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
			intel_iommu_tboot_noforce = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
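
/*
 * Example kernel command lines accepted by the parser above (illustrative):
 *
 *	intel_iommu=on
 *	intel_iommu=on,strict,sp_off
 *	intel_iommu=off
 *
 * Options are comma separated; unrecognised tokens are skipped silently.
 */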

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;

static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
{
	struct dmar_domain **domains;
	int idx = did >> 8;

	domains = iommu->domains[idx];
	if (!domains)
		return NULL;

	return domains[did & 0xff];
}

static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
			     struct dmar_domain *domain)
{
	struct dmar_domain **domains;
	int idx = did >> 8;

	if (!iommu->domains[idx]) {
		size_t size = 256 * sizeof(struct dmar_domain *);
		iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
	}

	domains = iommu->domains[idx];
	if (WARN_ON(!domains))
		return;
	else
		domains[did & 0xff] = domain;
}

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void * alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

static inline int domain_type_is_vm(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}

static inline int domain_type_is_si(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
}

static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
	return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
				DOMAIN_FLAG_STATIC_IDENTITY);
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
					unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus: use a default agaw, and fall back
 * to a smaller supported agaw for iommus that don't support the default one.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain_type_is_vm_or_si(domain));
	for_each_domain_iommu(iommu_id, domain)
		break;

	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool found = false;
	int i;

	domain->iommu_coherency = 1;

	for_each_domain_iommu(i, domain) {
		found = true;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	if (found)
		return;

	/* No hardware attached; use lowest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_coherent(iommu->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	rcu_read_unlock();
}

static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ret = 1;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			if (!ecap_sc_support(iommu->ecap)) {
				ret = 0;
				break;
			}
		}
	}
	rcu_read_unlock();

	return ret;
}

static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		return 0;
	}

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			mask &= cap_super_page_val(iommu->cap);
			if (!mask)
				break;
		}
	}
	rcu_read_unlock();

	return fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain->iommu_snooping = domain_update_iommu_snooping(NULL);
	domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}
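
/*
 * With extended context support (ECS) each root entry is split in two:
 * the low half points to a context table for devfn 0-127 and the high
 * half to one for devfn 128-255. Extended context entries are twice the
 * size of legacy ones, which is why iommu_context_addr() doubles the
 * devfn when indexing into the table.
 */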
static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
						       u8 bus, u8 devfn, int alloc)
{
	struct root_entry *root = &iommu->root_entry[bus];
	struct context_entry *context;
	u64 *entry;

	entry = &root->lo;
	if (ecs_enabled(iommu)) {
		if (devfn >= 0x80) {
			devfn -= 0x80;
			entry = &root->hi;
		}
		devfn *= 2;
	}
	if (*entry & 1)
		context = phys_to_virt(*entry & VTD_PAGE_MASK);
	else {
		unsigned long phy_addr;
		if (!alloc)
			return NULL;

		context = alloc_pgtable_page(iommu->node);
		if (!context)
			return NULL;

		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		*entry = phy_addr | 1;
		__iommu_flush_cache(iommu, entry, sizeof(*entry));
	}
	return &context[devfn];
}

static int iommu_dummy(struct device *dev)
{
	return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	struct pci_dev *ptmp, *pdev = NULL;
	u16 segment = 0;
	int i;

	if (iommu_dummy(dev))
		return NULL;

	if (dev_is_pci(dev)) {
		struct pci_dev *pf_pdev;

		pdev = to_pci_dev(dev);

#ifdef CONFIG_X86
		/* VMD child devices currently cannot be handled individually */
		if (is_vmd(pdev->bus))
			return NULL;
#endif

		/* VFs aren't listed in scope tables; we need to look up
		 * the PF instead to find the IOMMU. */
		pf_pdev = pci_physfn(pdev);
		dev = &pf_pdev->dev;
		segment = pci_domain_nr(pdev->bus);
	} else if (has_acpi_companion(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				/* For a VF use its original BDF# not that of the PF
				 * which we used for the IOMMU lookup. Strictly speaking
				 * we could do this for all PCI devices; we only need to
				 * get the BDF# from the scope table for ACPI matches. */
				if (pdev && pdev->is_virtfn)
					goto got_pdev;

				*bus = drhd->devices[i].bus;
				*devfn = drhd->devices[i].devfn;
				goto out;
			}

			if (!pdev || !dev_is_pci(tmp))
				continue;

			ptmp = to_pci_dev(tmp);
			if (ptmp->subordinate &&
			    ptmp->subordinate->number <= pdev->bus->number &&
			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
		got_pdev:
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;
			goto out;
		}
	}
	iommu = NULL;
 out:
	rcu_read_unlock();

	return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (context)
		ret = context_present(context);
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void free_context_table(struct intel_iommu *iommu)
{
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		context = iommu_context_addr(iommu, i, 0, 0);
		if (context)
			free_pgtable_page(context);

		if (!ecs_enabled(iommu))
			continue;

		context = iommu_context_addr(iommu, i, 0x80, 0);
		if (context)
			free_pgtable_page(context);

	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (!domain_pfn_supported(domain, pfn))
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}


/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (dma_pte_superpage(pte)) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}

static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       int retain_level, struct dma_pte *pte,
			       unsigned long pfn, unsigned long start_pfn,
			       unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2) {
			dma_pte_free_level(domain, level - 1, retain_level,
					   level_pte, level_pfn, start_pfn,
					   last_pfn);
		}

		/*
		 * Free the page table if we're below the level we want to
		 * retain and the range covers the entire table.
		 */
		if (level < retain_level && !(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/*
 * clear last level (leaf) ptes and free page table pages below the
 * level we wish to keep intact.
 */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn,
				   int retain_level)
{
	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	pte = page_address(pg);
	do {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
		pte++;
	} while (!first_pte_in_page(pte));

	return freelist;
}

static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}

/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
static struct page *domain_unmap(struct dmar_domain *domain,
				 unsigned long start_pfn,
				 unsigned long last_pfn)
{
	struct page *freelist = NULL;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
				       domain->pgd, 0, start_pfn, last_pfn, NULL);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		struct page *pgd_page = virt_to_page(domain->pgd);
		pgd_page->freelist = freelist;
		freelist = pgd_page;

		domain->pgd = NULL;
	}

	return freelist;
}

static void dma_free_pagelist(struct page *freelist)
{
	struct page *pg;

	while ((pg = freelist)) {
		freelist = pg->freelist;
		free_pgtable_page(page_address(pg));
	}
}
1287
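/*
 * Flush-queue callback: free a deferred list of page-table pages once the
 * IOTLB flush covering them has been done.
 */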
Joerg Roedel13cf0172017-08-11 11:40:10 +02001288static void iova_entry_free(unsigned long data)
1289{
1290 struct page *freelist = (struct page *)data;
1291
1292 dma_free_pagelist(freelist);
1293}
1294
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001295/* iommu handling */
1296static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1297{
1298 struct root_entry *root;
1299 unsigned long flags;
1300
Suresh Siddha4c923d42009-10-02 11:01:24 -07001301 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
Jiang Liuffebeb42014-11-09 22:48:02 +08001302 if (!root) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001303 pr_err("Allocating root entry for %s failed\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08001304 iommu->name);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001305 return -ENOMEM;
Jiang Liuffebeb42014-11-09 22:48:02 +08001306 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001307
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001308 __iommu_flush_cache(iommu, root, ROOT_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001309
1310 spin_lock_irqsave(&iommu->lock, flags);
1311 iommu->root_entry = root;
1312 spin_unlock_irqrestore(&iommu->lock, flags);
1313
1314 return 0;
1315}
1316
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001317static void iommu_set_root_entry(struct intel_iommu *iommu)
1318{
David Woodhouse03ecc322015-02-13 14:35:21 +00001319 u64 addr;
David Woodhousec416daa2009-05-10 20:30:58 +01001320 u32 sts;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001321 unsigned long flag;
1322
David Woodhouse03ecc322015-02-13 14:35:21 +00001323 addr = virt_to_phys(iommu->root_entry);
David Woodhousec83b2f22015-06-12 10:15:49 +01001324 if (ecs_enabled(iommu))
David Woodhouse03ecc322015-02-13 14:35:21 +00001325 addr |= DMA_RTADDR_RTT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001326
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001327 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse03ecc322015-02-13 14:35:21 +00001328 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001329
David Woodhousec416daa2009-05-10 20:30:58 +01001330 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001331
 1332	/* Make sure hardware completes it */
1333 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001334 readl, (sts & DMA_GSTS_RTPS), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001335
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001336 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001337}
1338
1339static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1340{
1341 u32 val;
1342 unsigned long flag;
1343
David Woodhouse9af88142009-02-13 23:18:03 +00001344 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001345 return;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001346
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001347 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse462b60f2009-05-10 20:18:18 +01001348 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001349
 1350	/* Make sure hardware completes it */
1351 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001352 readl, (!(val & DMA_GSTS_WBFS)), val);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001353
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001354 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001355}
1356
 1357/* return value determines if we need a write buffer flush */
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001358static void __iommu_flush_context(struct intel_iommu *iommu,
1359 u16 did, u16 source_id, u8 function_mask,
1360 u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001361{
1362 u64 val = 0;
1363 unsigned long flag;
1364
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001365 switch (type) {
1366 case DMA_CCMD_GLOBAL_INVL:
1367 val = DMA_CCMD_GLOBAL_INVL;
1368 break;
1369 case DMA_CCMD_DOMAIN_INVL:
1370 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1371 break;
1372 case DMA_CCMD_DEVICE_INVL:
1373 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1374 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1375 break;
1376 default:
1377 BUG();
1378 }
1379 val |= DMA_CCMD_ICC;
1380
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001381 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001382 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1383
 1384	/* Make sure hardware completes it */
1385 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1386 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1387
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001388 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001389}
1390
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001391/* return value determines if we need a write buffer flush */
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001392static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1393 u64 addr, unsigned int size_order, u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001394{
1395 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1396 u64 val = 0, val_iva = 0;
1397 unsigned long flag;
1398
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001399 switch (type) {
1400 case DMA_TLB_GLOBAL_FLUSH:
 1401		/* global flush doesn't need to set IVA_REG */
1402 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1403 break;
1404 case DMA_TLB_DSI_FLUSH:
1405 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1406 break;
1407 case DMA_TLB_PSI_FLUSH:
1408 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
David Woodhouseea8ea462014-03-05 17:09:32 +00001409 /* IH bit is passed in as part of address */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001410 val_iva = size_order | addr;
1411 break;
1412 default:
1413 BUG();
1414 }
1415 /* Note: set drain read/write */
1416#if 0
1417 /*
 1418	 * This is probably just to be extra safe. It looks like we can
1419 * ignore it without any impact.
1420 */
1421 if (cap_read_drain(iommu->cap))
1422 val |= DMA_TLB_READ_DRAIN;
1423#endif
1424 if (cap_write_drain(iommu->cap))
1425 val |= DMA_TLB_WRITE_DRAIN;
1426
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001427 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001428 /* Note: Only uses first TLB reg currently */
1429 if (val_iva)
1430 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1431 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1432
 1433	/* Make sure hardware completes it */
1434 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1435 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1436
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001437 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001438
1439 /* check IOTLB invalidation granularity */
1440 if (DMA_TLB_IAIG(val) == 0)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001441 pr_err("Flush IOTLB failed\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001442 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001443 pr_debug("TLB flush request %Lx, actual %Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001444 (unsigned long long)DMA_TLB_IIRG(type),
1445 (unsigned long long)DMA_TLB_IAIG(val));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001446}
1447
David Woodhouse64ae8922014-03-09 12:52:30 -07001448static struct device_domain_info *
 1449iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
1450 u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001451{
Yu Zhao93a23a72009-05-18 13:51:37 +08001452 struct device_domain_info *info;
Yu Zhao93a23a72009-05-18 13:51:37 +08001453
Joerg Roedel55d94042015-07-22 16:50:40 +02001454 assert_spin_locked(&device_domain_lock);
1455
Yu Zhao93a23a72009-05-18 13:51:37 +08001456 if (!iommu->qi)
1457 return NULL;
1458
Yu Zhao93a23a72009-05-18 13:51:37 +08001459 list_for_each_entry(info, &domain->devices, link)
Jiang Liuc3b497c2014-07-11 14:19:25 +08001460 if (info->iommu == iommu && info->bus == bus &&
1461 info->devfn == devfn) {
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001462 if (info->ats_supported && info->dev)
1463 return info;
Yu Zhao93a23a72009-05-18 13:51:37 +08001464 break;
1465 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001466
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001467 return NULL;
Yu Zhao93a23a72009-05-18 13:51:37 +08001468}
1469
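/*
 * Recompute domain->has_iotlb_device after a device's ATS state changes,
 * so that iommu_flush_dev_iotlb() can skip domains with no ATS-enabled
 * devices.
 */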
Omer Peleg0824c592016-04-20 19:03:35 +03001470static void domain_update_iotlb(struct dmar_domain *domain)
1471{
1472 struct device_domain_info *info;
1473 bool has_iotlb_device = false;
1474
1475 assert_spin_locked(&device_domain_lock);
1476
1477 list_for_each_entry(info, &domain->devices, link) {
1478 struct pci_dev *pdev;
1479
1480 if (!info->dev || !dev_is_pci(info->dev))
1481 continue;
1482
1483 pdev = to_pci_dev(info->dev);
1484 if (pdev->ats_enabled) {
1485 has_iotlb_device = true;
1486 break;
1487 }
1488 }
1489
1490 domain->has_iotlb_device = has_iotlb_device;
1491}
1492
Yu Zhao93a23a72009-05-18 13:51:37 +08001493static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1494{
Bjorn Helgaasfb0cc3a2015-07-20 09:10:36 -05001495 struct pci_dev *pdev;
1496
Omer Peleg0824c592016-04-20 19:03:35 +03001497 assert_spin_locked(&device_domain_lock);
1498
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001499 if (!info || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001500 return;
1501
Bjorn Helgaasfb0cc3a2015-07-20 09:10:36 -05001502 pdev = to_pci_dev(info->dev);
Bjorn Helgaasfb0cc3a2015-07-20 09:10:36 -05001503
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001504#ifdef CONFIG_INTEL_IOMMU_SVM
 1505	/* The PCIe spec, in its wisdom, declares that the behaviour of the
 1506	   device is undefined if you enable PASID support after ATS support.
 1507	   So always enable PASID support on devices which
1508 have it, even if we can't yet know if we're ever going to
1509 use it. */
1510 if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
1511 info->pasid_enabled = 1;
1512
1513 if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
1514 info->pri_enabled = 1;
1515#endif
1516 if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
1517 info->ats_enabled = 1;
Omer Peleg0824c592016-04-20 19:03:35 +03001518 domain_update_iotlb(info->domain);
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001519 info->ats_qdep = pci_ats_queue_depth(pdev);
1520 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001521}
1522
1523static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1524{
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001525 struct pci_dev *pdev;
1526
Omer Peleg0824c592016-04-20 19:03:35 +03001527 assert_spin_locked(&device_domain_lock);
1528
Jeremy McNicollda972fb2016-01-14 21:33:06 -08001529 if (!dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001530 return;
1531
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001532 pdev = to_pci_dev(info->dev);
1533
1534 if (info->ats_enabled) {
1535 pci_disable_ats(pdev);
1536 info->ats_enabled = 0;
Omer Peleg0824c592016-04-20 19:03:35 +03001537 domain_update_iotlb(info->domain);
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001538 }
1539#ifdef CONFIG_INTEL_IOMMU_SVM
1540 if (info->pri_enabled) {
1541 pci_disable_pri(pdev);
1542 info->pri_enabled = 0;
1543 }
1544 if (info->pasid_enabled) {
1545 pci_disable_pasid(pdev);
1546 info->pasid_enabled = 0;
1547 }
1548#endif
Yu Zhao93a23a72009-05-18 13:51:37 +08001549}
1550
1551static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1552 u64 addr, unsigned mask)
1553{
1554 u16 sid, qdep;
1555 unsigned long flags;
1556 struct device_domain_info *info;
1557
Omer Peleg0824c592016-04-20 19:03:35 +03001558 if (!domain->has_iotlb_device)
1559 return;
1560
Yu Zhao93a23a72009-05-18 13:51:37 +08001561 spin_lock_irqsave(&device_domain_lock, flags);
1562 list_for_each_entry(info, &domain->devices, link) {
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001563 if (!info->ats_enabled)
Yu Zhao93a23a72009-05-18 13:51:37 +08001564 continue;
1565
1566 sid = info->bus << 8 | info->devfn;
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001567 qdep = info->ats_qdep;
Yu Zhao93a23a72009-05-18 13:51:37 +08001568 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1569 }
1570 spin_unlock_irqrestore(&device_domain_lock, flags);
1571}
1572
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02001573static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1574 struct dmar_domain *domain,
1575 unsigned long pfn, unsigned int pages,
1576 int ih, int map)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001577{
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001578 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
David Woodhouse03d6a242009-06-28 15:33:46 +01001579 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02001580 u16 did = domain->iommu_did[iommu->seq_id];
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001581
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001582 BUG_ON(pages == 0);
1583
David Woodhouseea8ea462014-03-05 17:09:32 +00001584 if (ih)
1585 ih = 1 << 6;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001586 /*
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001587	 * Fall back to domain-selective flush if no PSI support or the size is
1588 * too big.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001589	 * PSI requires the page size to be 2 ^ x, and the base address to be
 1590	 * naturally aligned to the size
1591 */
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001592 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1593 iommu->flush.flush_iotlb(iommu, did, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001594 DMA_TLB_DSI_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001595 else
David Woodhouseea8ea462014-03-05 17:09:32 +00001596 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001597 DMA_TLB_PSI_FLUSH);
Yu Zhaobf92df32009-06-29 11:31:45 +08001598
1599 /*
Nadav Amit82653632010-04-01 13:24:40 +03001600 * In caching mode, changes of pages from non-present to present require
 1601	 * a flush. However, the device IOTLB doesn't need to be flushed in this case.
Yu Zhaobf92df32009-06-29 11:31:45 +08001602 */
Nadav Amit82653632010-04-01 13:24:40 +03001603 if (!cap_caching_mode(iommu->cap) || !map)
Joerg Roedel9452d5b2015-07-21 10:00:56 +02001604 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1605 addr, mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001606}
1607
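/*
 * Flush-queue callback run before deferred IOVAs are reused: perform a
 * domain-selective IOTLB flush on every IOMMU this domain is attached to,
 * plus a device-IOTLB flush when not in caching mode.
 */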
Joerg Roedel13cf0172017-08-11 11:40:10 +02001608static void iommu_flush_iova(struct iova_domain *iovad)
1609{
1610 struct dmar_domain *domain;
1611 int idx;
1612
1613 domain = container_of(iovad, struct dmar_domain, iovad);
1614
1615 for_each_domain_iommu(idx, domain) {
1616 struct intel_iommu *iommu = g_iommus[idx];
1617 u16 did = domain->iommu_did[iommu->seq_id];
1618
1619 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
1620
1621 if (!cap_caching_mode(iommu->cap))
1622 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1623 0, MAX_AGAW_PFN_WIDTH);
1624 }
1625}
1626
mark grossf8bab732008-02-08 04:18:38 -08001627static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1628{
1629 u32 pmen;
1630 unsigned long flags;
1631
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001632 raw_spin_lock_irqsave(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001633 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1634 pmen &= ~DMA_PMEN_EPM;
1635 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1636
1637 /* wait for the protected region status bit to clear */
1638 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1639 readl, !(pmen & DMA_PMEN_PRS), pmen);
1640
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001641 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001642}
1643
Jiang Liu2a41cce2014-07-11 14:19:33 +08001644static void iommu_enable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001645{
1646 u32 sts;
1647 unsigned long flags;
1648
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001649 raw_spin_lock_irqsave(&iommu->register_lock, flags);
David Woodhousec416daa2009-05-10 20:30:58 +01001650 iommu->gcmd |= DMA_GCMD_TE;
1651 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001652
 1653	/* Make sure hardware completes it */
1654 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001655 readl, (sts & DMA_GSTS_TES), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001656
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001657 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001658}
1659
Jiang Liu2a41cce2014-07-11 14:19:33 +08001660static void iommu_disable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001661{
1662 u32 sts;
1663 unsigned long flag;
1664
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001665 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001666 iommu->gcmd &= ~DMA_GCMD_TE;
1667 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1668
 1669	/* Make sure hardware completes it */
1670 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001671 readl, (!(sts & DMA_GSTS_TES)), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001672
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001673 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001674}
1675
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001676
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001677static int iommu_init_domains(struct intel_iommu *iommu)
1678{
Joerg Roedel8bf47812015-07-21 10:41:21 +02001679 u32 ndomains, nlongs;
1680 size_t size;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001681
1682 ndomains = cap_ndoms(iommu->cap);
Joerg Roedel8bf47812015-07-21 10:41:21 +02001683 pr_debug("%s: Number of Domains supported <%d>\n",
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001684 iommu->name, ndomains);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001685 nlongs = BITS_TO_LONGS(ndomains);
1686
Donald Dutile94a91b52009-08-20 16:51:34 -04001687 spin_lock_init(&iommu->lock);
1688
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001689 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1690 if (!iommu->domain_ids) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001691 pr_err("%s: Allocating domain id array failed\n",
1692 iommu->name);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001693 return -ENOMEM;
1694 }
Joerg Roedel8bf47812015-07-21 10:41:21 +02001695
Wei Yang86f004c2016-05-21 02:41:51 +00001696 size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
Joerg Roedel8bf47812015-07-21 10:41:21 +02001697 iommu->domains = kzalloc(size, GFP_KERNEL);
1698
1699 if (iommu->domains) {
1700 size = 256 * sizeof(struct dmar_domain *);
1701 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1702 }
1703
1704 if (!iommu->domains || !iommu->domains[0]) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001705 pr_err("%s: Allocating domain array failed\n",
1706 iommu->name);
Jiang Liu852bdb02014-01-06 14:18:11 +08001707 kfree(iommu->domain_ids);
Joerg Roedel8bf47812015-07-21 10:41:21 +02001708 kfree(iommu->domains);
Jiang Liu852bdb02014-01-06 14:18:11 +08001709 iommu->domain_ids = NULL;
Joerg Roedel8bf47812015-07-21 10:41:21 +02001710 iommu->domains = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001711 return -ENOMEM;
1712 }
1713
Joerg Roedel8bf47812015-07-21 10:41:21 +02001714
1715
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001716 /*
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001717 * If Caching mode is set, then invalid translations are tagged
1718 * with domain-id 0, hence we need to pre-allocate it. We also
1719 * use domain-id 0 as a marker for non-allocated domain-id, so
1720 * make sure it is not used for a real domain.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001721 */
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001722 set_bit(0, iommu->domain_ids);
1723
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001724 return 0;
1725}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001726
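/*
 * Detach every device that still references this IOMMU (exiting the
 * corresponding domains except for VM and static-identity ones) and turn
 * off DMA translation on it.
 */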
Jiang Liuffebeb42014-11-09 22:48:02 +08001727static void disable_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001728{
Joerg Roedel29a27712015-07-21 17:17:12 +02001729 struct device_domain_info *info, *tmp;
Joerg Roedel55d94042015-07-22 16:50:40 +02001730 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001731
Joerg Roedel29a27712015-07-21 17:17:12 +02001732 if (!iommu->domains || !iommu->domain_ids)
1733 return;
Jiang Liua4eaa862014-02-19 14:07:30 +08001734
Joerg Roedelbea64032016-11-08 15:08:26 +01001735again:
Joerg Roedel55d94042015-07-22 16:50:40 +02001736 spin_lock_irqsave(&device_domain_lock, flags);
Joerg Roedel29a27712015-07-21 17:17:12 +02001737 list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1738 struct dmar_domain *domain;
1739
1740 if (info->iommu != iommu)
1741 continue;
1742
1743 if (!info->dev || !info->domain)
1744 continue;
1745
1746 domain = info->domain;
1747
Joerg Roedelbea64032016-11-08 15:08:26 +01001748 __dmar_remove_one_dev_info(info);
Joerg Roedel29a27712015-07-21 17:17:12 +02001749
Joerg Roedelbea64032016-11-08 15:08:26 +01001750 if (!domain_type_is_vm_or_si(domain)) {
1751 /*
1752 * The domain_exit() function can't be called under
1753 * device_domain_lock, as it takes this lock itself.
1754 * So release the lock here and re-run the loop
1755 * afterwards.
1756 */
1757 spin_unlock_irqrestore(&device_domain_lock, flags);
Joerg Roedel29a27712015-07-21 17:17:12 +02001758 domain_exit(domain);
Joerg Roedelbea64032016-11-08 15:08:26 +01001759 goto again;
1760 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001761 }
Joerg Roedel55d94042015-07-22 16:50:40 +02001762 spin_unlock_irqrestore(&device_domain_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001763
1764 if (iommu->gcmd & DMA_GCMD_TE)
1765 iommu_disable_translation(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08001766}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001767
Jiang Liuffebeb42014-11-09 22:48:02 +08001768static void free_dmar_iommu(struct intel_iommu *iommu)
1769{
1770 if ((iommu->domains) && (iommu->domain_ids)) {
Wei Yang86f004c2016-05-21 02:41:51 +00001771 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
Joerg Roedel8bf47812015-07-21 10:41:21 +02001772 int i;
1773
1774 for (i = 0; i < elems; i++)
1775 kfree(iommu->domains[i]);
Jiang Liuffebeb42014-11-09 22:48:02 +08001776 kfree(iommu->domains);
1777 kfree(iommu->domain_ids);
1778 iommu->domains = NULL;
1779 iommu->domain_ids = NULL;
1780 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001781
Weidong Hand9630fe2008-12-08 11:06:32 +08001782 g_iommus[iommu->seq_id] = NULL;
1783
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001784 /* free context mapping */
1785 free_context_table(iommu);
David Woodhouse8a94ade2015-03-24 14:54:56 +00001786
1787#ifdef CONFIG_INTEL_IOMMU_SVM
David Woodhousea222a7f2015-10-07 23:35:18 +01001788 if (pasid_enabled(iommu)) {
1789 if (ecap_prs(iommu->ecap))
1790 intel_svm_finish_prq(iommu);
David Woodhouse8a94ade2015-03-24 14:54:56 +00001791 intel_svm_free_pasid_tables(iommu);
David Woodhousea222a7f2015-10-07 23:35:18 +01001792 }
David Woodhouse8a94ade2015-03-24 14:54:56 +00001793#endif
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001794}
1795
Jiang Liuab8dfe22014-07-11 14:19:27 +08001796static struct dmar_domain *alloc_domain(int flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001797{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001798 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001799
1800 domain = alloc_domain_mem();
1801 if (!domain)
1802 return NULL;
1803
Jiang Liuab8dfe22014-07-11 14:19:27 +08001804 memset(domain, 0, sizeof(*domain));
Suresh Siddha4c923d42009-10-02 11:01:24 -07001805 domain->nid = -1;
Jiang Liuab8dfe22014-07-11 14:19:27 +08001806 domain->flags = flags;
Omer Peleg0824c592016-04-20 19:03:35 +03001807 domain->has_iotlb_device = false;
Jiang Liu92d03cc2014-02-19 14:07:28 +08001808 INIT_LIST_HEAD(&domain->devices);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001809
1810 return domain;
1811}
1812
Joerg Roedeld160aca2015-07-22 11:52:53 +02001813/* Must be called with iommu->lock */
1814static int domain_attach_iommu(struct dmar_domain *domain,
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001815 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001816{
Jiang Liu44bde612014-07-11 14:19:29 +08001817 unsigned long ndomains;
Joerg Roedel55d94042015-07-22 16:50:40 +02001818 int num;
Jiang Liu44bde612014-07-11 14:19:29 +08001819
Joerg Roedel55d94042015-07-22 16:50:40 +02001820 assert_spin_locked(&device_domain_lock);
Joerg Roedeld160aca2015-07-22 11:52:53 +02001821 assert_spin_locked(&iommu->lock);
Jiang Liu44bde612014-07-11 14:19:29 +08001822
Joerg Roedel29a27712015-07-21 17:17:12 +02001823 domain->iommu_refcnt[iommu->seq_id] += 1;
1824 domain->iommu_count += 1;
1825 if (domain->iommu_refcnt[iommu->seq_id] == 1) {
Jiang Liufb170fb2014-07-11 14:19:28 +08001826 ndomains = cap_ndoms(iommu->cap);
Joerg Roedeld160aca2015-07-22 11:52:53 +02001827 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1828
1829 if (num >= ndomains) {
1830 pr_err("%s: No free domain ids\n", iommu->name);
1831 domain->iommu_refcnt[iommu->seq_id] -= 1;
1832 domain->iommu_count -= 1;
Joerg Roedel55d94042015-07-22 16:50:40 +02001833 return -ENOSPC;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001834 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001835
Joerg Roedeld160aca2015-07-22 11:52:53 +02001836 set_bit(num, iommu->domain_ids);
1837 set_iommu_domain(iommu, num, domain);
Jiang Liufb170fb2014-07-11 14:19:28 +08001838
Joerg Roedeld160aca2015-07-22 11:52:53 +02001839 domain->iommu_did[iommu->seq_id] = num;
1840 domain->nid = iommu->node;
1841
Jiang Liufb170fb2014-07-11 14:19:28 +08001842 domain_update_iommu_cap(domain);
1843 }
Joerg Roedeld160aca2015-07-22 11:52:53 +02001844
Joerg Roedel55d94042015-07-22 16:50:40 +02001845 return 0;
Jiang Liufb170fb2014-07-11 14:19:28 +08001846}
1847
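/*
 * Drop one reference the domain holds on this IOMMU; when the last
 * reference on this IOMMU goes away, release the domain-id allocated on
 * it. Returns the domain's remaining overall reference count.
 */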
1848static int domain_detach_iommu(struct dmar_domain *domain,
1849 struct intel_iommu *iommu)
1850{
Joerg Roedeld160aca2015-07-22 11:52:53 +02001851 int num, count = INT_MAX;
Jiang Liufb170fb2014-07-11 14:19:28 +08001852
Joerg Roedel55d94042015-07-22 16:50:40 +02001853 assert_spin_locked(&device_domain_lock);
Joerg Roedeld160aca2015-07-22 11:52:53 +02001854 assert_spin_locked(&iommu->lock);
Jiang Liufb170fb2014-07-11 14:19:28 +08001855
Joerg Roedel29a27712015-07-21 17:17:12 +02001856 domain->iommu_refcnt[iommu->seq_id] -= 1;
1857 count = --domain->iommu_count;
1858 if (domain->iommu_refcnt[iommu->seq_id] == 0) {
Joerg Roedeld160aca2015-07-22 11:52:53 +02001859 num = domain->iommu_did[iommu->seq_id];
1860 clear_bit(num, iommu->domain_ids);
1861 set_iommu_domain(iommu, num, NULL);
1862
Jiang Liufb170fb2014-07-11 14:19:28 +08001863 domain_update_iommu_cap(domain);
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001864 domain->iommu_did[iommu->seq_id] = 0;
Jiang Liufb170fb2014-07-11 14:19:28 +08001865 }
Jiang Liufb170fb2014-07-11 14:19:28 +08001866
1867 return count;
1868}
1869
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001870static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001871static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001872
Joseph Cihula51a63e62011-03-21 11:04:24 -07001873static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001874{
1875 struct pci_dev *pdev = NULL;
1876 struct iova *iova;
1877 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001878
Zhen Leiaa3ac942017-09-21 16:52:45 +01001879 init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001880
Mark Gross8a443df2008-03-04 14:59:31 -08001881 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1882 &reserved_rbtree_key);
1883
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001884 /* IOAPIC ranges shouldn't be accessed by DMA */
1885 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1886 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001887 if (!iova) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001888 pr_err("Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001889 return -ENODEV;
1890 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001891
1892 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1893 for_each_pci_dev(pdev) {
1894 struct resource *r;
1895
1896 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1897 r = &pdev->resource[i];
1898 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1899 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001900 iova = reserve_iova(&reserved_iova_list,
1901 IOVA_PFN(r->start),
1902 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001903 if (!iova) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001904 pr_err("Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001905 return -ENODEV;
1906 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001907 }
1908 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001909 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001910}
1911
1912static void domain_reserve_special_ranges(struct dmar_domain *domain)
1913{
1914 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1915}
1916
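/*
 * Round the guest address width up to the next width the page-table walk
 * can express: 12 bits of page offset plus a whole number of 9-bit levels,
 * capped at 64. For example, gaw 48 stays 48, while gaw 40 gives
 * r = (40 - 12) % 9 = 1 and thus agaw = 40 + 9 - 1 = 48.
 */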
1917static inline int guestwidth_to_adjustwidth(int gaw)
1918{
1919 int agaw;
1920 int r = (gaw - 12) % 9;
1921
1922 if (r == 0)
1923 agaw = gaw;
1924 else
1925 agaw = gaw + 9 - r;
1926 if (agaw > 64)
1927 agaw = 64;
1928 return agaw;
1929}
1930
Joerg Roedeldc534b22015-07-22 12:44:02 +02001931static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
1932 int guest_width)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001933{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001934 int adjust_width, agaw;
1935 unsigned long sagaw;
Joerg Roedel13cf0172017-08-11 11:40:10 +02001936 int err;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001937
Zhen Leiaa3ac942017-09-21 16:52:45 +01001938 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
Joerg Roedel13cf0172017-08-11 11:40:10 +02001939
1940 err = init_iova_flush_queue(&domain->iovad,
1941 iommu_flush_iova, iova_entry_free);
1942 if (err)
1943 return err;
1944
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001945 domain_reserve_special_ranges(domain);
1946
1947 /* calculate AGAW */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001948 if (guest_width > cap_mgaw(iommu->cap))
1949 guest_width = cap_mgaw(iommu->cap);
1950 domain->gaw = guest_width;
1951 adjust_width = guestwidth_to_adjustwidth(guest_width);
1952 agaw = width_to_agaw(adjust_width);
1953 sagaw = cap_sagaw(iommu->cap);
1954 if (!test_bit(agaw, &sagaw)) {
1955 /* hardware doesn't support it, choose a bigger one */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001956 pr_debug("Hardware doesn't support agaw %d\n", agaw);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001957 agaw = find_next_bit(&sagaw, 5, agaw);
1958 if (agaw >= 5)
1959 return -ENODEV;
1960 }
1961 domain->agaw = agaw;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001962
Weidong Han8e6040972008-12-08 15:49:06 +08001963 if (ecap_coherent(iommu->ecap))
1964 domain->iommu_coherency = 1;
1965 else
1966 domain->iommu_coherency = 0;
1967
Sheng Yang58c610b2009-03-18 15:33:05 +08001968 if (ecap_sc_support(iommu->ecap))
1969 domain->iommu_snooping = 1;
1970 else
1971 domain->iommu_snooping = 0;
1972
David Woodhouse214e39a2014-03-19 10:38:49 +00001973 if (intel_iommu_superpage)
1974 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1975 else
1976 domain->iommu_superpage = 0;
1977
Suresh Siddha4c923d42009-10-02 11:01:24 -07001978 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001979
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001980 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001981 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001982 if (!domain->pgd)
1983 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001984 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001985 return 0;
1986}
1987
1988static void domain_exit(struct dmar_domain *domain)
1989{
David Woodhouseea8ea462014-03-05 17:09:32 +00001990 struct page *freelist = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001991
 1992	/* Domain 0 is reserved, so don't process it */
1993 if (!domain)
1994 return;
1995
Joerg Roedeld160aca2015-07-22 11:52:53 +02001996 /* Remove associated devices and clear attached or cached domains */
1997 rcu_read_lock();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001998 domain_remove_dev_info(domain);
Joerg Roedeld160aca2015-07-22 11:52:53 +02001999 rcu_read_unlock();
Jiang Liu92d03cc2014-02-19 14:07:28 +08002000
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002001 /* destroy iovas */
2002 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002003
David Woodhouseea8ea462014-03-05 17:09:32 +00002004 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002005
David Woodhouseea8ea462014-03-05 17:09:32 +00002006 dma_free_pagelist(freelist);
2007
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002008 free_domain_mem(domain);
2009}
2010
David Woodhouse64ae8922014-03-09 12:52:30 -07002011static int domain_context_mapping_one(struct dmar_domain *domain,
2012 struct intel_iommu *iommu,
Joerg Roedel28ccce02015-07-21 14:45:31 +02002013 u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002014{
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002015 u16 did = domain->iommu_did[iommu->seq_id];
Joerg Roedel28ccce02015-07-21 14:45:31 +02002016 int translation = CONTEXT_TT_MULTI_LEVEL;
2017 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002018 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002019 unsigned long flags;
Weidong Hanea6606b2008-12-08 23:08:15 +08002020 struct dma_pte *pgd;
Joerg Roedel55d94042015-07-22 16:50:40 +02002021 int ret, agaw;
Joerg Roedel28ccce02015-07-21 14:45:31 +02002022
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002023 WARN_ON(did == 0);
2024
Joerg Roedel28ccce02015-07-21 14:45:31 +02002025 if (hw_pass_through && domain_type_is_si(domain))
2026 translation = CONTEXT_TT_PASS_THROUGH;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002027
2028 pr_debug("Set context mapping for %02x:%02x.%d\n",
2029 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002030
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002031 BUG_ON(!domain->pgd);
Weidong Han5331fe62008-12-08 23:00:00 +08002032
Joerg Roedel55d94042015-07-22 16:50:40 +02002033 spin_lock_irqsave(&device_domain_lock, flags);
2034 spin_lock(&iommu->lock);
2035
2036 ret = -ENOMEM;
David Woodhouse03ecc322015-02-13 14:35:21 +00002037 context = iommu_context_addr(iommu, bus, devfn, 1);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002038 if (!context)
Joerg Roedel55d94042015-07-22 16:50:40 +02002039 goto out_unlock;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002040
Joerg Roedel55d94042015-07-22 16:50:40 +02002041 ret = 0;
2042 if (context_present(context))
2043 goto out_unlock;
Joerg Roedelcf484d02015-06-12 12:21:46 +02002044
Xunlei Pangaec0e862016-12-05 20:09:07 +08002045 /*
2046 * For kdump cases, old valid entries may be cached due to the
2047 * in-flight DMA and copied pgtable, but there is no unmapping
2048 * behaviour for them, thus we need an explicit cache flush for
2049 * the newly-mapped device. For kdump, at this point, the device
2050 * is supposed to finish reset at its driver probe stage, so no
 2051	 * in-flight DMA will exist, and we don't need to worry about it
2052 * hereafter.
2053 */
2054 if (context_copied(context)) {
2055 u16 did_old = context_domain_id(context);
2056
Christos Gkekasb117e032017-10-08 23:33:31 +01002057 if (did_old < cap_ndoms(iommu->cap)) {
Xunlei Pangaec0e862016-12-05 20:09:07 +08002058 iommu->flush.flush_context(iommu, did_old,
2059 (((u16)bus) << 8) | devfn,
2060 DMA_CCMD_MASK_NOBIT,
2061 DMA_CCMD_DEVICE_INVL);
KarimAllah Ahmedf73a7ee2017-05-05 11:39:59 -07002062 iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
2063 DMA_TLB_DSI_FLUSH);
2064 }
Xunlei Pangaec0e862016-12-05 20:09:07 +08002065 }
2066
Weidong Hanea6606b2008-12-08 23:08:15 +08002067 pgd = domain->pgd;
2068
Joerg Roedelde24e552015-07-21 14:53:04 +02002069 context_clear_entry(context);
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002070 context_set_domain_id(context, did);
Weidong Hanea6606b2008-12-08 23:08:15 +08002071
Joerg Roedelde24e552015-07-21 14:53:04 +02002072	 * Skip top levels of page tables for an iommu which has a smaller agaw
 2073	 * than the default. Unnecessary for PT mode.
2074 * than default. Unnecessary for PT mode.
2075 */
Yu Zhao93a23a72009-05-18 13:51:37 +08002076 if (translation != CONTEXT_TT_PASS_THROUGH) {
Joerg Roedelde24e552015-07-21 14:53:04 +02002077 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
Joerg Roedel55d94042015-07-22 16:50:40 +02002078 ret = -ENOMEM;
Joerg Roedelde24e552015-07-21 14:53:04 +02002079 pgd = phys_to_virt(dma_pte_addr(pgd));
Joerg Roedel55d94042015-07-22 16:50:40 +02002080 if (!dma_pte_present(pgd))
2081 goto out_unlock;
Joerg Roedelde24e552015-07-21 14:53:04 +02002082 }
2083
David Woodhouse64ae8922014-03-09 12:52:30 -07002084 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
David Woodhouseb16d0cb2015-10-12 14:17:37 +01002085 if (info && info->ats_supported)
2086 translation = CONTEXT_TT_DEV_IOTLB;
2087 else
2088 translation = CONTEXT_TT_MULTI_LEVEL;
Joerg Roedelde24e552015-07-21 14:53:04 +02002089
Yu Zhao93a23a72009-05-18 13:51:37 +08002090 context_set_address_root(context, virt_to_phys(pgd));
2091 context_set_address_width(context, iommu->agaw);
Joerg Roedelde24e552015-07-21 14:53:04 +02002092 } else {
2093 /*
2094 * In pass through mode, AW must be programmed to
2095 * indicate the largest AGAW value supported by
2096 * hardware. And ASR is ignored by hardware.
2097 */
2098 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08002099 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002100
2101 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00002102 context_set_fault_enable(context);
2103 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08002104 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002105
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002106 /*
2107 * It's a non-present to present mapping. If hardware doesn't cache
 2108	 * non-present entries we only need to flush the write-buffer. If it
2109 * _does_ cache non-present entries, then it does so in the special
2110 * domain #0, which we have to flush:
2111 */
2112 if (cap_caching_mode(iommu->cap)) {
2113 iommu->flush.flush_context(iommu, 0,
2114 (((u16)bus) << 8) | devfn,
2115 DMA_CCMD_MASK_NOBIT,
2116 DMA_CCMD_DEVICE_INVL);
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002117 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002118 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002119 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002120 }
Yu Zhao93a23a72009-05-18 13:51:37 +08002121 iommu_enable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08002122
Joerg Roedel55d94042015-07-22 16:50:40 +02002123 ret = 0;
2124
2125out_unlock:
2126 spin_unlock(&iommu->lock);
2127 spin_unlock_irqrestore(&device_domain_lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08002128
Wei Yang5c365d12016-07-13 13:53:21 +00002129 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002130}
2131
Alex Williamson579305f2014-07-03 09:51:43 -06002132struct domain_context_mapping_data {
2133 struct dmar_domain *domain;
2134 struct intel_iommu *iommu;
Alex Williamson579305f2014-07-03 09:51:43 -06002135};
2136
2137static int domain_context_mapping_cb(struct pci_dev *pdev,
2138 u16 alias, void *opaque)
2139{
2140 struct domain_context_mapping_data *data = opaque;
2141
2142 return domain_context_mapping_one(data->domain, data->iommu,
Joerg Roedel28ccce02015-07-21 14:45:31 +02002143 PCI_BUS_NUM(alias), alias & 0xff);
Alex Williamson579305f2014-07-03 09:51:43 -06002144}
2145
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002146static int
Joerg Roedel28ccce02015-07-21 14:45:31 +02002147domain_context_mapping(struct dmar_domain *domain, struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002148{
David Woodhouse64ae8922014-03-09 12:52:30 -07002149 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002150 u8 bus, devfn;
Alex Williamson579305f2014-07-03 09:51:43 -06002151 struct domain_context_mapping_data data;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002152
David Woodhousee1f167f2014-03-09 15:24:46 -07002153 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse64ae8922014-03-09 12:52:30 -07002154 if (!iommu)
2155 return -ENODEV;
2156
Alex Williamson579305f2014-07-03 09:51:43 -06002157 if (!dev_is_pci(dev))
Joerg Roedel28ccce02015-07-21 14:45:31 +02002158 return domain_context_mapping_one(domain, iommu, bus, devfn);
Alex Williamson579305f2014-07-03 09:51:43 -06002159
2160 data.domain = domain;
2161 data.iommu = iommu;
Alex Williamson579305f2014-07-03 09:51:43 -06002162
2163 return pci_for_each_dma_alias(to_pci_dev(dev),
2164 &domain_context_mapping_cb, &data);
2165}
2166
2167static int domain_context_mapped_cb(struct pci_dev *pdev,
2168 u16 alias, void *opaque)
2169{
2170 struct intel_iommu *iommu = opaque;
2171
2172 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002173}
2174
David Woodhousee1f167f2014-03-09 15:24:46 -07002175static int domain_context_mapped(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002176{
Weidong Han5331fe62008-12-08 23:00:00 +08002177 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002178 u8 bus, devfn;
Weidong Han5331fe62008-12-08 23:00:00 +08002179
David Woodhousee1f167f2014-03-09 15:24:46 -07002180 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08002181 if (!iommu)
2182 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002183
Alex Williamson579305f2014-07-03 09:51:43 -06002184 if (!dev_is_pci(dev))
2185 return device_context_mapped(iommu, bus, devfn);
David Woodhousee1f167f2014-03-09 15:24:46 -07002186
Alex Williamson579305f2014-07-03 09:51:43 -06002187 return !pci_for_each_dma_alias(to_pci_dev(dev),
2188 domain_context_mapped_cb, iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002189}
2190
Fenghua Yuf5329592009-08-04 15:09:37 -07002191/* Returns a number of VTD pages, but aligned to MM page size */
2192static inline unsigned long aligned_nrpages(unsigned long host_addr,
2193 size_t size)
2194{
2195 host_addr &= ~PAGE_MASK;
2196 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
2197}
2198
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002199/* Return largest possible superpage level for a given mapping */
2200static inline int hardware_largepage_caps(struct dmar_domain *domain,
2201 unsigned long iov_pfn,
2202 unsigned long phy_pfn,
2203 unsigned long pages)
2204{
2205 int support, level = 1;
2206 unsigned long pfnmerge;
2207
2208 support = domain->iommu_superpage;
2209
2210 /* To use a large page, the virtual *and* physical addresses
2211 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2212 of them will mean we have to use smaller pages. So just
2213 merge them and check both at once. */
2214 pfnmerge = iov_pfn | phy_pfn;
2215
2216 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2217 pages >>= VTD_STRIDE_SHIFT;
2218 if (!pages)
2219 break;
2220 pfnmerge >>= VTD_STRIDE_SHIFT;
2221 level++;
2222 support--;
2223 }
2224 return level;
2225}
2226
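/*
 * Map nr_pages VT-d pages starting at iov_pfn, taking the physical pages
 * either from a scatterlist (sg != NULL) or from the contiguous range
 * starting at phys_pfn, and using the largest superpage size that
 * hardware_largepage_caps() allows for each chunk.
 */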
David Woodhouse9051aa02009-06-29 12:30:54 +01002227static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2228 struct scatterlist *sg, unsigned long phys_pfn,
2229 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01002230{
2231 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01002232 phys_addr_t uninitialized_var(pteval);
Jiang Liucc4f14a2014-11-26 09:42:10 +08002233 unsigned long sg_res = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002234 unsigned int largepage_lvl = 0;
2235 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01002236
Jiang Liu162d1b12014-07-11 14:19:35 +08002237 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
David Woodhousee1605492009-06-29 11:17:38 +01002238
2239 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2240 return -EINVAL;
2241
2242 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2243
Jiang Liucc4f14a2014-11-26 09:42:10 +08002244 if (!sg) {
2245 sg_res = nr_pages;
David Woodhouse9051aa02009-06-29 12:30:54 +01002246 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2247 }
2248
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002249 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01002250 uint64_t tmp;
2251
David Woodhousee1605492009-06-29 11:17:38 +01002252 if (!sg_res) {
Robin Murphy29a90b72017-09-28 15:14:01 +01002253 unsigned int pgoff = sg->offset & ~PAGE_MASK;
2254
Fenghua Yuf5329592009-08-04 15:09:37 -07002255 sg_res = aligned_nrpages(sg->offset, sg->length);
Robin Murphy29a90b72017-09-28 15:14:01 +01002256 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
David Woodhousee1605492009-06-29 11:17:38 +01002257 sg->dma_length = sg->length;
Robin Murphy29a90b72017-09-28 15:14:01 +01002258 pteval = (sg_phys(sg) - pgoff) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002259 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01002260 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002261
David Woodhousee1605492009-06-29 11:17:38 +01002262 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002263 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2264
David Woodhouse5cf0a762014-03-19 16:07:49 +00002265 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01002266 if (!pte)
2267 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002268			/* It is a large page */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002269 if (largepage_lvl > 1) {
Christian Zanderba2374f2015-06-10 09:41:45 -07002270 unsigned long nr_superpages, end_pfn;
2271
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002272 pteval |= DMA_PTE_LARGE_PAGE;
Jiang Liud41a4ad2014-07-11 14:19:34 +08002273 lvl_pages = lvl_to_nr_pages(largepage_lvl);
Christian Zanderba2374f2015-06-10 09:41:45 -07002274
2275 nr_superpages = sg_res / lvl_pages;
2276 end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
2277
Jiang Liud41a4ad2014-07-11 14:19:34 +08002278 /*
2279 * Ensure that old small page tables are
Christian Zanderba2374f2015-06-10 09:41:45 -07002280 * removed to make room for superpage(s).
David Dillowbc24c572017-06-28 19:42:23 -07002281 * We're adding new large pages, so make sure
2282 * we don't remove their parent tables.
Jiang Liud41a4ad2014-07-11 14:19:34 +08002283 */
David Dillowbc24c572017-06-28 19:42:23 -07002284 dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
2285 largepage_lvl + 1);
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002286 } else {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002287 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002288 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002289
David Woodhousee1605492009-06-29 11:17:38 +01002290 }
 2291		/* We don't need the lock here, nobody else
2292 * touches the iova range
2293 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01002294 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01002295 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01002296 static int dumps = 5;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002297 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2298 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01002299 if (dumps) {
2300 dumps--;
2301 debug_dma_dump_mappings(NULL);
2302 }
2303 WARN_ON(1);
2304 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002305
2306 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2307
2308 BUG_ON(nr_pages < lvl_pages);
2309 BUG_ON(sg_res < lvl_pages);
2310
2311 nr_pages -= lvl_pages;
2312 iov_pfn += lvl_pages;
2313 phys_pfn += lvl_pages;
2314 pteval += lvl_pages * VTD_PAGE_SIZE;
2315 sg_res -= lvl_pages;
2316
2317 /* If the next PTE would be the first in a new page, then we
2318 need to flush the cache on the entries we've just written.
2319 And then we'll need to recalculate 'pte', so clear it and
2320 let it get set again in the if (!pte) block above.
2321
2322 If we're done (!nr_pages) we need to flush the cache too.
2323
2324 Also if we've been setting superpages, we may need to
2325 recalculate 'pte' and switch back to smaller pages for the
2326 end of the mapping, if the trailing size is not enough to
2327 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01002328 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002329 if (!nr_pages || first_pte_in_page(pte) ||
2330 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01002331 domain_flush_cache(domain, first_pte,
2332 (void *)pte - (void *)first_pte);
2333 pte = NULL;
2334 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002335
2336 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01002337 sg = sg_next(sg);
2338 }
2339 return 0;
2340}
2341
David Woodhouse9051aa02009-06-29 12:30:54 +01002342static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2343 struct scatterlist *sg, unsigned long nr_pages,
2344 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002345{
David Woodhouse9051aa02009-06-29 12:30:54 +01002346 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2347}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002348
David Woodhouse9051aa02009-06-29 12:30:54 +01002349static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2350 unsigned long phys_pfn, unsigned long nr_pages,
2351 int prot)
2352{
2353 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002354}
2355
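/*
 * Clear the context entry for one bus/devfn and invalidate the context
 * cache and IOTLB for the domain-id it used to carry.
 */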
Joerg Roedel2452d9d2015-07-23 16:20:14 +02002356static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002357{
Filippo Sironi50822192017-08-31 10:58:11 +02002358 unsigned long flags;
2359 struct context_entry *context;
2360 u16 did_old;
2361
Weidong Hanc7151a82008-12-08 22:51:37 +08002362 if (!iommu)
2363 return;
Weidong Han8c11e792008-12-08 15:29:22 +08002364
Filippo Sironi50822192017-08-31 10:58:11 +02002365 spin_lock_irqsave(&iommu->lock, flags);
2366 context = iommu_context_addr(iommu, bus, devfn, 0);
2367 if (!context) {
2368 spin_unlock_irqrestore(&iommu->lock, flags);
2369 return;
2370 }
2371 did_old = context_domain_id(context);
2372 context_clear_entry(context);
2373 __iommu_flush_cache(iommu, context, sizeof(*context));
2374 spin_unlock_irqrestore(&iommu->lock, flags);
2375 iommu->flush.flush_context(iommu,
2376 did_old,
2377 (((u16)bus) << 8) | devfn,
2378 DMA_CCMD_MASK_NOBIT,
2379 DMA_CCMD_DEVICE_INVL);
2380 iommu->flush.flush_iotlb(iommu,
2381 did_old,
2382 0,
2383 0,
2384 DMA_TLB_DSI_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002385}
2386
David Woodhouse109b9b02012-05-25 17:43:02 +01002387static inline void unlink_domain_info(struct device_domain_info *info)
2388{
2389 assert_spin_locked(&device_domain_lock);
2390 list_del(&info->link);
2391 list_del(&info->global);
2392 if (info->dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002393 info->dev->archdata.iommu = NULL;
David Woodhouse109b9b02012-05-25 17:43:02 +01002394}
2395
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002396static void domain_remove_dev_info(struct dmar_domain *domain)
2397{
Yijing Wang3a74ca02014-05-20 20:37:47 +08002398 struct device_domain_info *info, *tmp;
Jiang Liufb170fb2014-07-11 14:19:28 +08002399 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002400
2401 spin_lock_irqsave(&device_domain_lock, flags);
Joerg Roedel76f45fe2015-07-21 18:25:11 +02002402 list_for_each_entry_safe(info, tmp, &domain->devices, link)
Joerg Roedel127c7612015-07-23 17:44:46 +02002403 __dmar_remove_one_dev_info(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002404 spin_unlock_irqrestore(&device_domain_lock, flags);
2405}
2406
2407/*
2408 * find_domain
David Woodhouse1525a292014-03-06 16:19:30 +00002409 * Note: struct device->archdata.iommu stores the info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002410 */
David Woodhouse1525a292014-03-06 16:19:30 +00002411static struct dmar_domain *find_domain(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002412{
2413 struct device_domain_info *info;
2414
2415 /* No lock here, assumes no domain exit in normal case */
David Woodhouse1525a292014-03-06 16:19:30 +00002416 info = dev->archdata.iommu;
Peter Xub316d022017-05-22 18:28:51 +08002417 if (likely(info))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002418 return info->domain;
2419 return NULL;
2420}
2421
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002422static inline struct device_domain_info *
Jiang Liu745f2582014-02-19 14:07:26 +08002423dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2424{
2425 struct device_domain_info *info;
2426
2427 list_for_each_entry(info, &device_domain_list, global)
David Woodhouse41e80dca2014-03-09 13:55:54 -07002428 if (info->iommu->segment == segment && info->bus == bus &&
Jiang Liu745f2582014-02-19 14:07:26 +08002429 info->devfn == devfn)
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002430 return info;
Jiang Liu745f2582014-02-19 14:07:26 +08002431
2432 return NULL;
2433}
2434
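/*
 * Allocate and link a device_domain_info for bus/devfn, recording ATS,
 * PASID and PRI capabilities for PCI devices. If the device (or one of its
 * DMA aliases) already has a domain, that existing domain is returned and
 * the caller must free the one it passed in; otherwise the new domain is
 * attached to the IOMMU and the context mapping is set up.
 */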
Joerg Roedel5db31562015-07-22 12:40:43 +02002435static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2436 int bus, int devfn,
2437 struct device *dev,
2438 struct dmar_domain *domain)
Jiang Liu745f2582014-02-19 14:07:26 +08002439{
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002440 struct dmar_domain *found = NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002441 struct device_domain_info *info;
2442 unsigned long flags;
Joerg Roedeld160aca2015-07-22 11:52:53 +02002443 int ret;
Jiang Liu745f2582014-02-19 14:07:26 +08002444
2445 info = alloc_devinfo_mem();
2446 if (!info)
David Woodhouseb718cd32014-03-09 13:11:33 -07002447 return NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002448
Jiang Liu745f2582014-02-19 14:07:26 +08002449 info->bus = bus;
2450 info->devfn = devfn;
David Woodhouseb16d0cb2015-10-12 14:17:37 +01002451 info->ats_supported = info->pasid_supported = info->pri_supported = 0;
2452 info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
2453 info->ats_qdep = 0;
Jiang Liu745f2582014-02-19 14:07:26 +08002454 info->dev = dev;
2455 info->domain = domain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002456 info->iommu = iommu;
Jiang Liu745f2582014-02-19 14:07:26 +08002457
David Woodhouseb16d0cb2015-10-12 14:17:37 +01002458 if (dev && dev_is_pci(dev)) {
2459 struct pci_dev *pdev = to_pci_dev(info->dev);
2460
2461 if (ecap_dev_iotlb_support(iommu->ecap) &&
2462 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
2463 dmar_find_matched_atsr_unit(pdev))
2464 info->ats_supported = 1;
2465
2466 if (ecs_enabled(iommu)) {
2467 if (pasid_enabled(iommu)) {
2468 int features = pci_pasid_features(pdev);
2469 if (features >= 0)
2470 info->pasid_supported = features | 1;
2471 }
2472
2473 if (info->ats_supported && ecap_prs(iommu->ecap) &&
2474 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
2475 info->pri_supported = 1;
2476 }
2477 }
2478
Jiang Liu745f2582014-02-19 14:07:26 +08002479 spin_lock_irqsave(&device_domain_lock, flags);
2480 if (dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002481 found = find_domain(dev);
Joerg Roedelf303e502015-07-23 18:37:13 +02002482
2483 if (!found) {
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002484 struct device_domain_info *info2;
David Woodhouse41e80dca2014-03-09 13:55:54 -07002485 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
Joerg Roedelf303e502015-07-23 18:37:13 +02002486 if (info2) {
2487 found = info2->domain;
2488 info2->dev = dev;
2489 }
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002490 }
Joerg Roedelf303e502015-07-23 18:37:13 +02002491
Jiang Liu745f2582014-02-19 14:07:26 +08002492 if (found) {
2493 spin_unlock_irqrestore(&device_domain_lock, flags);
2494 free_devinfo_mem(info);
David Woodhouseb718cd32014-03-09 13:11:33 -07002495 /* Caller must free the original domain */
2496 return found;
Jiang Liu745f2582014-02-19 14:07:26 +08002497 }
2498
Joerg Roedeld160aca2015-07-22 11:52:53 +02002499 spin_lock(&iommu->lock);
2500 ret = domain_attach_iommu(domain, iommu);
2501 spin_unlock(&iommu->lock);
2502
2503 if (ret) {
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002504 spin_unlock_irqrestore(&device_domain_lock, flags);
Sudip Mukherjee499f3aa2015-09-18 16:27:07 +05302505 free_devinfo_mem(info);
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002506 return NULL;
2507 }
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002508
David Woodhouseb718cd32014-03-09 13:11:33 -07002509 list_add(&info->link, &domain->devices);
2510 list_add(&info->global, &device_domain_list);
2511 if (dev)
2512 dev->archdata.iommu = info;
2513 spin_unlock_irqrestore(&device_domain_lock, flags);
2514
Joerg Roedelcc4e2572015-07-22 10:04:36 +02002515 if (dev && domain_context_mapping(domain, dev)) {
2516 pr_err("Domain context map for %s failed\n", dev_name(dev));
Joerg Roedele6de0f82015-07-22 16:30:36 +02002517 dmar_remove_one_dev_info(domain, dev);
Joerg Roedelcc4e2572015-07-22 10:04:36 +02002518 return NULL;
2519 }
2520
David Woodhouseb718cd32014-03-09 13:11:33 -07002521 return domain;
Jiang Liu745f2582014-02-19 14:07:26 +08002522}
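
/*
 * The allocate-then-recheck pattern above deserves a second look: the new
 * device_domain_info is filled in before device_domain_lock is taken, the
 * lookup is repeated under the lock, and the loser of the race frees its
 * allocation and hands back the already-published domain (the caller then
 * frees the domain it brought in).  A minimal, illustrative sketch of that
 * pattern follows; it is not driver code and the helper name is made up
 * for the example.
 */
static inline struct dmar_domain *
example_publish_dev_info(struct device_domain_info *info,
			 struct dmar_domain *domain)
{
	struct dmar_domain *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	if (info->dev)
		found = find_domain(info->dev);	/* re-check under the lock */
	if (found) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		free_devinfo_mem(info);		/* lost the race */
		return found;			/* caller frees its own domain */
	}
	list_add(&info->link, &domain->devices);	/* won the race: publish */
	list_add(&info->global, &device_domain_list);
	spin_unlock_irqrestore(&device_domain_lock, flags);
	return domain;
}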
2523
Alex Williamson579305f2014-07-03 09:51:43 -06002524static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2525{
2526 *(u16 *)opaque = alias;
2527 return 0;
2528}
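
/*
 * get_last_alias() is the callback handed to pci_for_each_dma_alias(),
 * which visits the device's own requester ID, any quirk aliases and the
 * bridges that take ownership of its transactions; since the callback
 * always returns 0, the u16 left behind is the last alias seen, with the
 * bus number in the high byte and the devfn in the low byte.
 * find_or_alloc_domain() and set_domain_for_dev() below split it apart
 * again with PCI_BUS_NUM() and a 0xff mask.  An illustrative sketch of
 * that round trip (the helper name is made up for the example):
 */
static inline void example_decode_dma_alias(struct pci_dev *pdev)
{
	u16 dma_alias = 0;

	pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);

	/* e.g. alias 0x1a08 -> bus 0x1a, device 1, function 0 */
	pr_debug("alias %04x = bus %02x devfn %02x (slot %d func %d)\n",
		 dma_alias, PCI_BUS_NUM(dma_alias), dma_alias & 0xff,
		 PCI_SLOT(dma_alias & 0xff), PCI_FUNC(dma_alias & 0xff));
}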
2529
Joerg Roedel76208352016-08-25 14:25:12 +02002530static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002531{
Joerg Roedelcc4e2572015-07-22 10:04:36 +02002532 struct device_domain_info *info = NULL;
Joerg Roedel76208352016-08-25 14:25:12 +02002533 struct dmar_domain *domain = NULL;
Alex Williamson579305f2014-07-03 09:51:43 -06002534 struct intel_iommu *iommu;
Joerg Roedel08a7f452015-07-23 18:09:11 +02002535 u16 req_id, dma_alias;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002536 unsigned long flags;
Yijing Wangaa4d0662014-05-26 20:14:06 +08002537 u8 bus, devfn;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002538
David Woodhouse146922e2014-03-09 15:44:17 -07002539 iommu = device_to_iommu(dev, &bus, &devfn);
2540 if (!iommu)
Alex Williamson579305f2014-07-03 09:51:43 -06002541 return NULL;
2542
Joerg Roedel08a7f452015-07-23 18:09:11 +02002543 req_id = ((u16)bus << 8) | devfn;
2544
Alex Williamson579305f2014-07-03 09:51:43 -06002545 if (dev_is_pci(dev)) {
2546 struct pci_dev *pdev = to_pci_dev(dev);
2547
2548 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2549
2550 spin_lock_irqsave(&device_domain_lock, flags);
2551 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2552 PCI_BUS_NUM(dma_alias),
2553 dma_alias & 0xff);
2554 if (info) {
2555 iommu = info->iommu;
2556 domain = info->domain;
2557 }
2558 spin_unlock_irqrestore(&device_domain_lock, flags);
2559
Joerg Roedel76208352016-08-25 14:25:12 +02002560 /* DMA alias already has a domain, use it */
Alex Williamson579305f2014-07-03 09:51:43 -06002561 if (info)
Joerg Roedel76208352016-08-25 14:25:12 +02002562 goto out;
Alex Williamson579305f2014-07-03 09:51:43 -06002563 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002564
David Woodhouse146922e2014-03-09 15:44:17 -07002565 /* Allocate and initialize new domain for the device */
Jiang Liuab8dfe22014-07-11 14:19:27 +08002566 domain = alloc_domain(0);
Jiang Liu745f2582014-02-19 14:07:26 +08002567 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002568 return NULL;
Joerg Roedeldc534b22015-07-22 12:44:02 +02002569 if (domain_init(domain, iommu, gaw)) {
Alex Williamson579305f2014-07-03 09:51:43 -06002570 domain_exit(domain);
2571 return NULL;
2572 }
2573
Joerg Roedel76208352016-08-25 14:25:12 +02002574out:
Alex Williamson579305f2014-07-03 09:51:43 -06002575
Joerg Roedel76208352016-08-25 14:25:12 +02002576 return domain;
2577}
2578
2579static struct dmar_domain *set_domain_for_dev(struct device *dev,
2580 struct dmar_domain *domain)
2581{
2582 struct intel_iommu *iommu;
2583 struct dmar_domain *tmp;
2584 u16 req_id, dma_alias;
2585 u8 bus, devfn;
2586
2587 iommu = device_to_iommu(dev, &bus, &devfn);
2588 if (!iommu)
2589 return NULL;
2590
2591 req_id = ((u16)bus << 8) | devfn;
2592
2593 if (dev_is_pci(dev)) {
2594 struct pci_dev *pdev = to_pci_dev(dev);
2595
2596 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2597
2598 /* register PCI DMA alias device */
2599 if (req_id != dma_alias) {
2600 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2601 dma_alias & 0xff, NULL, domain);
2602
2603 if (!tmp || tmp != domain)
2604 return tmp;
Alex Williamson579305f2014-07-03 09:51:43 -06002605 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002606 }
2607
Joerg Roedel5db31562015-07-22 12:40:43 +02002608 tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
Joerg Roedel76208352016-08-25 14:25:12 +02002609 if (!tmp || tmp != domain)
2610 return tmp;
Alex Williamson579305f2014-07-03 09:51:43 -06002611
Joerg Roedel76208352016-08-25 14:25:12 +02002612 return domain;
2613}
2614
2615static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2616{
2617 struct dmar_domain *domain, *tmp;
2618
2619 domain = find_domain(dev);
2620 if (domain)
2621 goto out;
2622
2623 domain = find_or_alloc_domain(dev, gaw);
2624 if (!domain)
2625 goto out;
2626
2627 tmp = set_domain_for_dev(dev, domain);
2628 if (!tmp || domain != tmp) {
Alex Williamson579305f2014-07-03 09:51:43 -06002629 domain_exit(domain);
2630 domain = tmp;
2631 }
David Woodhouseb718cd32014-03-09 13:11:33 -07002632
Joerg Roedel76208352016-08-25 14:25:12 +02002633out:
2634
David Woodhouseb718cd32014-03-09 13:11:33 -07002635 return domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002636}
2637
David Woodhouseb2132032009-06-26 18:50:28 +01002638static int iommu_domain_identity_map(struct dmar_domain *domain,
2639 unsigned long long start,
2640 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002641{
David Woodhousec5395d52009-06-28 16:35:56 +01002642 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2643 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002644
David Woodhousec5395d52009-06-28 16:35:56 +01002645 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2646 dma_to_mm_pfn(last_vpfn))) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002647 pr_err("Reserving iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002648 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002649 }
2650
Joerg Roedelaf1089c2015-07-21 15:45:19 +02002651 pr_debug("Mapping reserved region %llx-%llx\n", start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002652 /*
2653 * RMRR range might have overlap with physical memory range,
2654 * clear it first
2655 */
David Woodhousec5395d52009-06-28 16:35:56 +01002656 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002657
David Woodhousec5395d52009-06-28 16:35:56 +01002658 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2659 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002660 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002661}
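
/*
 * Worked example of the address arithmetic above, assuming 4KiB pages
 * (PAGE_SHIFT == VTD_PAGE_SHIFT == 12, so the dma_to_mm_pfn() conversion
 * used for the iova reservation is a no-op shift): an RMRR spanning
 * 0xbf000000..0xbf7fffff becomes VT-d pfns 0xbf000..0xbf7ff, i.e. 0x800
 * pages mapped 1:1.  The helper below (example-only name) restates that
 * calculation.
 */
static inline unsigned long example_identity_map_pages(u64 start, u64 end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;	/* 0xbf000 */
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;	/* 0xbf7ff */

	return last_vpfn - first_vpfn + 1;			/* 0x800 pages */
}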
2662
Joerg Roedeld66ce542015-09-23 19:00:10 +02002663static int domain_prepare_identity_map(struct device *dev,
2664 struct dmar_domain *domain,
2665 unsigned long long start,
2666 unsigned long long end)
David Woodhouseb2132032009-06-26 18:50:28 +01002667{
David Woodhouse19943b02009-08-04 16:19:20 +01002668 /* For _hardware_ passthrough, don't bother. But for software
2669 passthrough, we do it anyway -- it may indicate a memory
 2670	   range which is reserved in E820 and therefore didn't get set
 2671	   up to start with in si_domain */
2672 if (domain == si_domain && hw_pass_through) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002673 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2674 dev_name(dev), start, end);
David Woodhouse19943b02009-08-04 16:19:20 +01002675 return 0;
2676 }
2677
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002678 pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2679 dev_name(dev), start, end);
2680
David Woodhouse5595b522009-12-02 09:21:55 +00002681 if (end < start) {
2682 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2683 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2684 dmi_get_system_info(DMI_BIOS_VENDOR),
2685 dmi_get_system_info(DMI_BIOS_VERSION),
2686 dmi_get_system_info(DMI_PRODUCT_VERSION));
Joerg Roedeld66ce542015-09-23 19:00:10 +02002687 return -EIO;
David Woodhouse5595b522009-12-02 09:21:55 +00002688 }
2689
David Woodhouse2ff729f2009-08-26 14:25:41 +01002690 if (end >> agaw_to_width(domain->agaw)) {
2691 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2692 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2693 agaw_to_width(domain->agaw),
2694 dmi_get_system_info(DMI_BIOS_VENDOR),
2695 dmi_get_system_info(DMI_BIOS_VERSION),
2696 dmi_get_system_info(DMI_PRODUCT_VERSION));
Joerg Roedeld66ce542015-09-23 19:00:10 +02002697 return -EIO;
David Woodhouse2ff729f2009-08-26 14:25:41 +01002698 }
David Woodhouse19943b02009-08-04 16:19:20 +01002699
Joerg Roedeld66ce542015-09-23 19:00:10 +02002700 return iommu_domain_identity_map(domain, start, end);
2701}
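
/*
 * The second WARN above rejects an RMRR whose end address does not fit in
 * the domain's guest address width.  A sketch of that test, assuming
 * agaw_to_width() (defined earlier in this file) reports, for example,
 * 39 bits for a 3-level and 48 bits for a 4-level page table; the helper
 * name is made up for the example.
 */
static inline bool example_rmrr_fits(struct dmar_domain *domain, u64 end)
{
	int width = agaw_to_width(domain->agaw);	/* e.g. 39 or 48 */

	/* end >> width must be zero, i.e. end < (1ULL << width) */
	return (end >> width) == 0;
}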
2702
2703static int iommu_prepare_identity_map(struct device *dev,
2704 unsigned long long start,
2705 unsigned long long end)
2706{
2707 struct dmar_domain *domain;
2708 int ret;
2709
2710 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2711 if (!domain)
2712 return -ENOMEM;
2713
2714 ret = domain_prepare_identity_map(dev, domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002715 if (ret)
Joerg Roedeld66ce542015-09-23 19:00:10 +02002716 domain_exit(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002717
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002718 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002719}
2720
2721static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
David Woodhouse0b9d9752014-03-09 15:48:15 -07002722 struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002723{
David Woodhouse0b9d9752014-03-09 15:48:15 -07002724 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002725 return 0;
David Woodhouse0b9d9752014-03-09 15:48:15 -07002726 return iommu_prepare_identity_map(dev, rmrr->base_address,
2727 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002728}
2729
Suresh Siddhad3f13812011-08-23 17:05:25 -07002730#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002731static inline void iommu_prepare_isa(void)
2732{
2733 struct pci_dev *pdev;
2734 int ret;
2735
2736 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2737 if (!pdev)
2738 return;
2739
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002740 pr_info("Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse0b9d9752014-03-09 15:48:15 -07002741 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002742
2743 if (ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002744 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002745
Yijing Wang9b27e822014-05-20 20:37:52 +08002746 pci_dev_put(pdev);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002747}
2748#else
2749static inline void iommu_prepare_isa(void)
2750{
2751 return;
2752}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002753#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002754
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002755static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002756
Matt Kraai071e1372009-08-23 22:30:22 -07002757static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002758{
David Woodhousec7ab48d2009-06-26 19:10:36 +01002759 int nid, ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002760
Jiang Liuab8dfe22014-07-11 14:19:27 +08002761 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002762 if (!si_domain)
2763 return -EFAULT;
2764
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002765 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2766 domain_exit(si_domain);
2767 return -EFAULT;
2768 }
2769
Joerg Roedel0dc79712015-07-21 15:40:06 +02002770 pr_debug("Identity mapping domain allocated\n");
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002771
David Woodhouse19943b02009-08-04 16:19:20 +01002772 if (hw)
2773 return 0;
2774
David Woodhousec7ab48d2009-06-26 19:10:36 +01002775 for_each_online_node(nid) {
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002776 unsigned long start_pfn, end_pfn;
2777 int i;
2778
2779 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2780 ret = iommu_domain_identity_map(si_domain,
2781 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2782 if (ret)
2783 return ret;
2784 }
David Woodhousec7ab48d2009-06-26 19:10:36 +01002785 }
2786
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002787 return 0;
2788}
2789
David Woodhouse9b226622014-03-09 14:03:28 -07002790static int identity_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002791{
2792 struct device_domain_info *info;
2793
2794 if (likely(!iommu_identity_mapping))
2795 return 0;
2796
David Woodhouse9b226622014-03-09 14:03:28 -07002797 info = dev->archdata.iommu;
Mike Traviscb452a42011-05-28 13:15:03 -05002798 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2799 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002800
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002801 return 0;
2802}
2803
Joerg Roedel28ccce02015-07-21 14:45:31 +02002804static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002805{
David Woodhouse0ac72662014-03-09 13:19:22 -07002806 struct dmar_domain *ndomain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002807 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002808 u8 bus, devfn;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002809
David Woodhouse5913c9b2014-03-09 16:27:31 -07002810 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002811 if (!iommu)
2812 return -ENODEV;
2813
Joerg Roedel5db31562015-07-22 12:40:43 +02002814 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
David Woodhouse0ac72662014-03-09 13:19:22 -07002815 if (ndomain != domain)
2816 return -EBUSY;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002817
2818 return 0;
2819}
2820
David Woodhouse0b9d9752014-03-09 15:48:15 -07002821static bool device_has_rmrr(struct device *dev)
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002822{
2823 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002824 struct device *tmp;
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002825 int i;
2826
Jiang Liu0e242612014-02-19 14:07:34 +08002827 rcu_read_lock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002828 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002829 /*
2830 * Return TRUE if this RMRR contains the device that
2831 * is passed in.
2832 */
2833 for_each_active_dev_scope(rmrr->devices,
2834 rmrr->devices_cnt, i, tmp)
David Woodhouse0b9d9752014-03-09 15:48:15 -07002835 if (tmp == dev) {
Jiang Liu0e242612014-02-19 14:07:34 +08002836 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002837 return true;
Jiang Liub683b232014-02-19 14:07:32 +08002838 }
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002839 }
Jiang Liu0e242612014-02-19 14:07:34 +08002840 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002841 return false;
2842}
2843
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002844/*
2845 * There are a couple cases where we need to restrict the functionality of
2846 * devices associated with RMRRs. The first is when evaluating a device for
2847 * identity mapping because problems exist when devices are moved in and out
2848 * of domains and their respective RMRR information is lost. This means that
2849 * a device with associated RMRRs will never be in a "passthrough" domain.
2850 * The second is use of the device through the IOMMU API. This interface
2851 * expects to have full control of the IOVA space for the device. We cannot
2852 * satisfy both the requirement that RMRR access is maintained and have an
2853 * unencumbered IOVA space. We also have no ability to quiesce the device's
2854 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2855 * We therefore prevent devices associated with an RMRR from participating in
2856 * the IOMMU API, which eliminates them from device assignment.
2857 *
2858 * In both cases we assume that PCI USB devices with RMRRs have them largely
2859 * for historical reasons and that the RMRR space is not actively used post
2860 * boot. This exclusion may change if vendors begin to abuse it.
David Woodhouse18436af2015-03-25 15:05:47 +00002861 *
2862 * The same exception is made for graphics devices, with the requirement that
2863 * any use of the RMRR regions will be torn down before assigning the device
2864 * to a guest.
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002865 */
2866static bool device_is_rmrr_locked(struct device *dev)
2867{
2868 if (!device_has_rmrr(dev))
2869 return false;
2870
2871 if (dev_is_pci(dev)) {
2872 struct pci_dev *pdev = to_pci_dev(dev);
2873
David Woodhouse18436af2015-03-25 15:05:47 +00002874 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002875 return false;
2876 }
2877
2878 return true;
2879}
2880
David Woodhouse3bdb2592014-03-09 16:03:08 -07002881static int iommu_should_identity_map(struct device *dev, int startup)
David Woodhouse6941af22009-07-04 18:24:27 +01002882{
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002883
David Woodhouse3bdb2592014-03-09 16:03:08 -07002884 if (dev_is_pci(dev)) {
2885 struct pci_dev *pdev = to_pci_dev(dev);
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002886
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002887 if (device_is_rmrr_locked(dev))
David Woodhouse3bdb2592014-03-09 16:03:08 -07002888 return 0;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002889
David Woodhouse3bdb2592014-03-09 16:03:08 -07002890 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2891 return 1;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002892
David Woodhouse3bdb2592014-03-09 16:03:08 -07002893 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2894 return 1;
2895
2896 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2897 return 0;
2898
2899 /*
2900 * We want to start off with all devices in the 1:1 domain, and
2901 * take them out later if we find they can't access all of memory.
2902 *
2903 * However, we can't do this for PCI devices behind bridges,
2904 * because all PCI devices behind the same bridge will end up
2905 * with the same source-id on their transactions.
2906 *
2907 * Practically speaking, we can't change things around for these
2908 * devices at run-time, because we can't be sure there'll be no
2909 * DMA transactions in flight for any of their siblings.
2910 *
2911 * So PCI devices (unless they're on the root bus) as well as
2912 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2913 * the 1:1 domain, just in _case_ one of their siblings turns out
2914 * not to be able to map all of memory.
2915 */
2916 if (!pci_is_pcie(pdev)) {
2917 if (!pci_is_root_bus(pdev->bus))
2918 return 0;
2919 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2920 return 0;
2921 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2922 return 0;
2923 } else {
2924 if (device_has_rmrr(dev))
2925 return 0;
2926 }
David Woodhouse6941af22009-07-04 18:24:27 +01002927
David Woodhouse3dfc8132009-07-04 19:11:08 +01002928 /*
David Woodhouse3dfc8132009-07-04 19:11:08 +01002929 * At boot time, we don't yet know if devices will be 64-bit capable.
David Woodhouse3bdb2592014-03-09 16:03:08 -07002930 * Assume that they will — if they turn out not to be, then we can
David Woodhouse3dfc8132009-07-04 19:11:08 +01002931 * take them out of the 1:1 domain later.
2932 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002933 if (!startup) {
2934 /*
2935 * If the device's dma_mask is less than the system's memory
2936 * size then this is not a candidate for identity mapping.
2937 */
David Woodhouse3bdb2592014-03-09 16:03:08 -07002938 u64 dma_mask = *dev->dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002939
David Woodhouse3bdb2592014-03-09 16:03:08 -07002940 if (dev->coherent_dma_mask &&
2941 dev->coherent_dma_mask < dma_mask)
2942 dma_mask = dev->coherent_dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002943
David Woodhouse3bdb2592014-03-09 16:03:08 -07002944 return dma_mask >= dma_get_required_mask(dev);
Chris Wright8fcc5372011-05-28 13:15:02 -05002945 }
David Woodhouse6941af22009-07-04 18:24:27 +01002946
2947 return 1;
2948}
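
/*
 * The !startup branch above boils down to: a device only stays in (or is
 * put into) the 1:1 domain if its effective DMA mask can reach all the
 * memory the platform requires.  Illustrative restatement (example-only
 * helper name): a 32-bit-only device on a machine with RAM above 4GiB
 * fails the test and is given a private, remapped domain instead.
 */
static inline bool example_can_use_identity_map(struct device *dev)
{
	u64 dma_mask = *dev->dma_mask;

	if (dev->coherent_dma_mask && dev->coherent_dma_mask < dma_mask)
		dma_mask = dev->coherent_dma_mask;

	/* e.g. DMA_BIT_MASK(32) vs. a required mask of DMA_BIT_MASK(34) */
	return dma_mask >= dma_get_required_mask(dev);
}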
2949
David Woodhousecf04eee2014-03-21 16:49:04 +00002950static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2951{
2952 int ret;
2953
2954 if (!iommu_should_identity_map(dev, 1))
2955 return 0;
2956
Joerg Roedel28ccce02015-07-21 14:45:31 +02002957 ret = domain_add_dev_info(si_domain, dev);
David Woodhousecf04eee2014-03-21 16:49:04 +00002958 if (!ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002959 pr_info("%s identity mapping for device %s\n",
2960 hw ? "Hardware" : "Software", dev_name(dev));
David Woodhousecf04eee2014-03-21 16:49:04 +00002961 else if (ret == -ENODEV)
2962 /* device not associated with an iommu */
2963 ret = 0;
2964
2965 return ret;
2966}
2967
2968
Matt Kraai071e1372009-08-23 22:30:22 -07002969static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002970{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002971 struct pci_dev *pdev = NULL;
David Woodhousecf04eee2014-03-21 16:49:04 +00002972 struct dmar_drhd_unit *drhd;
2973 struct intel_iommu *iommu;
2974 struct device *dev;
2975 int i;
2976 int ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002977
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002978 for_each_pci_dev(pdev) {
David Woodhousecf04eee2014-03-21 16:49:04 +00002979 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2980 if (ret)
2981 return ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002982 }
2983
David Woodhousecf04eee2014-03-21 16:49:04 +00002984 for_each_active_iommu(iommu, drhd)
2985 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2986 struct acpi_device_physical_node *pn;
2987 struct acpi_device *adev;
2988
2989 if (dev->bus != &acpi_bus_type)
2990 continue;
Joerg Roedel86080cc2015-06-12 12:27:16 +02002991
David Woodhousecf04eee2014-03-21 16:49:04 +00002992		adev = to_acpi_device(dev);
2993 mutex_lock(&adev->physical_node_lock);
2994 list_for_each_entry(pn, &adev->physical_node_list, node) {
2995 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2996 if (ret)
2997 break;
2998 }
2999 mutex_unlock(&adev->physical_node_lock);
3000 if (ret)
3001 return ret;
3002 }
3003
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003004 return 0;
3005}
3006
Jiang Liuffebeb42014-11-09 22:48:02 +08003007static void intel_iommu_init_qi(struct intel_iommu *iommu)
3008{
3009 /*
3010 * Start from the sane iommu hardware state.
3011 * If the queued invalidation is already initialized by us
3012 * (for example, while enabling interrupt-remapping) then
3013 * we got the things already rolling from a sane state.
3014 */
3015 if (!iommu->qi) {
3016 /*
3017 * Clear any previous faults.
3018 */
3019 dmar_fault(-1, iommu);
3020 /*
3021 * Disable queued invalidation if supported and already enabled
3022 * before OS handover.
3023 */
3024 dmar_disable_qi(iommu);
3025 }
3026
3027 if (dmar_enable_qi(iommu)) {
3028 /*
3029 * Queued Invalidate not enabled, use Register Based Invalidate
3030 */
3031 iommu->flush.flush_context = __iommu_flush_context;
3032 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003033 pr_info("%s: Using Register based invalidation\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08003034 iommu->name);
3035 } else {
3036 iommu->flush.flush_context = qi_flush_context;
3037 iommu->flush.flush_iotlb = qi_flush_iotlb;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003038 pr_info("%s: Using Queued invalidation\n", iommu->name);
Jiang Liuffebeb42014-11-09 22:48:02 +08003039 }
3040}
3041
Joerg Roedel091d42e2015-06-12 11:56:10 +02003042static int copy_context_table(struct intel_iommu *iommu,
Dan Williamsdfddb9692015-10-09 18:16:46 -04003043 struct root_entry *old_re,
Joerg Roedel091d42e2015-06-12 11:56:10 +02003044 struct context_entry **tbl,
3045 int bus, bool ext)
3046{
Joerg Roedeldbcd8612015-06-12 12:02:09 +02003047 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
Joerg Roedel543c8dc2015-08-13 11:56:59 +02003048 struct context_entry *new_ce = NULL, ce;
Dan Williamsdfddb9692015-10-09 18:16:46 -04003049 struct context_entry *old_ce = NULL;
Joerg Roedel543c8dc2015-08-13 11:56:59 +02003050 struct root_entry re;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003051 phys_addr_t old_ce_phys;
3052
3053 tbl_idx = ext ? bus * 2 : bus;
Dan Williamsdfddb9692015-10-09 18:16:46 -04003054 memcpy(&re, old_re, sizeof(re));
Joerg Roedel091d42e2015-06-12 11:56:10 +02003055
3056 for (devfn = 0; devfn < 256; devfn++) {
3057 /* First calculate the correct index */
3058 idx = (ext ? devfn * 2 : devfn) % 256;
3059
3060 if (idx == 0) {
3061 /* First save what we may have and clean up */
3062 if (new_ce) {
3063 tbl[tbl_idx] = new_ce;
3064 __iommu_flush_cache(iommu, new_ce,
3065 VTD_PAGE_SIZE);
3066 pos = 1;
3067 }
3068
3069 if (old_ce)
3070 iounmap(old_ce);
3071
3072 ret = 0;
3073 if (devfn < 0x80)
Joerg Roedel543c8dc2015-08-13 11:56:59 +02003074 old_ce_phys = root_entry_lctp(&re);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003075 else
Joerg Roedel543c8dc2015-08-13 11:56:59 +02003076 old_ce_phys = root_entry_uctp(&re);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003077
3078 if (!old_ce_phys) {
3079 if (ext && devfn == 0) {
3080 /* No LCTP, try UCTP */
3081 devfn = 0x7f;
3082 continue;
3083 } else {
3084 goto out;
3085 }
3086 }
3087
3088 ret = -ENOMEM;
Dan Williamsdfddb9692015-10-09 18:16:46 -04003089 old_ce = memremap(old_ce_phys, PAGE_SIZE,
3090 MEMREMAP_WB);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003091 if (!old_ce)
3092 goto out;
3093
3094 new_ce = alloc_pgtable_page(iommu->node);
3095 if (!new_ce)
3096 goto out_unmap;
3097
3098 ret = 0;
3099 }
3100
3101 /* Now copy the context entry */
Dan Williamsdfddb9692015-10-09 18:16:46 -04003102 memcpy(&ce, old_ce + idx, sizeof(ce));
Joerg Roedel091d42e2015-06-12 11:56:10 +02003103
Joerg Roedelcf484d02015-06-12 12:21:46 +02003104 if (!__context_present(&ce))
Joerg Roedel091d42e2015-06-12 11:56:10 +02003105 continue;
3106
Joerg Roedeldbcd8612015-06-12 12:02:09 +02003107 did = context_domain_id(&ce);
3108 if (did >= 0 && did < cap_ndoms(iommu->cap))
3109 set_bit(did, iommu->domain_ids);
3110
Joerg Roedelcf484d02015-06-12 12:21:46 +02003111 /*
3112 * We need a marker for copied context entries. This
3113 * marker needs to work for the old format as well as
3114 * for extended context entries.
3115 *
3116 * Bit 67 of the context entry is used. In the old
3117 * format this bit is available to software, in the
3118 * extended format it is the PGE bit, but PGE is ignored
3119 * by HW if PASIDs are disabled (and thus still
3120 * available).
3121 *
3122 * So disable PASIDs first and then mark the entry
3123 * copied. This means that we don't copy PASID
3124 * translations from the old kernel, but this is fine as
3125 * faults there are not fatal.
3126 */
3127 context_clear_pasid_enable(&ce);
3128 context_set_copied(&ce);
3129
Joerg Roedel091d42e2015-06-12 11:56:10 +02003130 new_ce[idx] = ce;
3131 }
3132
3133 tbl[tbl_idx + pos] = new_ce;
3134
3135 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
3136
3137out_unmap:
Dan Williamsdfddb9692015-10-09 18:16:46 -04003138 memunmap(old_ce);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003139
3140out:
3141 return ret;
3142}
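
/*
 * The "copied" marker described in the loop above is bit 67 of the 128-bit
 * context entry, i.e. bit 3 of the high qword (67 - 64 = 3).  A sketch of
 * the marking step, assuming the two-qword struct context_entry layout
 * used by this driver (example-only helper name):
 */
static inline void example_mark_context_copied(struct context_entry *ce)
{
	ce->hi |= 1ULL << (67 - 64);	/* bit 67 of the whole entry */
}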
3143
3144static int copy_translation_tables(struct intel_iommu *iommu)
3145{
3146 struct context_entry **ctxt_tbls;
Dan Williamsdfddb9692015-10-09 18:16:46 -04003147 struct root_entry *old_rt;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003148 phys_addr_t old_rt_phys;
3149 int ctxt_table_entries;
3150 unsigned long flags;
3151 u64 rtaddr_reg;
3152 int bus, ret;
Joerg Roedelc3361f22015-06-12 12:39:25 +02003153 bool new_ext, ext;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003154
3155 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3156 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
Joerg Roedelc3361f22015-06-12 12:39:25 +02003157 new_ext = !!ecap_ecs(iommu->ecap);
3158
3159 /*
3160 * The RTT bit can only be changed when translation is disabled,
3161 * but disabling translation means to open a window for data
3162 * corruption. So bail out and don't copy anything if we would
3163 * have to change the bit.
3164 */
3165 if (new_ext != ext)
3166 return -EINVAL;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003167
3168 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
3169 if (!old_rt_phys)
3170 return -EINVAL;
3171
Dan Williamsdfddb9692015-10-09 18:16:46 -04003172 old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003173 if (!old_rt)
3174 return -ENOMEM;
3175
3176 /* This is too big for the stack - allocate it from slab */
3177 ctxt_table_entries = ext ? 512 : 256;
3178 ret = -ENOMEM;
3179 ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
3180 if (!ctxt_tbls)
3181 goto out_unmap;
3182
3183 for (bus = 0; bus < 256; bus++) {
3184 ret = copy_context_table(iommu, &old_rt[bus],
3185 ctxt_tbls, bus, ext);
3186 if (ret) {
3187 pr_err("%s: Failed to copy context table for bus %d\n",
3188 iommu->name, bus);
3189 continue;
3190 }
3191 }
3192
3193 spin_lock_irqsave(&iommu->lock, flags);
3194
3195 /* Context tables are copied, now write them to the root_entry table */
3196 for (bus = 0; bus < 256; bus++) {
3197 int idx = ext ? bus * 2 : bus;
3198 u64 val;
3199
3200 if (ctxt_tbls[idx]) {
3201 val = virt_to_phys(ctxt_tbls[idx]) | 1;
3202 iommu->root_entry[bus].lo = val;
3203 }
3204
3205 if (!ext || !ctxt_tbls[idx + 1])
3206 continue;
3207
3208 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3209 iommu->root_entry[bus].hi = val;
3210 }
3211
3212 spin_unlock_irqrestore(&iommu->lock, flags);
3213
3214 kfree(ctxt_tbls);
3215
3216 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
3217
3218 ret = 0;
3219
3220out_unmap:
Dan Williamsdfddb9692015-10-09 18:16:46 -04003221 memunmap(old_rt);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003222
3223 return ret;
3224}
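
/*
 * In extended mode each bus owns two context tables: ctxt_tbls[bus * 2]
 * is installed as the lower context-table pointer (root_entry.lo, devfns
 * 0x00-0x7f) and ctxt_tbls[bus * 2 + 1] as the upper one (root_entry.hi,
 * devfns 0x80-0xff); OR-ing in 1 sets the present bit, exactly as the
 * loop above does.  Illustrative sketch (example-only helper name):
 */
static inline void example_install_root_entry(struct root_entry *re,
					      struct context_entry *lower,
					      struct context_entry *upper)
{
	re->lo = virt_to_phys(lower) | 1;		/* present + lower table */
	if (upper)
		re->hi = virt_to_phys(upper) | 1;	/* present + upper table */
}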
3225
Joseph Cihulab7792602011-05-03 00:08:37 -07003226static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003227{
3228 struct dmar_drhd_unit *drhd;
3229 struct dmar_rmrr_unit *rmrr;
Joerg Roedela87f4912015-06-12 12:32:54 +02003230 bool copied_tables = false;
David Woodhouse832bd852014-03-07 15:08:36 +00003231 struct device *dev;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003232 struct intel_iommu *iommu;
Joerg Roedel13cf0172017-08-11 11:40:10 +02003233 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003234
3235 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003236 * for each drhd
3237 * allocate root
3238 * initialize and program root entry to not present
3239 * endfor
3240 */
3241 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08003242 /*
 3243		 * lock not needed as this is only incremented in the single-
 3244		 * threaded kernel __init code path; all other accesses are
 3245		 * read only
3246 */
Jiang Liu78d8e702014-11-09 22:47:57 +08003247 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
Mike Travis1b198bb2012-03-05 15:05:16 -08003248 g_num_of_iommus++;
3249 continue;
3250 }
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003251 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08003252 }
3253
Jiang Liuffebeb42014-11-09 22:48:02 +08003254 /* Preallocate enough resources for IOMMU hot-addition */
3255 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3256 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3257
Weidong Hand9630fe2008-12-08 11:06:32 +08003258 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3259 GFP_KERNEL);
3260 if (!g_iommus) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003261 pr_err("Allocating global iommu array failed\n");
Weidong Hand9630fe2008-12-08 11:06:32 +08003262 ret = -ENOMEM;
3263 goto error;
3264 }
3265
Jiang Liu7c919772014-01-06 14:18:18 +08003266 for_each_active_iommu(iommu, drhd) {
Weidong Hand9630fe2008-12-08 11:06:32 +08003267 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003268
Joerg Roedelb63d80d2015-06-12 09:14:34 +02003269 intel_iommu_init_qi(iommu);
3270
Suresh Siddhae61d98d2008-07-10 11:16:35 -07003271 ret = iommu_init_domains(iommu);
3272 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003273 goto free_iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07003274
Joerg Roedel4158c2e2015-06-12 10:14:02 +02003275 init_translation_status(iommu);
3276
Joerg Roedel091d42e2015-06-12 11:56:10 +02003277 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3278 iommu_disable_translation(iommu);
3279 clear_translation_pre_enabled(iommu);
3280 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3281 iommu->name);
3282 }
Joerg Roedel4158c2e2015-06-12 10:14:02 +02003283
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003284 /*
3285 * TBD:
3286 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003287	 * among all IOMMUs. Need to split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003288 */
3289 ret = iommu_alloc_root_entry(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08003290 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003291 goto free_iommu;
Joerg Roedel5f0a7f72015-06-12 09:18:53 +02003292
Joerg Roedel091d42e2015-06-12 11:56:10 +02003293 if (translation_pre_enabled(iommu)) {
3294 pr_info("Translation already enabled - trying to copy translation structures\n");
3295
3296 ret = copy_translation_tables(iommu);
3297 if (ret) {
3298 /*
3299 * We found the IOMMU with translation
3300 * enabled - but failed to copy over the
3301 * old root-entry table. Try to proceed
3302 * by disabling translation now and
3303 * allocating a clean root-entry table.
3304 * This might cause DMAR faults, but
3305 * probably the dump will still succeed.
3306 */
3307 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3308 iommu->name);
3309 iommu_disable_translation(iommu);
3310 clear_translation_pre_enabled(iommu);
3311 } else {
3312 pr_info("Copied translation tables from previous kernel for %s\n",
3313 iommu->name);
Joerg Roedela87f4912015-06-12 12:32:54 +02003314 copied_tables = true;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003315 }
3316 }
3317
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003318 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01003319 hw_pass_through = 0;
David Woodhouse8a94ade2015-03-24 14:54:56 +00003320#ifdef CONFIG_INTEL_IOMMU_SVM
3321 if (pasid_enabled(iommu))
3322 intel_svm_alloc_pasid_tables(iommu);
3323#endif
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003324 }
3325
Joerg Roedela4c34ff2016-06-17 11:29:48 +02003326 /*
3327 * Now that qi is enabled on all iommus, set the root entry and flush
3328 * caches. This is required on some Intel X58 chipsets, otherwise the
3329 * flush_context function will loop forever and the boot hangs.
3330 */
3331 for_each_active_iommu(iommu, drhd) {
3332 iommu_flush_write_buffer(iommu);
3333 iommu_set_root_entry(iommu);
3334 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3335 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3336 }
3337
David Woodhouse19943b02009-08-04 16:19:20 +01003338 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07003339 iommu_identity_mapping |= IDENTMAP_ALL;
3340
Suresh Siddhad3f13812011-08-23 17:05:25 -07003341#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07003342 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01003343#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07003344
Ashok Raj21e722c2017-01-30 09:39:53 -08003345 check_tylersburg_isoch();
3346
Joerg Roedel86080cc2015-06-12 12:27:16 +02003347 if (iommu_identity_mapping) {
3348 ret = si_domain_init(hw_pass_through);
3349 if (ret)
3350 goto free_iommu;
3351 }
3352
David Woodhousee0fc7e02009-09-30 09:12:17 -07003353
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003354 /*
Joerg Roedela87f4912015-06-12 12:32:54 +02003355 * If we copied translations from a previous kernel in the kdump
3356 * case, we can not assign the devices to domains now, as that
3357 * would eliminate the old mappings. So skip this part and defer
3358 * the assignment to device driver initialization time.
3359 */
3360 if (copied_tables)
3361 goto domains_done;
3362
3363 /*
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003364	 * If pass through is not set or not enabled, set up context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003365	 * identity mappings for rmrr, gfx and isa, and fall back to static
 3366	 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003367 */
David Woodhouse19943b02009-08-04 16:19:20 +01003368 if (iommu_identity_mapping) {
3369 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3370 if (ret) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003371 pr_crit("Failed to setup IOMMU pass-through\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08003372 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003373 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003374 }
David Woodhouse19943b02009-08-04 16:19:20 +01003375 /*
3376 * For each rmrr
3377 * for each dev attached to rmrr
3378 * do
3379 * locate drhd for dev, alloc domain for dev
3380 * allocate free domain
3381 * allocate page table entries for rmrr
3382 * if context not allocated for bus
3383 * allocate and init context
3384 * set present in root table for this bus
3385 * init context with domain, translation etc
3386 * endfor
3387 * endfor
3388 */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003389 pr_info("Setting RMRR:\n");
David Woodhouse19943b02009-08-04 16:19:20 +01003390 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08003391		/* some BIOSes list nonexistent devices in the DMAR table. */
3392 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
David Woodhouse832bd852014-03-07 15:08:36 +00003393 i, dev) {
David Woodhouse0b9d9752014-03-09 15:48:15 -07003394 ret = iommu_prepare_rmrr_dev(rmrr, dev);
David Woodhouse19943b02009-08-04 16:19:20 +01003395 if (ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003396 pr_err("Mapping reserved region failed\n");
David Woodhouse19943b02009-08-04 16:19:20 +01003397 }
3398 }
3399
3400 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07003401
Joerg Roedela87f4912015-06-12 12:32:54 +02003402domains_done:
3403
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003404 /*
3405 * for each drhd
3406 * enable fault log
3407 * global invalidate context cache
3408 * global invalidate iotlb
3409 * enable translation
3410 */
Jiang Liu7c919772014-01-06 14:18:18 +08003411 for_each_iommu(iommu, drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07003412 if (drhd->ignored) {
3413 /*
3414 * we always have to disable PMRs or DMA may fail on
3415 * this device
3416 */
3417 if (force_on)
Jiang Liu7c919772014-01-06 14:18:18 +08003418 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003419 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07003420 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003421
3422 iommu_flush_write_buffer(iommu);
3423
David Woodhousea222a7f2015-10-07 23:35:18 +01003424#ifdef CONFIG_INTEL_IOMMU_SVM
3425 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
3426 ret = intel_svm_enable_prq(iommu);
3427 if (ret)
3428 goto free_iommu;
3429 }
3430#endif
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07003431 ret = dmar_set_interrupt(iommu);
3432 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003433 goto free_iommu;
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07003434
Joerg Roedel8939ddf2015-06-12 14:40:01 +02003435 if (!translation_pre_enabled(iommu))
3436 iommu_enable_translation(iommu);
3437
David Woodhouseb94996c2009-09-19 15:28:12 -07003438 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003439 }
3440
3441 return 0;
Jiang Liu989d51f2014-02-19 14:07:21 +08003442
3443free_iommu:
Jiang Liuffebeb42014-11-09 22:48:02 +08003444 for_each_active_iommu(iommu, drhd) {
3445 disable_dmar_iommu(iommu);
Jiang Liua868e6b2014-01-06 14:18:20 +08003446 free_dmar_iommu(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08003447 }
Joerg Roedel13cf0172017-08-11 11:40:10 +02003448
Weidong Hand9630fe2008-12-08 11:06:32 +08003449 kfree(g_iommus);
Joerg Roedel13cf0172017-08-11 11:40:10 +02003450
Jiang Liu989d51f2014-02-19 14:07:21 +08003451error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003452 return ret;
3453}
3454
David Woodhouse5a5e02a2009-07-04 09:35:44 +01003455/* This takes a number of _MM_ pages, not VTD pages */
Omer Peleg2aac6302016-04-20 11:33:57 +03003456static unsigned long intel_alloc_iova(struct device *dev,
David Woodhouse875764d2009-06-28 21:20:51 +01003457 struct dmar_domain *domain,
3458 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003459{
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003460 unsigned long iova_pfn = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003461
David Woodhouse875764d2009-06-28 21:20:51 +01003462 /* Restrict dma_mask to the width that the iommu can handle */
3463 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
Robin Murphy8f6429c2015-07-16 19:40:12 +01003464 /* Ensure we reserve the whole size-aligned region */
3465 nrpages = __roundup_pow_of_two(nrpages);
David Woodhouse875764d2009-06-28 21:20:51 +01003466
3467 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003468 /*
3469 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07003470 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08003471 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003472 */
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003473 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
Tomasz Nowicki538d5b32017-09-20 10:52:02 +02003474 IOVA_PFN(DMA_BIT_MASK(32)), false);
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003475 if (iova_pfn)
3476 return iova_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003477 }
Tomasz Nowicki538d5b32017-09-20 10:52:02 +02003478 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3479 IOVA_PFN(dma_mask), true);
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003480 if (unlikely(!iova_pfn)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003481		pr_err("Allocating %ld-page iova for %s failed\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003482 nrpages, dev_name(dev));
Omer Peleg2aac6302016-04-20 11:33:57 +03003483 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003484 }
3485
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003486 return iova_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003487}
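
/*
 * intel_alloc_iova() works in two steps: the page count is rounded up to a
 * power of two so the resulting IOVA is size-aligned, and when forcedac is
 * off and the device can address more than 32 bits a first attempt is made
 * below 4GiB before the full DMA mask is tried.  A minimal, illustrative
 * caller for the first step only (example-only helper; a real caller would
 * fall back to the device's full mask as above):
 */
static inline unsigned long example_alloc_iova_32(struct dmar_domain *domain,
						  unsigned long nrpages)
{
	/* size-align the request, as intel_alloc_iova() does */
	nrpages = __roundup_pow_of_two(nrpages);

	/* try below 4GiB; don't flush the per-cpu rcaches on failure */
	return alloc_iova_fast(&domain->iovad, nrpages,
			       IOVA_PFN(DMA_BIT_MASK(32)), false);
}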
3488
Peter Xub316d022017-05-22 18:28:51 +08003489static struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003490{
Joerg Roedel1c5ebba2016-08-25 13:52:51 +02003491 struct dmar_domain *domain, *tmp;
Joerg Roedelb1ce5b72015-09-23 19:16:01 +02003492 struct dmar_rmrr_unit *rmrr;
Joerg Roedelb1ce5b72015-09-23 19:16:01 +02003493 struct device *i_dev;
3494 int i, ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003495
Joerg Roedel1c5ebba2016-08-25 13:52:51 +02003496 domain = find_domain(dev);
3497 if (domain)
3498 goto out;
3499
3500 domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
3501 if (!domain)
3502 goto out;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003503
Joerg Roedelb1ce5b72015-09-23 19:16:01 +02003504 /* We have a new domain - setup possible RMRRs for the device */
3505 rcu_read_lock();
3506 for_each_rmrr_units(rmrr) {
3507 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3508 i, i_dev) {
3509 if (i_dev != dev)
3510 continue;
3511
3512 ret = domain_prepare_identity_map(dev, domain,
3513 rmrr->base_address,
3514 rmrr->end_address);
3515 if (ret)
3516 dev_err(dev, "Mapping reserved region failed\n");
3517 }
3518 }
3519 rcu_read_unlock();
3520
Joerg Roedel1c5ebba2016-08-25 13:52:51 +02003521 tmp = set_domain_for_dev(dev, domain);
3522 if (!tmp || domain != tmp) {
3523 domain_exit(domain);
3524 domain = tmp;
3525 }
3526
3527out:
3528
3529 if (!domain)
3530 pr_err("Allocating domain for %s failed\n", dev_name(dev));
3531
3532
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003533 return domain;
3534}
3535
David Woodhouseecb509e2014-03-09 16:29:55 -07003536/* Check if the dev needs to go through non-identity map and unmap process. */
David Woodhouse73676832009-07-04 14:08:36 +01003537static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003538{
3539 int found;
3540
David Woodhouse3d891942014-03-06 15:59:26 +00003541 if (iommu_dummy(dev))
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003542 return 1;
3543
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003544 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003545 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003546
David Woodhouse9b226622014-03-09 14:03:28 -07003547 found = identity_mapping(dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003548 if (found) {
David Woodhouseecb509e2014-03-09 16:29:55 -07003549 if (iommu_should_identity_map(dev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003550 return 1;
3551 else {
3552 /*
 3553			 * A 32 bit DMA device is removed from si_domain and falls back
 3554			 * to non-identity mapping.
3555 */
Joerg Roedele6de0f82015-07-22 16:30:36 +02003556 dmar_remove_one_dev_info(si_domain, dev);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003557 pr_info("32bit %s uses non-identity mapping\n",
3558 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003559 return 0;
3560 }
3561 } else {
3562 /*
 3563			 * In case a 64 bit DMA device is detached from a vm, the device
3564 * is put into si_domain for identity mapping.
3565 */
David Woodhouseecb509e2014-03-09 16:29:55 -07003566 if (iommu_should_identity_map(dev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003567 int ret;
Joerg Roedel28ccce02015-07-21 14:45:31 +02003568 ret = domain_add_dev_info(si_domain, dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003569 if (!ret) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003570 pr_info("64bit %s uses identity mapping\n",
3571 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003572 return 1;
3573 }
3574 }
3575 }
3576
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003577 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003578}
3579
David Woodhouse5040a912014-03-09 16:14:00 -07003580static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003581 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003582{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003583 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003584 phys_addr_t start_paddr;
Omer Peleg2aac6302016-04-20 11:33:57 +03003585 unsigned long iova_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003586 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003587 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08003588 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07003589 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003590
3591 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003592
David Woodhouse5040a912014-03-09 16:14:00 -07003593 if (iommu_no_mapping(dev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003594 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003595
David Woodhouse5040a912014-03-09 16:14:00 -07003596 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003597 if (!domain)
3598 return 0;
3599
Weidong Han8c11e792008-12-08 15:29:22 +08003600 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01003601 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003602
Omer Peleg2aac6302016-04-20 11:33:57 +03003603 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3604 if (!iova_pfn)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003605 goto error;
3606
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003607 /*
 3608	 * Check if DMAR supports zero-length reads on write-only
 3609	 * mappings.
3610 */
3611 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003612 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003613 prot |= DMA_PTE_READ;
3614 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3615 prot |= DMA_PTE_WRITE;
3616 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003617 * paddr - (paddr + size) might be partial page, we should map the whole
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003618	 * page. Note: if two parts of one page are separately mapped, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003619	 * might have two guest addresses mapping to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003620 * is not a big problem
3621 */
Omer Peleg2aac6302016-04-20 11:33:57 +03003622 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
Fenghua Yu33041ec2009-08-04 15:10:59 -07003623 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003624 if (ret)
3625 goto error;
3626
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003627 /* it's a non-present to present mapping. Only flush if caching mode */
3628 if (cap_caching_mode(iommu->cap))
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02003629 iommu_flush_iotlb_psi(iommu, domain,
Omer Peleg2aac6302016-04-20 11:33:57 +03003630 mm_to_dma_pfn(iova_pfn),
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02003631 size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003632 else
Weidong Han8c11e792008-12-08 15:29:22 +08003633 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003634
Omer Peleg2aac6302016-04-20 11:33:57 +03003635 start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
David Woodhouse03d6a242009-06-28 15:33:46 +01003636 start_paddr += paddr & ~PAGE_MASK;
3637 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003638
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003639error:
Omer Peleg2aac6302016-04-20 11:33:57 +03003640 if (iova_pfn)
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003641 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003642 pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
David Woodhouse5040a912014-03-09 16:14:00 -07003643 dev_name(dev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003644 return 0;
3645}
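
/*
 * The size handed to domain_pfn_mapping() above is not simply size >> 12:
 * aligned_nrpages() folds in the sub-page offset of paddr first, so a
 * 4KiB buffer that starts at offset 0x800 spans two VT-d pages.  A worked
 * restatement of that arithmetic, assuming 4KiB VT-d pages (example-only
 * helper name):
 */
static inline unsigned long example_nrpages(phys_addr_t paddr, size_t size)
{
	unsigned long offset = paddr & ~VTD_PAGE_MASK;	/* e.g. 0x800 */

	/* e.g. ALIGN(0x800 + 0x1000, 0x1000) = 0x2000 -> 2 pages */
	return ALIGN(offset + size, VTD_PAGE_SIZE) >> VTD_PAGE_SHIFT;
}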
3646
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003647static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3648 unsigned long offset, size_t size,
3649 enum dma_data_direction dir,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003650 unsigned long attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003651{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003652 return __intel_map_single(dev, page_to_phys(page) + offset, size,
David Woodhouse46333e32014-03-10 20:01:21 -07003653 dir, *dev->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003654}
3655
Omer Peleg769530e2016-04-20 11:33:25 +03003656static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003657{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003658 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003659 unsigned long start_pfn, last_pfn;
Omer Peleg769530e2016-04-20 11:33:25 +03003660 unsigned long nrpages;
Omer Peleg2aac6302016-04-20 11:33:57 +03003661 unsigned long iova_pfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003662 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003663 struct page *freelist;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003664
David Woodhouse73676832009-07-04 14:08:36 +01003665 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003666 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003667
David Woodhouse1525a292014-03-06 16:19:30 +00003668 domain = find_domain(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003669 BUG_ON(!domain);
3670
Weidong Han8c11e792008-12-08 15:29:22 +08003671 iommu = domain_get_iommu(domain);
3672
Omer Peleg2aac6302016-04-20 11:33:57 +03003673 iova_pfn = IOVA_PFN(dev_addr);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003674
Omer Peleg769530e2016-04-20 11:33:25 +03003675 nrpages = aligned_nrpages(dev_addr, size);
Omer Peleg2aac6302016-04-20 11:33:57 +03003676 start_pfn = mm_to_dma_pfn(iova_pfn);
Omer Peleg769530e2016-04-20 11:33:25 +03003677 last_pfn = start_pfn + nrpages - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003678
David Woodhoused794dc92009-06-28 00:27:49 +01003679 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003680 dev_name(dev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003681
David Woodhouseea8ea462014-03-05 17:09:32 +00003682 freelist = domain_unmap(domain, start_pfn, last_pfn);
David Woodhoused794dc92009-06-28 00:27:49 +01003683
mark gross5e0d2a62008-03-04 15:22:08 -08003684 if (intel_iommu_strict) {
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02003685 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
Omer Peleg769530e2016-04-20 11:33:25 +03003686 nrpages, !freelist, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08003687 /* free iova */
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003688 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
David Woodhouseea8ea462014-03-05 17:09:32 +00003689 dma_free_pagelist(freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003690 } else {
Joerg Roedel13cf0172017-08-11 11:40:10 +02003691 queue_iova(&domain->iovad, iova_pfn, nrpages,
3692 (unsigned long)freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003693 /*
 3694	 * queue up the release of the unmap to save the roughly 1/6th of
 3695	 * the cpu time used up by the iotlb flush operation...
3696 */
mark gross5e0d2a62008-03-04 15:22:08 -08003697 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003698}
3699
Jiang Liud41a4ad2014-07-11 14:19:34 +08003700static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3701 size_t size, enum dma_data_direction dir,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003702 unsigned long attrs)
Jiang Liud41a4ad2014-07-11 14:19:34 +08003703{
Omer Peleg769530e2016-04-20 11:33:25 +03003704 intel_unmap(dev, dev_addr, size);
Jiang Liud41a4ad2014-07-11 14:19:34 +08003705}
3706
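/*
 * dma_alloc_coherent() backend: allocate page-aligned memory (preferring
 * CMA when the caller may block), zero it, and map it with
 * __intel_map_single() against the device's coherent DMA mask.
 */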
David Woodhouse5040a912014-03-09 16:14:00 -07003707static void *intel_alloc_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003708 dma_addr_t *dma_handle, gfp_t flags,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003709 unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003710{
Akinobu Mita36746432014-06-04 16:06:51 -07003711 struct page *page = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003712 int order;
3713
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003714 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003715 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003716
David Woodhouse5040a912014-03-09 16:14:00 -07003717 if (!iommu_no_mapping(dev))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003718 flags &= ~(GFP_DMA | GFP_DMA32);
David Woodhouse5040a912014-03-09 16:14:00 -07003719 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3720 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003721 flags |= GFP_DMA;
3722 else
3723 flags |= GFP_DMA32;
3724 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003725
Mel Gormand0164ad2015-11-06 16:28:21 -08003726 if (gfpflags_allow_blocking(flags)) {
Akinobu Mita36746432014-06-04 16:06:51 -07003727 unsigned int count = size >> PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003728
Lucas Stach712c6042017-02-24 14:58:44 -08003729 page = dma_alloc_from_contiguous(dev, count, order, flags);
Akinobu Mita36746432014-06-04 16:06:51 -07003730 if (page && iommu_no_mapping(dev) &&
3731 page_to_phys(page) + size > dev->coherent_dma_mask) {
3732 dma_release_from_contiguous(dev, page, count);
3733 page = NULL;
3734 }
3735 }
3736
3737 if (!page)
3738 page = alloc_pages(flags, order);
3739 if (!page)
3740 return NULL;
3741 memset(page_address(page), 0, size);
3742
3743 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003744 DMA_BIDIRECTIONAL,
David Woodhouse5040a912014-03-09 16:14:00 -07003745 dev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003746 if (*dma_handle)
Akinobu Mita36746432014-06-04 16:06:51 -07003747 return page_address(page);
3748 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3749 __free_pages(page, order);
3750
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003751 return NULL;
3752}
3753
David Woodhouse5040a912014-03-09 16:14:00 -07003754static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003755 dma_addr_t dma_handle, unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003756{
3757 int order;
Akinobu Mita36746432014-06-04 16:06:51 -07003758 struct page *page = virt_to_page(vaddr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003759
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003760 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003761 order = get_order(size);
3762
Omer Peleg769530e2016-04-20 11:33:25 +03003763 intel_unmap(dev, dma_handle, size);
Akinobu Mita36746432014-06-04 16:06:51 -07003764 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3765 __free_pages(page, order);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003766}
3767
David Woodhouse5040a912014-03-09 16:14:00 -07003768static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003769 int nelems, enum dma_data_direction dir,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003770 unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003771{
Omer Peleg769530e2016-04-20 11:33:25 +03003772 dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
3773 unsigned long nrpages = 0;
3774 struct scatterlist *sg;
3775 int i;
3776
3777 for_each_sg(sglist, sg, nelems, i) {
3778 nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
3779 }
3780
3781 intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003782}
3783
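/*
 * For devices that bypass the IOMMU: hand back the physical address of
 * each scatterlist entry unchanged.
 */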
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003784static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003785 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003786{
3787 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003788 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003789
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003790 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003791 BUG_ON(!sg_page(sg));
Robin Murphy29a90b72017-09-28 15:14:01 +01003792 sg->dma_address = sg_phys(sg);
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003793 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003794 }
3795 return nelems;
3796}
3797
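/*
 * Map a scatterlist through the IOMMU: allocate one IOVA range covering
 * all entries, build the page-table mappings with domain_sg_mapping(),
 * and flush the IOTLB only when the hardware is in caching mode.
 */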
David Woodhouse5040a912014-03-09 16:14:00 -07003798static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003799 enum dma_data_direction dir, unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003800{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003801 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003802 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003803 size_t size = 0;
3804 int prot = 0;
Omer Peleg2aac6302016-04-20 11:33:57 +03003805 unsigned long iova_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003806 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003807 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003808 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003809 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003810
3811 BUG_ON(dir == DMA_NONE);
David Woodhouse5040a912014-03-09 16:14:00 -07003812 if (iommu_no_mapping(dev))
3813 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003814
David Woodhouse5040a912014-03-09 16:14:00 -07003815 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003816 if (!domain)
3817 return 0;
3818
Weidong Han8c11e792008-12-08 15:29:22 +08003819 iommu = domain_get_iommu(domain);
3820
David Woodhouseb536d242009-06-28 14:49:31 +01003821 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003822 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003823
Omer Peleg2aac6302016-04-20 11:33:57 +03003824 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
David Woodhouse5040a912014-03-09 16:14:00 -07003825 *dev->dma_mask);
Omer Peleg2aac6302016-04-20 11:33:57 +03003826 if (!iova_pfn) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003827 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003828 return 0;
3829 }
3830
3831 /*
3832	 * Check if DMAR supports zero-length reads on write-only
3833	 * mappings.
3834 */
3835	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
Weidong Han8c11e792008-12-08 15:29:22 +08003836 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003837 prot |= DMA_PTE_READ;
3838 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3839 prot |= DMA_PTE_WRITE;
3840
Omer Peleg2aac6302016-04-20 11:33:57 +03003841 start_vpfn = mm_to_dma_pfn(iova_pfn);
David Woodhousee1605492009-06-29 11:17:38 +01003842
Fenghua Yuf5329592009-08-04 15:09:37 -07003843 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003844 if (unlikely(ret)) {
David Woodhousee1605492009-06-29 11:17:38 +01003845 dma_pte_free_pagetable(domain, start_vpfn,
David Dillowbc24c572017-06-28 19:42:23 -07003846 start_vpfn + size - 1,
3847 agaw_to_level(domain->agaw) + 1);
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003848 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
David Woodhousee1605492009-06-29 11:17:38 +01003849 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003850 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003851
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003852	/* It's a non-present to present mapping. Only flush if in caching mode. */
3853 if (cap_caching_mode(iommu->cap))
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02003854 iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003855 else
Weidong Han8c11e792008-12-08 15:29:22 +08003856 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003857
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003858 return nelems;
3859}
3860
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003861static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3862{
3863 return !dma_addr;
3864}
3865
Arvind Yadav01e19322017-06-28 16:39:32 +05303866const struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003867 .alloc = intel_alloc_coherent,
3868 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003869 .map_sg = intel_map_sg,
3870 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003871 .map_page = intel_map_page,
3872 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003873 .mapping_error = intel_mapping_error,
Christoph Hellwig5860acc2017-05-22 11:38:27 +02003874#ifdef CONFIG_X86
3875 .dma_supported = x86_dma_supported,
3876#endif
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003877};
3878
3879static inline int iommu_domain_cache_init(void)
3880{
3881 int ret = 0;
3882
3883 iommu_domain_cache = kmem_cache_create("iommu_domain",
3884 sizeof(struct dmar_domain),
3885 0,
3886 SLAB_HWCACHE_ALIGN,
3887
3888 NULL);
3889 if (!iommu_domain_cache) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003890 pr_err("Couldn't create iommu_domain cache\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003891 ret = -ENOMEM;
3892 }
3893
3894 return ret;
3895}
3896
3897static inline int iommu_devinfo_cache_init(void)
3898{
3899 int ret = 0;
3900
3901 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3902 sizeof(struct device_domain_info),
3903 0,
3904 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003905 NULL);
3906 if (!iommu_devinfo_cache) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003907 pr_err("Couldn't create devinfo cache\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003908 ret = -ENOMEM;
3909 }
3910
3911 return ret;
3912}
3913
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003914static int __init iommu_init_mempool(void)
3915{
3916 int ret;
Sakari Ailusae1ff3d2015-07-13 14:31:28 +03003917 ret = iova_cache_get();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003918 if (ret)
3919 return ret;
3920
3921 ret = iommu_domain_cache_init();
3922 if (ret)
3923 goto domain_error;
3924
3925 ret = iommu_devinfo_cache_init();
3926 if (!ret)
3927 return ret;
3928
3929 kmem_cache_destroy(iommu_domain_cache);
3930domain_error:
Sakari Ailusae1ff3d2015-07-13 14:31:28 +03003931 iova_cache_put();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003932
3933 return -ENOMEM;
3934}
3935
3936static void __init iommu_exit_mempool(void)
3937{
3938 kmem_cache_destroy(iommu_devinfo_cache);
3939 kmem_cache_destroy(iommu_domain_cache);
Sakari Ailusae1ff3d2015-07-13 14:31:28 +03003940 iova_cache_put();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003941}
3942
Dan Williams556ab452010-07-23 15:47:56 -07003943static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3944{
3945 struct dmar_drhd_unit *drhd;
3946 u32 vtbar;
3947 int rc;
3948
3949 /* We know that this device on this chipset has its own IOMMU.
3950 * If we find it under a different IOMMU, then the BIOS is lying
3951 * to us. Hope that the IOMMU for this device is actually
3952 * disabled, and it needs no translation...
3953 */
3954 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3955 if (rc) {
3956 /* "can't" happen */
3957 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3958 return;
3959 }
3960 vtbar &= 0xffff0000;
3961
3962	/* we know that this IOMMU should be at offset 0xa000 from the vtbar */
3963 drhd = dmar_find_matched_drhd_unit(pdev);
3964 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3965 TAINT_FIRMWARE_WORKAROUND,
3966 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3967 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3968}
3969DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3970
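/*
 * Mark DMAR units that cover no devices at all, or only graphics devices
 * when dmar_map_gfx is disabled, as ignored so that no translation is set
 * up for them.
 */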
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003971static void __init init_no_remapping_devices(void)
3972{
3973 struct dmar_drhd_unit *drhd;
David Woodhouse832bd852014-03-07 15:08:36 +00003974 struct device *dev;
Jiang Liub683b232014-02-19 14:07:32 +08003975 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003976
3977 for_each_drhd_unit(drhd) {
3978 if (!drhd->include_all) {
Jiang Liub683b232014-02-19 14:07:32 +08003979 for_each_active_dev_scope(drhd->devices,
3980 drhd->devices_cnt, i, dev)
3981 break;
David Woodhouse832bd852014-03-07 15:08:36 +00003982 /* ignore DMAR unit if no devices exist */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003983 if (i == drhd->devices_cnt)
3984 drhd->ignored = 1;
3985 }
3986 }
3987
Jiang Liu7c919772014-01-06 14:18:18 +08003988 for_each_active_drhd_unit(drhd) {
Jiang Liu7c919772014-01-06 14:18:18 +08003989 if (drhd->include_all)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003990 continue;
3991
Jiang Liub683b232014-02-19 14:07:32 +08003992 for_each_active_dev_scope(drhd->devices,
3993 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003994 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003995 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003996 if (i < drhd->devices_cnt)
3997 continue;
3998
David Woodhousec0771df2011-10-14 20:59:46 +01003999 /* This IOMMU has *only* gfx devices. Either bypass it or
4000 set the gfx_mapped flag, as appropriate */
4001 if (dmar_map_gfx) {
4002 intel_iommu_gfx_mapped = 1;
4003 } else {
4004 drhd->ignored = 1;
Jiang Liub683b232014-02-19 14:07:32 +08004005 for_each_active_dev_scope(drhd->devices,
4006 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00004007 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004008 }
4009 }
4010}
4011
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004012#ifdef CONFIG_SUSPEND
4013static int init_iommu_hw(void)
4014{
4015 struct dmar_drhd_unit *drhd;
4016 struct intel_iommu *iommu = NULL;
4017
4018 for_each_active_iommu(iommu, drhd)
4019 if (iommu->qi)
4020 dmar_reenable_qi(iommu);
4021
Joseph Cihulab7792602011-05-03 00:08:37 -07004022 for_each_iommu(iommu, drhd) {
4023 if (drhd->ignored) {
4024 /*
4025 * we always have to disable PMRs or DMA may fail on
4026 * this device
4027 */
4028 if (force_on)
4029 iommu_disable_protect_mem_regions(iommu);
4030 continue;
4031 }
4032
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004033 iommu_flush_write_buffer(iommu);
4034
4035 iommu_set_root_entry(iommu);
4036
4037 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01004038 DMA_CCMD_GLOBAL_INVL);
Jiang Liu2a41cce2014-07-11 14:19:33 +08004039 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4040 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07004041 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004042 }
4043
4044 return 0;
4045}
4046
4047static void iommu_flush_all(void)
4048{
4049 struct dmar_drhd_unit *drhd;
4050 struct intel_iommu *iommu;
4051
4052 for_each_active_iommu(iommu, drhd) {
4053 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01004054 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004055 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01004056 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004057 }
4058}
4059
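/*
 * Suspend support: save each active IOMMU's fault-event registers and
 * disable translation; iommu_resume() re-initialises the hardware and
 * restores them.
 */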
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004060static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004061{
4062 struct dmar_drhd_unit *drhd;
4063 struct intel_iommu *iommu = NULL;
4064 unsigned long flag;
4065
4066 for_each_active_iommu(iommu, drhd) {
4067 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
4068 GFP_ATOMIC);
4069 if (!iommu->iommu_state)
4070 goto nomem;
4071 }
4072
4073 iommu_flush_all();
4074
4075 for_each_active_iommu(iommu, drhd) {
4076 iommu_disable_translation(iommu);
4077
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004078 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004079
4080 iommu->iommu_state[SR_DMAR_FECTL_REG] =
4081 readl(iommu->reg + DMAR_FECTL_REG);
4082 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
4083 readl(iommu->reg + DMAR_FEDATA_REG);
4084 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
4085 readl(iommu->reg + DMAR_FEADDR_REG);
4086 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
4087 readl(iommu->reg + DMAR_FEUADDR_REG);
4088
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004089 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004090 }
4091 return 0;
4092
4093nomem:
4094 for_each_active_iommu(iommu, drhd)
4095 kfree(iommu->iommu_state);
4096
4097 return -ENOMEM;
4098}
4099
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004100static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004101{
4102 struct dmar_drhd_unit *drhd;
4103 struct intel_iommu *iommu = NULL;
4104 unsigned long flag;
4105
4106 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07004107 if (force_on)
4108 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
4109 else
4110 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004111 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004112 }
4113
4114 for_each_active_iommu(iommu, drhd) {
4115
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004116 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004117
4118 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
4119 iommu->reg + DMAR_FECTL_REG);
4120 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
4121 iommu->reg + DMAR_FEDATA_REG);
4122 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
4123 iommu->reg + DMAR_FEADDR_REG);
4124 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
4125 iommu->reg + DMAR_FEUADDR_REG);
4126
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004127 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004128 }
4129
4130 for_each_active_iommu(iommu, drhd)
4131 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004132}
4133
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004134static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004135 .resume = iommu_resume,
4136 .suspend = iommu_suspend,
4137};
4138
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004139static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004140{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004141 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004142}
4143
4144#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02004145static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004146#endif	/* CONFIG_SUSPEND */
4147
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004148
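/*
 * Parse one RMRR entry from the DMAR table: record the reserved range,
 * publish it as a direct-mapped reserved region and collect the device
 * scope it applies to.
 */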
Jiang Liuc2a0b532014-11-09 22:47:56 +08004149int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004150{
4151 struct acpi_dmar_reserved_memory *rmrr;
Eric Auger0659b8d2017-01-19 20:57:53 +00004152 int prot = DMA_PTE_READ|DMA_PTE_WRITE;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004153 struct dmar_rmrr_unit *rmrru;
Eric Auger0659b8d2017-01-19 20:57:53 +00004154 size_t length;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004155
4156 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
4157 if (!rmrru)
Eric Auger0659b8d2017-01-19 20:57:53 +00004158 goto out;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004159
4160 rmrru->hdr = header;
4161 rmrr = (struct acpi_dmar_reserved_memory *)header;
4162 rmrru->base_address = rmrr->base_address;
4163 rmrru->end_address = rmrr->end_address;
Eric Auger0659b8d2017-01-19 20:57:53 +00004164
4165 length = rmrr->end_address - rmrr->base_address + 1;
4166 rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
4167 IOMMU_RESV_DIRECT);
4168 if (!rmrru->resv)
4169 goto free_rmrru;
4170
Jiang Liu2e455282014-02-19 14:07:36 +08004171 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
4172 ((void *)rmrr) + rmrr->header.length,
4173 &rmrru->devices_cnt);
Eric Auger0659b8d2017-01-19 20:57:53 +00004174 if (rmrru->devices_cnt && rmrru->devices == NULL)
4175 goto free_all;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004176
Jiang Liu2e455282014-02-19 14:07:36 +08004177 list_add(&rmrru->list, &dmar_rmrr_units);
4178
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004179 return 0;
Eric Auger0659b8d2017-01-19 20:57:53 +00004180free_all:
4181 kfree(rmrru->resv);
4182free_rmrru:
4183 kfree(rmrru);
4184out:
4185 return -ENOMEM;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004186}
4187
Jiang Liu6b197242014-11-09 22:47:58 +08004188static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
4189{
4190 struct dmar_atsr_unit *atsru;
4191 struct acpi_dmar_atsr *tmp;
4192
4193 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4194 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
4195 if (atsr->segment != tmp->segment)
4196 continue;
4197 if (atsr->header.length != tmp->header.length)
4198 continue;
4199 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4200 return atsru;
4201 }
4202
4203 return NULL;
4204}
4205
4206int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004207{
4208 struct acpi_dmar_atsr *atsr;
4209 struct dmar_atsr_unit *atsru;
4210
Thomas Gleixnerb608fe32017-05-16 20:42:41 +02004211 if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
Jiang Liu6b197242014-11-09 22:47:58 +08004212 return 0;
4213
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004214 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
Jiang Liu6b197242014-11-09 22:47:58 +08004215 atsru = dmar_find_atsr(atsr);
4216 if (atsru)
4217 return 0;
4218
4219 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004220 if (!atsru)
4221 return -ENOMEM;
4222
Jiang Liu6b197242014-11-09 22:47:58 +08004223 /*
4224 * If memory is allocated from slab by ACPI _DSM method, we need to
4225 * copy the memory content because the memory buffer will be freed
4226 * on return.
4227 */
4228 atsru->hdr = (void *)(atsru + 1);
4229 memcpy(atsru->hdr, hdr, hdr->length);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004230 atsru->include_all = atsr->flags & 0x1;
Jiang Liu2e455282014-02-19 14:07:36 +08004231 if (!atsru->include_all) {
4232 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4233 (void *)atsr + atsr->header.length,
4234 &atsru->devices_cnt);
4235 if (atsru->devices_cnt && atsru->devices == NULL) {
4236 kfree(atsru);
4237 return -ENOMEM;
4238 }
4239 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004240
Jiang Liu0e242612014-02-19 14:07:34 +08004241 list_add_rcu(&atsru->list, &dmar_atsr_units);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004242
4243 return 0;
4244}
4245
Jiang Liu9bdc5312014-01-06 14:18:27 +08004246static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4247{
4248 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4249 kfree(atsru);
4250}
4251
Jiang Liu6b197242014-11-09 22:47:58 +08004252int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4253{
4254 struct acpi_dmar_atsr *atsr;
4255 struct dmar_atsr_unit *atsru;
4256
4257 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4258 atsru = dmar_find_atsr(atsr);
4259 if (atsru) {
4260 list_del_rcu(&atsru->list);
4261 synchronize_rcu();
4262 intel_iommu_free_atsr(atsru);
4263 }
4264
4265 return 0;
4266}
4267
4268int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4269{
4270 int i;
4271 struct device *dev;
4272 struct acpi_dmar_atsr *atsr;
4273 struct dmar_atsr_unit *atsru;
4274
4275 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4276 atsru = dmar_find_atsr(atsr);
4277 if (!atsru)
4278 return 0;
4279
Linus Torvalds194dc872016-07-27 20:03:31 -07004280 if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
Jiang Liu6b197242014-11-09 22:47:58 +08004281 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4282 i, dev)
4283 return -EBUSY;
Linus Torvalds194dc872016-07-27 20:03:31 -07004284 }
Jiang Liu6b197242014-11-09 22:47:58 +08004285
4286 return 0;
4287}
4288
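/*
 * Bring up a hot-added DMAR unit: verify that it supports the features the
 * running configuration relies on (pass-through, snooping, superpages),
 * allocate its domains and root entry, then enable queued invalidation,
 * interrupts and translation unless the unit is ignored.
 */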
Jiang Liuffebeb42014-11-09 22:48:02 +08004289static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4290{
4291 int sp, ret = 0;
4292 struct intel_iommu *iommu = dmaru->iommu;
4293
4294 if (g_iommus[iommu->seq_id])
4295 return 0;
4296
4297 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004298 pr_warn("%s: Doesn't support hardware pass through.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004299 iommu->name);
4300 return -ENXIO;
4301 }
4302 if (!ecap_sc_support(iommu->ecap) &&
4303 domain_update_iommu_snooping(iommu)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004304 pr_warn("%s: Doesn't support snooping.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004305 iommu->name);
4306 return -ENXIO;
4307 }
4308 sp = domain_update_iommu_superpage(iommu) - 1;
4309 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004310 pr_warn("%s: Doesn't support large page.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004311 iommu->name);
4312 return -ENXIO;
4313 }
4314
4315 /*
4316 * Disable translation if already enabled prior to OS handover.
4317 */
4318 if (iommu->gcmd & DMA_GCMD_TE)
4319 iommu_disable_translation(iommu);
4320
4321 g_iommus[iommu->seq_id] = iommu;
4322 ret = iommu_init_domains(iommu);
4323 if (ret == 0)
4324 ret = iommu_alloc_root_entry(iommu);
4325 if (ret)
4326 goto out;
4327
David Woodhouse8a94ade2015-03-24 14:54:56 +00004328#ifdef CONFIG_INTEL_IOMMU_SVM
4329 if (pasid_enabled(iommu))
4330 intel_svm_alloc_pasid_tables(iommu);
4331#endif
4332
Jiang Liuffebeb42014-11-09 22:48:02 +08004333 if (dmaru->ignored) {
4334 /*
4335 * we always have to disable PMRs or DMA may fail on this device
4336 */
4337 if (force_on)
4338 iommu_disable_protect_mem_regions(iommu);
4339 return 0;
4340 }
4341
4342 intel_iommu_init_qi(iommu);
4343 iommu_flush_write_buffer(iommu);
David Woodhousea222a7f2015-10-07 23:35:18 +01004344
4345#ifdef CONFIG_INTEL_IOMMU_SVM
4346 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
4347 ret = intel_svm_enable_prq(iommu);
4348 if (ret)
4349 goto disable_iommu;
4350 }
4351#endif
Jiang Liuffebeb42014-11-09 22:48:02 +08004352 ret = dmar_set_interrupt(iommu);
4353 if (ret)
4354 goto disable_iommu;
4355
4356 iommu_set_root_entry(iommu);
4357 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4358 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4359 iommu_enable_translation(iommu);
4360
Jiang Liuffebeb42014-11-09 22:48:02 +08004361 iommu_disable_protect_mem_regions(iommu);
4362 return 0;
4363
4364disable_iommu:
4365 disable_dmar_iommu(iommu);
4366out:
4367 free_dmar_iommu(iommu);
4368 return ret;
4369}
4370
Jiang Liu6b197242014-11-09 22:47:58 +08004371int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4372{
Jiang Liuffebeb42014-11-09 22:48:02 +08004373 int ret = 0;
4374 struct intel_iommu *iommu = dmaru->iommu;
4375
4376 if (!intel_iommu_enabled)
4377 return 0;
4378 if (iommu == NULL)
4379 return -EINVAL;
4380
4381 if (insert) {
4382 ret = intel_iommu_add(dmaru);
4383 } else {
4384 disable_dmar_iommu(iommu);
4385 free_dmar_iommu(iommu);
4386 }
4387
4388 return ret;
Jiang Liu6b197242014-11-09 22:47:58 +08004389}
4390
Jiang Liu9bdc5312014-01-06 14:18:27 +08004391static void intel_iommu_free_dmars(void)
4392{
4393 struct dmar_rmrr_unit *rmrru, *rmrr_n;
4394 struct dmar_atsr_unit *atsru, *atsr_n;
4395
4396 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4397 list_del(&rmrru->list);
4398 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
Eric Auger0659b8d2017-01-19 20:57:53 +00004399 kfree(rmrru->resv);
Jiang Liu9bdc5312014-01-06 14:18:27 +08004400 kfree(rmrru);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004401 }
4402
Jiang Liu9bdc5312014-01-06 14:18:27 +08004403 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4404 list_del(&atsru->list);
4405 intel_iommu_free_atsr(atsru);
4406 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004407}
4408
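/*
 * Walk from the device up to its PCIe root port and check whether an ATSR
 * unit covers it; root-complex integrated devices are always allowed ATS.
 */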
4409int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4410{
Jiang Liub683b232014-02-19 14:07:32 +08004411 int i, ret = 1;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004412 struct pci_bus *bus;
David Woodhouse832bd852014-03-07 15:08:36 +00004413 struct pci_dev *bridge = NULL;
4414 struct device *tmp;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004415 struct acpi_dmar_atsr *atsr;
4416 struct dmar_atsr_unit *atsru;
4417
4418 dev = pci_physfn(dev);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004419 for (bus = dev->bus; bus; bus = bus->parent) {
Jiang Liub5f82dd2014-02-19 14:07:31 +08004420 bridge = bus->self;
David Woodhoused14053b32015-10-15 09:28:06 +01004421 /* If it's an integrated device, allow ATS */
4422 if (!bridge)
4423 return 1;
4424 /* Connected via non-PCIe: no ATS */
4425 if (!pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08004426 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004427 return 0;
David Woodhoused14053b32015-10-15 09:28:06 +01004428 /* If we found the root port, look it up in the ATSR */
Jiang Liub5f82dd2014-02-19 14:07:31 +08004429 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004430 break;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004431 }
4432
Jiang Liu0e242612014-02-19 14:07:34 +08004433 rcu_read_lock();
Jiang Liub5f82dd2014-02-19 14:07:31 +08004434 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4435 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4436 if (atsr->segment != pci_domain_nr(dev->bus))
4437 continue;
4438
Jiang Liub683b232014-02-19 14:07:32 +08004439 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
David Woodhouse832bd852014-03-07 15:08:36 +00004440 if (tmp == &bridge->dev)
Jiang Liub683b232014-02-19 14:07:32 +08004441 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08004442
4443 if (atsru->include_all)
Jiang Liub683b232014-02-19 14:07:32 +08004444 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08004445 }
Jiang Liub683b232014-02-19 14:07:32 +08004446 ret = 0;
4447out:
Jiang Liu0e242612014-02-19 14:07:34 +08004448 rcu_read_unlock();
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004449
Jiang Liub683b232014-02-19 14:07:32 +08004450 return ret;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004451}
4452
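/*
 * PCI bus notification helper: keep the device-scope lists of the RMRR and
 * ATSR units in sync as devices are added to or removed from the system.
 */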
Jiang Liu59ce0512014-02-19 14:07:35 +08004453int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4454{
4455 int ret = 0;
4456 struct dmar_rmrr_unit *rmrru;
4457 struct dmar_atsr_unit *atsru;
4458 struct acpi_dmar_atsr *atsr;
4459 struct acpi_dmar_reserved_memory *rmrr;
4460
Thomas Gleixnerb608fe32017-05-16 20:42:41 +02004461 if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
Jiang Liu59ce0512014-02-19 14:07:35 +08004462 return 0;
4463
4464 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4465 rmrr = container_of(rmrru->hdr,
4466 struct acpi_dmar_reserved_memory, header);
4467 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4468 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4469 ((void *)rmrr) + rmrr->header.length,
4470 rmrr->segment, rmrru->devices,
4471 rmrru->devices_cnt);
Jiang Liu27e24952014-06-20 15:08:06 +08004472			if (ret < 0)
Jiang Liu59ce0512014-02-19 14:07:35 +08004473 return ret;
Joerg Roedele6a8c9b2016-02-29 23:49:47 +01004474 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
Jiang Liu27e24952014-06-20 15:08:06 +08004475 dmar_remove_dev_scope(info, rmrr->segment,
4476 rmrru->devices, rmrru->devices_cnt);
Jiang Liu59ce0512014-02-19 14:07:35 +08004477 }
4478 }
4479
4480 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4481 if (atsru->include_all)
4482 continue;
4483
4484 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4485 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4486 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4487 (void *)atsr + atsr->header.length,
4488 atsr->segment, atsru->devices,
4489 atsru->devices_cnt);
4490 if (ret > 0)
4491 break;
4492 else if(ret < 0)
4493			else if (ret < 0)
Joerg Roedele6a8c9b2016-02-29 23:49:47 +01004494 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
Jiang Liu59ce0512014-02-19 14:07:35 +08004495 if (dmar_remove_dev_scope(info, atsr->segment,
4496 atsru->devices, atsru->devices_cnt))
4497 break;
4498 }
4499 }
4500
4501 return 0;
4502}
4503
Fenghua Yu99dcade2009-11-11 07:23:06 -08004504/*
4505 * Here we only respond to the action of a device being unbound from its driver.
4506 *
4507 * A newly added device is not attached to its DMAR domain here yet. That will
4508 * happen when the device is first mapped to an IOVA.
4509 */
4510static int device_notifier(struct notifier_block *nb,
4511 unsigned long action, void *data)
4512{
4513 struct device *dev = data;
Fenghua Yu99dcade2009-11-11 07:23:06 -08004514 struct dmar_domain *domain;
4515
David Woodhouse3d891942014-03-06 15:59:26 +00004516 if (iommu_dummy(dev))
David Woodhouse44cd6132009-12-02 10:18:30 +00004517 return 0;
4518
Joerg Roedel1196c2f2014-09-30 13:02:03 +02004519 if (action != BUS_NOTIFY_REMOVED_DEVICE)
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004520 return 0;
4521
David Woodhouse1525a292014-03-06 16:19:30 +00004522 domain = find_domain(dev);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004523 if (!domain)
4524 return 0;
4525
Joerg Roedele6de0f82015-07-22 16:30:36 +02004526 dmar_remove_one_dev_info(domain, dev);
Jiang Liuab8dfe22014-07-11 14:19:27 +08004527 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004528 domain_exit(domain);
Alex Williamsona97590e2011-03-04 14:52:16 -07004529
Fenghua Yu99dcade2009-11-11 07:23:06 -08004530 return 0;
4531}
4532
4533static struct notifier_block device_nb = {
4534 .notifier_call = device_notifier,
4535};
4536
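/*
 * Memory hotplug notifier: extend the identity map of si_domain when memory
 * goes online, and unmap and release the corresponding IOVA ranges when it
 * goes offline.
 */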
Jiang Liu75f05562014-02-19 14:07:37 +08004537static int intel_iommu_memory_notifier(struct notifier_block *nb,
4538 unsigned long val, void *v)
4539{
4540 struct memory_notify *mhp = v;
4541 unsigned long long start, end;
4542 unsigned long start_vpfn, last_vpfn;
4543
4544 switch (val) {
4545 case MEM_GOING_ONLINE:
4546 start = mhp->start_pfn << PAGE_SHIFT;
4547 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4548 if (iommu_domain_identity_map(si_domain, start, end)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004549 pr_warn("Failed to build identity map for [%llx-%llx]\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004550 start, end);
4551 return NOTIFY_BAD;
4552 }
4553 break;
4554
4555 case MEM_OFFLINE:
4556 case MEM_CANCEL_ONLINE:
4557 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4558 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4559 while (start_vpfn <= last_vpfn) {
4560 struct iova *iova;
4561 struct dmar_drhd_unit *drhd;
4562 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00004563 struct page *freelist;
Jiang Liu75f05562014-02-19 14:07:37 +08004564
4565 iova = find_iova(&si_domain->iovad, start_vpfn);
4566 if (iova == NULL) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004567 pr_debug("Failed get IOVA for PFN %lx\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004568 start_vpfn);
4569 break;
4570 }
4571
4572 iova = split_and_remove_iova(&si_domain->iovad, iova,
4573 start_vpfn, last_vpfn);
4574 if (iova == NULL) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004575 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004576 start_vpfn, last_vpfn);
4577 return NOTIFY_BAD;
4578 }
4579
David Woodhouseea8ea462014-03-05 17:09:32 +00004580 freelist = domain_unmap(si_domain, iova->pfn_lo,
4581 iova->pfn_hi);
4582
Jiang Liu75f05562014-02-19 14:07:37 +08004583 rcu_read_lock();
4584 for_each_active_iommu(iommu, drhd)
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02004585 iommu_flush_iotlb_psi(iommu, si_domain,
Jiang Liua156ef92014-07-11 14:19:36 +08004586 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00004587 !freelist, 0);
Jiang Liu75f05562014-02-19 14:07:37 +08004588 rcu_read_unlock();
David Woodhouseea8ea462014-03-05 17:09:32 +00004589 dma_free_pagelist(freelist);
Jiang Liu75f05562014-02-19 14:07:37 +08004590
4591 start_vpfn = iova->pfn_hi + 1;
4592 free_iova_mem(iova);
4593 }
4594 break;
4595 }
4596
4597 return NOTIFY_OK;
4598}
4599
4600static struct notifier_block intel_iommu_memory_nb = {
4601 .notifier_call = intel_iommu_memory_notifier,
4602 .priority = 0
4603};
4604
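/* Drop the per-CPU cached IOVAs of every domain when a CPU is taken down. */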
Omer Peleg22e2f9f2016-04-20 11:34:11 +03004605static void free_all_cpu_cached_iovas(unsigned int cpu)
4606{
4607 int i;
4608
4609 for (i = 0; i < g_num_of_iommus; i++) {
4610 struct intel_iommu *iommu = g_iommus[i];
4611 struct dmar_domain *domain;
Aaron Campbell0caa7612016-07-02 21:23:24 -03004612 int did;
Omer Peleg22e2f9f2016-04-20 11:34:11 +03004613
4614 if (!iommu)
4615 continue;
4616
Jan Niehusmann3bd4f912016-06-06 14:20:11 +02004617 for (did = 0; did < cap_ndoms(iommu->cap); did++) {
Aaron Campbell0caa7612016-07-02 21:23:24 -03004618 domain = get_iommu_domain(iommu, (u16)did);
Omer Peleg22e2f9f2016-04-20 11:34:11 +03004619
4620 if (!domain)
4621 continue;
4622 free_cpu_cached_iovas(cpu, &domain->iovad);
4623 }
4624 }
4625}
4626
Anna-Maria Gleixner21647612016-11-27 00:13:41 +01004627static int intel_iommu_cpu_dead(unsigned int cpu)
Omer Pelegaa473242016-04-20 11:33:02 +03004628{
Anna-Maria Gleixner21647612016-11-27 00:13:41 +01004629 free_all_cpu_cached_iovas(cpu);
Anna-Maria Gleixner21647612016-11-27 00:13:41 +01004630 return 0;
Omer Pelegaa473242016-04-20 11:33:02 +03004631}
4632
Joerg Roedel161b28a2017-03-28 17:04:52 +02004633static void intel_disable_iommus(void)
4634{
4635 struct intel_iommu *iommu = NULL;
4636 struct dmar_drhd_unit *drhd;
4637
4638 for_each_iommu(iommu, drhd)
4639 iommu_disable_translation(iommu);
4640}
4641
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004642static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
4643{
Joerg Roedel2926a2aa2017-08-14 17:19:26 +02004644 struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
4645
4646 return container_of(iommu_dev, struct intel_iommu, iommu);
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004647}
4648
Alex Williamsona5459cf2014-06-12 16:12:31 -06004649static ssize_t intel_iommu_show_version(struct device *dev,
4650 struct device_attribute *attr,
4651 char *buf)
4652{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004653 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004654 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4655 return sprintf(buf, "%d:%d\n",
4656 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4657}
4658static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4659
4660static ssize_t intel_iommu_show_address(struct device *dev,
4661 struct device_attribute *attr,
4662 char *buf)
4663{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004664 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004665 return sprintf(buf, "%llx\n", iommu->reg_phys);
4666}
4667static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4668
4669static ssize_t intel_iommu_show_cap(struct device *dev,
4670 struct device_attribute *attr,
4671 char *buf)
4672{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004673 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004674 return sprintf(buf, "%llx\n", iommu->cap);
4675}
4676static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4677
4678static ssize_t intel_iommu_show_ecap(struct device *dev,
4679 struct device_attribute *attr,
4680 char *buf)
4681{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004682 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004683 return sprintf(buf, "%llx\n", iommu->ecap);
4684}
4685static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4686
Alex Williamson2238c082015-07-14 15:24:53 -06004687static ssize_t intel_iommu_show_ndoms(struct device *dev,
4688 struct device_attribute *attr,
4689 char *buf)
4690{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004691 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamson2238c082015-07-14 15:24:53 -06004692 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4693}
4694static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4695
4696static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4697 struct device_attribute *attr,
4698 char *buf)
4699{
Joerg Roedela7fdb6e2017-02-28 13:57:18 +01004700 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
Alex Williamson2238c082015-07-14 15:24:53 -06004701 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4702 cap_ndoms(iommu->cap)));
4703}
4704static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4705
Alex Williamsona5459cf2014-06-12 16:12:31 -06004706static struct attribute *intel_iommu_attrs[] = {
4707 &dev_attr_version.attr,
4708 &dev_attr_address.attr,
4709 &dev_attr_cap.attr,
4710 &dev_attr_ecap.attr,
Alex Williamson2238c082015-07-14 15:24:53 -06004711 &dev_attr_domains_supported.attr,
4712 &dev_attr_domains_used.attr,
Alex Williamsona5459cf2014-06-12 16:12:31 -06004713 NULL,
4714};
4715
4716static struct attribute_group intel_iommu_group = {
4717 .name = "intel-iommu",
4718 .attrs = intel_iommu_attrs,
4719};
4720
4721const struct attribute_group *intel_iommu_groups[] = {
4722 &intel_iommu_group,
4723 NULL,
4724};
4725
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004726int __init intel_iommu_init(void)
4727{
Jiang Liu9bdc5312014-01-06 14:18:27 +08004728 int ret = -ENODEV;
Takao Indoh3a93c842013-04-23 17:35:03 +09004729 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08004730 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004731
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004732 /* VT-d is required for a TXT/tboot launch, so enforce that */
4733 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004734
Jiang Liu3a5670e2014-02-19 14:07:33 +08004735 if (iommu_init_mempool()) {
4736 if (force_on)
4737 panic("tboot: Failed to initialize iommu memory\n");
4738 return -ENOMEM;
4739 }
4740
4741 down_write(&dmar_global_lock);
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004742 if (dmar_table_init()) {
4743 if (force_on)
4744 panic("tboot: Failed to initialize DMAR table\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004745 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004746 }
4747
Suresh Siddhac2c72862011-08-23 17:05:19 -07004748 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004749 if (force_on)
4750 panic("tboot: Failed to initialize DMAR device scope\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004751 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004752 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07004753
Joerg Roedelec154bf2017-10-06 15:00:53 +02004754 up_write(&dmar_global_lock);
4755
4756 /*
4757 * The bus notifier takes the dmar_global_lock, so lockdep will
4758 * complain later when we register it under the lock.
4759 */
4760 dmar_register_bus_notifier();
4761
4762 down_write(&dmar_global_lock);
4763
Joerg Roedel161b28a2017-03-28 17:04:52 +02004764 if (no_iommu || dmar_disabled) {
4765 /*
Shaohua Libfd20f12017-04-26 09:18:35 -07004766		 * We exit the function here to ensure that IOMMU remapping and the
4767		 * mempool aren't set up, which means that the IOMMUs' PMRs
4768		 * won't be disabled via the call to init_dmars(). So disable
4769		 * them explicitly here. The PMRs were set up by tboot prior to
4770		 * calling SENTER, but the kernel is expected to reset/tear
4771		 * down the PMRs.
4772 */
4773 if (intel_iommu_tboot_noforce) {
4774 for_each_iommu(iommu, drhd)
4775 iommu_disable_protect_mem_regions(iommu);
4776 }
4777
4778 /*
Joerg Roedel161b28a2017-03-28 17:04:52 +02004779 * Make sure the IOMMUs are switched off, even when we
4780 * boot into a kexec kernel and the previous kernel left
4781 * them enabled
4782 */
4783 intel_disable_iommus();
Jiang Liu9bdc5312014-01-06 14:18:27 +08004784 goto out_free_dmar;
Joerg Roedel161b28a2017-03-28 17:04:52 +02004785 }
Suresh Siddha2ae21012008-07-10 11:16:43 -07004786
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004787 if (list_empty(&dmar_rmrr_units))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004788 pr_info("No RMRR found\n");
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004789
4790 if (list_empty(&dmar_atsr_units))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004791 pr_info("No ATSR found\n");
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004792
Joseph Cihula51a63e62011-03-21 11:04:24 -07004793 if (dmar_init_reserved_ranges()) {
4794 if (force_on)
4795 panic("tboot: Failed to reserve iommu ranges\n");
Jiang Liu3a5670e2014-02-19 14:07:33 +08004796 goto out_free_reserved_range;
Joseph Cihula51a63e62011-03-21 11:04:24 -07004797 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004798
4799 init_no_remapping_devices();
4800
Joseph Cihulab7792602011-05-03 00:08:37 -07004801 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004802 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004803 if (force_on)
4804 panic("tboot: Failed to initialize DMARs\n");
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004805 pr_err("Initialization failed\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004806 goto out_free_reserved_range;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004807 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08004808 up_write(&dmar_global_lock);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004809 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004810
Christoph Hellwig4fac8072017-12-24 13:57:08 +01004811#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004812 swiotlb = 0;
4813#endif
David Woodhouse19943b02009-08-04 16:19:20 +01004814 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07004815
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004816 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004817
Joerg Roedel39ab9552017-02-01 16:56:46 +01004818 for_each_active_iommu(iommu, drhd) {
4819 iommu_device_sysfs_add(&iommu->iommu, NULL,
4820 intel_iommu_groups,
4821 "%s", iommu->name);
4822 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
4823 iommu_device_register(&iommu->iommu);
4824 }
Alex Williamsona5459cf2014-06-12 16:12:31 -06004825
Joerg Roedel4236d97d2011-09-06 17:56:07 +02004826 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004827 bus_register_notifier(&pci_bus_type, &device_nb);
Jiang Liu75f05562014-02-19 14:07:37 +08004828 if (si_domain && !hw_pass_through)
4829 register_memory_notifier(&intel_iommu_memory_nb);
Anna-Maria Gleixner21647612016-11-27 00:13:41 +01004830 cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
4831 intel_iommu_cpu_dead);
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02004832 intel_iommu_enabled = 1;
4833
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004834 return 0;
Jiang Liu9bdc5312014-01-06 14:18:27 +08004835
4836out_free_reserved_range:
4837 put_iova_domain(&reserved_iova_list);
Jiang Liu9bdc5312014-01-06 14:18:27 +08004838out_free_dmar:
4839 intel_iommu_free_dmars();
Jiang Liu3a5670e2014-02-19 14:07:33 +08004840 up_write(&dmar_global_lock);
4841 iommu_exit_mempool();
Jiang Liu9bdc5312014-01-06 14:18:27 +08004842 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004843}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07004844
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004845static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
Alex Williamson579305f2014-07-03 09:51:43 -06004846{
4847 struct intel_iommu *iommu = opaque;
4848
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004849 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
Alex Williamson579305f2014-07-03 09:51:43 -06004850 return 0;
4851}
4852
4853/*
4854 * NB - intel-iommu lacks any sort of reference counting for the users of
4855 * dependent devices. If multiple endpoints have intersecting dependent
4856 * devices, unbinding the driver from any one of them will possibly leave
4857 * the others unable to operate.
4858 */
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004859static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
Han, Weidong3199aa62009-02-26 17:31:12 +08004860{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004861 if (!iommu || !dev || !dev_is_pci(dev))
Han, Weidong3199aa62009-02-26 17:31:12 +08004862 return;
4863
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004864 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
Han, Weidong3199aa62009-02-26 17:31:12 +08004865}
4866
Joerg Roedel127c7612015-07-23 17:44:46 +02004867static void __dmar_remove_one_dev_info(struct device_domain_info *info)
Weidong Hanc7151a82008-12-08 22:51:37 +08004868{
Weidong Hanc7151a82008-12-08 22:51:37 +08004869 struct intel_iommu *iommu;
4870 unsigned long flags;
Weidong Hanc7151a82008-12-08 22:51:37 +08004871
Joerg Roedel55d94042015-07-22 16:50:40 +02004872 assert_spin_locked(&device_domain_lock);
4873
Joerg Roedelb608ac32015-07-21 18:19:08 +02004874 if (WARN_ON(!info))
Weidong Hanc7151a82008-12-08 22:51:37 +08004875 return;
4876
Joerg Roedel127c7612015-07-23 17:44:46 +02004877 iommu = info->iommu;
4878
4879 if (info->dev) {
4880 iommu_disable_dev_iotlb(info);
4881 domain_context_clear(iommu, info->dev);
4882 }
4883
Joerg Roedelb608ac32015-07-21 18:19:08 +02004884 unlink_domain_info(info);
Roland Dreier3e7abe22011-07-20 06:22:21 -07004885
Joerg Roedeld160aca2015-07-22 11:52:53 +02004886 spin_lock_irqsave(&iommu->lock, flags);
Joerg Roedel127c7612015-07-23 17:44:46 +02004887 domain_detach_iommu(info->domain, iommu);
Joerg Roedeld160aca2015-07-22 11:52:53 +02004888 spin_unlock_irqrestore(&iommu->lock, flags);
Joerg Roedel127c7612015-07-23 17:44:46 +02004889
4890 free_devinfo_mem(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004891}
4892
Joerg Roedel55d94042015-07-22 16:50:40 +02004893static void dmar_remove_one_dev_info(struct dmar_domain *domain,
4894 struct device *dev)
4895{
Joerg Roedel127c7612015-07-23 17:44:46 +02004896 struct device_domain_info *info;
Joerg Roedel55d94042015-07-22 16:50:40 +02004897 unsigned long flags;
4898
Weidong Hanc7151a82008-12-08 22:51:37 +08004899 spin_lock_irqsave(&device_domain_lock, flags);
Joerg Roedel127c7612015-07-23 17:44:46 +02004900 info = dev->archdata.iommu;
4901 __dmar_remove_one_dev_info(info);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004902 spin_unlock_irqrestore(&device_domain_lock, flags);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004903}
4904
4905static int md_domain_init(struct dmar_domain *domain, int guest_width)
4906{
4907 int adjust_width;
4908
Zhen Leiaa3ac942017-09-21 16:52:45 +01004909 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004910 domain_reserve_special_ranges(domain);
4911
4912 /* calculate AGAW */
4913 domain->gaw = guest_width;
4914 adjust_width = guestwidth_to_adjustwidth(guest_width);
4915 domain->agaw = width_to_agaw(adjust_width);
4916
Weidong Han5e98c4b2008-12-08 23:03:27 +08004917 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08004918 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01004919 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004920 domain->max_addr = 0;
Weidong Han5e98c4b2008-12-08 23:03:27 +08004921
4922 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07004923 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004924 if (!domain->pgd)
4925 return -ENOMEM;
4926 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4927 return 0;
4928}
4929
Joerg Roedel00a77de2015-03-26 13:43:08 +01004930static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
Kay, Allen M38717942008-09-09 18:37:29 +03004931{
Joerg Roedel5d450802008-12-03 14:52:32 +01004932 struct dmar_domain *dmar_domain;
Joerg Roedel00a77de2015-03-26 13:43:08 +01004933 struct iommu_domain *domain;
4934
4935 if (type != IOMMU_DOMAIN_UNMANAGED)
4936 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004937
Jiang Liuab8dfe22014-07-11 14:19:27 +08004938 dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
Joerg Roedel5d450802008-12-03 14:52:32 +01004939 if (!dmar_domain) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004940 pr_err("Can't allocate dmar_domain\n");
Joerg Roedel00a77de2015-03-26 13:43:08 +01004941 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004942 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004943 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004944 pr_err("Domain initialization failed\n");
Jiang Liu92d03cc2014-02-19 14:07:28 +08004945 domain_exit(dmar_domain);
Joerg Roedel00a77de2015-03-26 13:43:08 +01004946 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004947 }
Allen Kay8140a952011-10-14 12:32:17 -07004948 domain_update_iommu_cap(dmar_domain);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004949
Joerg Roedel00a77de2015-03-26 13:43:08 +01004950 domain = &dmar_domain->domain;
Joerg Roedel8a0e7152012-01-26 19:40:54 +01004951 domain->geometry.aperture_start = 0;
4952 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4953 domain->geometry.force_aperture = true;
4954
Joerg Roedel00a77de2015-03-26 13:43:08 +01004955 return domain;
Kay, Allen M38717942008-09-09 18:37:29 +03004956}
Kay, Allen M38717942008-09-09 18:37:29 +03004957
Joerg Roedel00a77de2015-03-26 13:43:08 +01004958static void intel_iommu_domain_free(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004959{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004960 domain_exit(to_dmar_domain(domain));
Kay, Allen M38717942008-09-09 18:37:29 +03004961}
Kay, Allen M38717942008-09-09 18:37:29 +03004962
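/*
 * Attach a device to an IOMMU API domain: tear down any existing mapping
 * for the device, check that this IOMMU's address width can cover the
 * domain, trim surplus page-table levels if necessary and add the device
 * to the domain.
 */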
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004963static int intel_iommu_attach_device(struct iommu_domain *domain,
4964 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004965{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004966 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004967 struct intel_iommu *iommu;
4968 int addr_width;
David Woodhouse156baca2014-03-09 14:00:57 -07004969 u8 bus, devfn;
Kay, Allen M38717942008-09-09 18:37:29 +03004970
Alex Williamsonc875d2c2014-07-03 09:57:02 -06004971 if (device_is_rmrr_locked(dev)) {
4972 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
4973 return -EPERM;
4974 }
4975
David Woodhouse7207d8f2014-03-09 16:31:06 -07004976 /* normally dev is not mapped */
4977 if (unlikely(domain_context_mapped(dev))) {
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004978 struct dmar_domain *old_domain;
4979
David Woodhouse1525a292014-03-06 16:19:30 +00004980 old_domain = find_domain(dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004981 if (old_domain) {
Joerg Roedeld160aca2015-07-22 11:52:53 +02004982 rcu_read_lock();
Joerg Roedelde7e8882015-07-22 11:58:07 +02004983 dmar_remove_one_dev_info(old_domain, dev);
Joerg Roedeld160aca2015-07-22 11:52:53 +02004984 rcu_read_unlock();
Joerg Roedel62c22162014-12-09 12:56:45 +01004985
4986 if (!domain_type_is_vm_or_si(old_domain) &&
4987 list_empty(&old_domain->devices))
4988 domain_exit(old_domain);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004989 }
4990 }
4991
David Woodhouse156baca2014-03-09 14:00:57 -07004992 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004993 if (!iommu)
4994 return -ENODEV;
4995
4996 /* check if this iommu agaw is sufficient for max mapped address */
4997 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01004998 if (addr_width > cap_mgaw(iommu->cap))
4999 addr_width = cap_mgaw(iommu->cap);
5000
5001 if (dmar_domain->max_addr > (1LL << addr_width)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02005002 pr_err("%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005003 "sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01005004 __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005005 return -EFAULT;
5006 }
Tom Lyona99c47a2010-05-17 08:20:45 +01005007 dmar_domain->gaw = addr_width;
5008
5009 /*
5010 * Knock out extra levels of page tables if necessary
5011 */
5012 while (iommu->agaw < dmar_domain->agaw) {
5013 struct dma_pte *pte;
5014
5015 pte = dmar_domain->pgd;
5016 if (dma_pte_present(pte)) {
Sheng Yang25cbff12010-06-12 19:21:42 +08005017 dmar_domain->pgd = (struct dma_pte *)
5018 phys_to_virt(dma_pte_addr(pte));
Jan Kiszka7a661012010-11-02 08:05:51 +01005019 free_pgtable_page(pte);
Tom Lyona99c47a2010-05-17 08:20:45 +01005020 }
5021 dmar_domain->agaw--;
5022 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005023
Joerg Roedel28ccce02015-07-21 14:45:31 +02005024 return domain_add_dev_info(dmar_domain, dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005025}
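/*
 * Worked example for the level-trimming loop above (illustrative, not
 * part of the upstream code): a domain built with a 48-bit address
 * width uses four page-table levels (agaw 2), while an IOMMU whose
 * agaw is 1 only walks three.  The loop then runs once, promoting the
 * table referenced by the first top-level pgd entry to be the new pgd
 * and decrementing dmar_domain->agaw to match the hardware.
 */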
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005026
Joerg Roedel4c5478c2008-12-03 14:58:24 +01005027static void intel_iommu_detach_device(struct iommu_domain *domain,
5028 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03005029{
Joerg Roedele6de0f82015-07-22 16:30:36 +02005030 dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
Kay, Allen M38717942008-09-09 18:37:29 +03005031}
Kay, Allen M38717942008-09-09 18:37:29 +03005032
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01005033static int intel_iommu_map(struct iommu_domain *domain,
5034 unsigned long iova, phys_addr_t hpa,
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02005035 size_t size, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03005036{
Joerg Roedel00a77de2015-03-26 13:43:08 +01005037 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005038 u64 max_addr;
Joerg Roedeldde57a22008-12-03 15:04:09 +01005039 int prot = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005040 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005041
Joerg Roedeldde57a22008-12-03 15:04:09 +01005042 if (iommu_prot & IOMMU_READ)
5043 prot |= DMA_PTE_READ;
5044 if (iommu_prot & IOMMU_WRITE)
5045 prot |= DMA_PTE_WRITE;
Sheng Yang9cf06692009-03-18 15:33:07 +08005046 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
5047 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01005048
David Woodhouse163cc522009-06-28 00:51:17 +01005049 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01005050 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005051 u64 end;
5052
5053 /* check if minimum agaw is sufficient for mapped address */
Tom Lyon8954da12010-05-17 08:19:52 +01005054 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005055 if (end < max_addr) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02005056 pr_err("%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005057 "sufficient for the mapped address (%llx)\n",
Tom Lyon8954da12010-05-17 08:19:52 +01005058 __func__, dmar_domain->gaw, max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005059 return -EFAULT;
5060 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01005061 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005062 }
David Woodhousead051222009-06-28 14:22:28 +01005063 /* Round up size to next multiple of PAGE_SIZE, if it and
5064 the low bits of hpa would take us onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01005065 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01005066 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
5067 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005068 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03005069}
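/*
 * Worked example for the rounding above (illustrative): a request to
 * map size = 0x1000 at hpa = 0x10800 straddles two 4KiB frames, so
 * aligned_nrpages(0x10800, 0x1000) evaluates to 2 and two PTEs are
 * installed even though the caller only asked for one page's worth.
 */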
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005070
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02005071static size_t intel_iommu_unmap(struct iommu_domain *domain,
David Woodhouseea8ea462014-03-05 17:09:32 +00005072 unsigned long iova, size_t size)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005073{
Joerg Roedel00a77de2015-03-26 13:43:08 +01005074 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
David Woodhouseea8ea462014-03-05 17:09:32 +00005075 struct page *freelist = NULL;
5076 struct intel_iommu *iommu;
5077 unsigned long start_pfn, last_pfn;
5078 unsigned int npages;
Joerg Roedel42e8c182015-07-21 15:50:02 +02005079 int iommu_id, level = 0;
Sheng Yang4b99d352009-07-08 11:52:52 +01005080
David Woodhouse5cf0a762014-03-19 16:07:49 +00005081 /* Cope with horrid API which requires us to unmap more than the
5082 size argument if it happens to be a large-page mapping. */
Joerg Roedeldc02e462015-08-13 11:15:13 +02005083 BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
David Woodhouse5cf0a762014-03-19 16:07:49 +00005084
5085 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
5086 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
5087
David Woodhouseea8ea462014-03-05 17:09:32 +00005088 start_pfn = iova >> VTD_PAGE_SHIFT;
5089 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
5090
5091 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
5092
5093 npages = last_pfn - start_pfn + 1;
5094
Joerg Roedel29a27712015-07-21 17:17:12 +02005095 for_each_domain_iommu(iommu_id, dmar_domain) {
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02005096 iommu = g_iommus[iommu_id];
David Woodhouseea8ea462014-03-05 17:09:32 +00005097
Joerg Roedel42e8c182015-07-21 15:50:02 +02005098 iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
5099 start_pfn, npages, !freelist, 0);
David Woodhouseea8ea462014-03-05 17:09:32 +00005100 }
5101
5102 dma_free_pagelist(freelist);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005103
David Woodhouse163cc522009-06-28 00:51:17 +01005104 if (dmar_domain->max_addr == iova + size)
5105 dmar_domain->max_addr = iova;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01005106
David Woodhouse5cf0a762014-03-19 16:07:49 +00005107 return size;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005108}
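/*
 * Note on the semantics above (illustrative): if the IOVA is covered by
 * a 2MiB superpage and the caller asks to unmap only 4KiB of it, 'size'
 * is bumped to the superpage size, the whole superpage is torn down and
 * the enlarged size is returned.  Callers therefore have to trust the
 * return value rather than the size they passed in.
 */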
Kay, Allen M38717942008-09-09 18:37:29 +03005109
Joerg Roedeld14d6572008-12-03 15:06:57 +01005110static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
Varun Sethibb5547a2013-03-29 01:23:58 +05305111 dma_addr_t iova)
Kay, Allen M38717942008-09-09 18:37:29 +03005112{
Joerg Roedel00a77de2015-03-26 13:43:08 +01005113 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Kay, Allen M38717942008-09-09 18:37:29 +03005114 struct dma_pte *pte;
David Woodhouse5cf0a762014-03-19 16:07:49 +00005115 int level = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005116 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03005117
David Woodhouse5cf0a762014-03-19 16:07:49 +00005118 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
Kay, Allen M38717942008-09-09 18:37:29 +03005119 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005120 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03005121
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005122 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03005123}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01005124
Joerg Roedel5d587b82014-09-05 10:50:45 +02005125static bool intel_iommu_capable(enum iommu_cap cap)
Sheng Yangdbb9fd82009-03-18 15:33:06 +08005126{
Sheng Yangdbb9fd82009-03-18 15:33:06 +08005127 if (cap == IOMMU_CAP_CACHE_COHERENCY)
Joerg Roedel5d587b82014-09-05 10:50:45 +02005128 return domain_update_iommu_snooping(NULL) == 1;
Tom Lyon323f99c2010-07-02 16:56:14 -04005129 if (cap == IOMMU_CAP_INTR_REMAP)
Joerg Roedel5d587b82014-09-05 10:50:45 +02005130 return irq_remapping_enabled == 1;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08005131
Joerg Roedel5d587b82014-09-05 10:50:45 +02005132 return false;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08005133}
5134
Alex Williamsonabdfdde2012-05-30 14:19:19 -06005135static int intel_iommu_add_device(struct device *dev)
Alex Williamson70ae6f02011-10-21 15:56:11 -04005136{
Alex Williamsona5459cf2014-06-12 16:12:31 -06005137 struct intel_iommu *iommu;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06005138 struct iommu_group *group;
David Woodhouse156baca2014-03-09 14:00:57 -07005139 u8 bus, devfn;
Alex Williamson70ae6f02011-10-21 15:56:11 -04005140
Alex Williamsona5459cf2014-06-12 16:12:31 -06005141 iommu = device_to_iommu(dev, &bus, &devfn);
5142 if (!iommu)
Alex Williamson70ae6f02011-10-21 15:56:11 -04005143 return -ENODEV;
5144
Joerg Roedele3d10af2017-02-01 17:23:22 +01005145 iommu_device_link(&iommu->iommu, dev);
Alex Williamsonabdfdde2012-05-30 14:19:19 -06005146
Alex Williamsone17f9ff2014-07-03 09:51:37 -06005147 group = iommu_group_get_for_dev(dev);
Alex Williamson783f1572012-05-30 14:19:43 -06005148
Alex Williamsone17f9ff2014-07-03 09:51:37 -06005149 if (IS_ERR(group))
5150 return PTR_ERR(group);
Alex Williamson70ae6f02011-10-21 15:56:11 -04005151
Alex Williamsonabdfdde2012-05-30 14:19:19 -06005152 iommu_group_put(group);
Alex Williamsone17f9ff2014-07-03 09:51:37 -06005153 return 0;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06005154}
5155
5156static void intel_iommu_remove_device(struct device *dev)
5157{
Alex Williamsona5459cf2014-06-12 16:12:31 -06005158 struct intel_iommu *iommu;
5159 u8 bus, devfn;
5160
5161 iommu = device_to_iommu(dev, &bus, &devfn);
5162 if (!iommu)
5163 return;
5164
Alex Williamsonabdfdde2012-05-30 14:19:19 -06005165 iommu_group_remove_device(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06005166
Joerg Roedele3d10af2017-02-01 17:23:22 +01005167 iommu_device_unlink(&iommu->iommu, dev);
Alex Williamson70ae6f02011-10-21 15:56:11 -04005168}
5169
Eric Auger0659b8d2017-01-19 20:57:53 +00005170static void intel_iommu_get_resv_regions(struct device *device,
5171 struct list_head *head)
5172{
5173 struct iommu_resv_region *reg;
5174 struct dmar_rmrr_unit *rmrr;
5175 struct device *i_dev;
5176 int i;
5177
5178 rcu_read_lock();
5179 for_each_rmrr_units(rmrr) {
5180 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
5181 i, i_dev) {
5182 if (i_dev != device)
5183 continue;
5184
5185 list_add_tail(&rmrr->resv->list, head);
5186 }
5187 }
5188 rcu_read_unlock();
5189
5190 reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
5191 IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
Robin Murphy9d3a4de2017-03-16 17:00:16 +00005192 0, IOMMU_RESV_MSI);
Eric Auger0659b8d2017-01-19 20:57:53 +00005193 if (!reg)
5194 return;
5195 list_add_tail(&reg->list, head);
5196}
5197
5198static void intel_iommu_put_resv_regions(struct device *dev,
5199 struct list_head *head)
5200{
5201 struct iommu_resv_region *entry, *next;
5202
5203 list_for_each_entry_safe(entry, next, head, list) {
5204 if (entry->type == IOMMU_RESV_RESERVED)
5205 kfree(entry);
5206 }
Kay, Allen M38717942008-09-09 18:37:29 +03005207}
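/*
 * Illustrative sketch, not part of this driver: how a caller might walk
 * the reserved regions reported by the two callbacks above through the
 * generic iommu_get_resv_regions()/iommu_put_resv_regions() wrappers.
 * The function name and the dev_info() output are made up for the
 * example.
 */
static void __maybe_unused example_dump_resv_regions(struct device *dev)
{
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list)
		dev_info(dev, "reserved region: start %pad length %zu type %d\n",
			 &region->start, region->length, region->type);
	iommu_put_resv_regions(dev, &resv_regions);
}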
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01005208
David Woodhouse2f26e0a2015-09-09 11:40:47 +01005209#ifdef CONFIG_INTEL_IOMMU_SVM
Jacob Pan65ca7f52016-12-06 10:14:23 -08005210#define MAX_NR_PASID_BITS (20)
5211static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
5212{
5213 /*
5214 * Convert ecap_pss to extend context entry pts encoding, also
5215	 * Convert ecap_pss to extended context entry pts encoding, also
5216 * - number of PASID bits = ecap_pss + 1
5217 * - number of PASID table entries = 2^(pts + 5)
5218 * Therefore, pts = ecap_pss - 4
5219 * e.g. KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15
5220 */
5221 if (ecap_pss(iommu->ecap) < 5)
5222 return 0;
5223
5224 /* pasid_max is encoded as actual number of entries not the bits */
5225 return find_first_bit((unsigned long *)&iommu->pasid_max,
5226 MAX_NR_PASID_BITS) - 5;
5227}
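/*
 * Illustrative helper, not part of the upstream driver: invert the pts
 * encoding computed above.  Assuming pasid_max was left at the full
 * 1 << 20 for the KBL example in the comment (ecap_pss = 0x13, i.e.
 * 20 PASID bits), intel_iommu_get_pts() returns 20 - 5 = 15 and this
 * helper recovers 2^(15 + 5) = 2^20 PASID-table entries.
 */
static inline unsigned long example_pts_to_nr_entries(unsigned long pts)
{
	return 1UL << (pts + 5);
}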
5228
David Woodhouse2f26e0a2015-09-09 11:40:47 +01005229int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
5230{
5231 struct device_domain_info *info;
5232 struct context_entry *context;
5233 struct dmar_domain *domain;
5234 unsigned long flags;
5235 u64 ctx_lo;
5236 int ret;
5237
5238 domain = get_valid_domain_for_dev(sdev->dev);
5239 if (!domain)
5240 return -EINVAL;
5241
5242 spin_lock_irqsave(&device_domain_lock, flags);
5243 spin_lock(&iommu->lock);
5244
5245 ret = -EINVAL;
5246 info = sdev->dev->archdata.iommu;
5247 if (!info || !info->pasid_supported)
5248 goto out;
5249
5250 context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
5251 if (WARN_ON(!context))
5252 goto out;
5253
5254 ctx_lo = context[0].lo;
5255
5256 sdev->did = domain->iommu_did[iommu->seq_id];
5257 sdev->sid = PCI_DEVID(info->bus, info->devfn);
5258
5259 if (!(ctx_lo & CONTEXT_PASIDE)) {
Ashok Raj11b93eb2017-08-08 13:29:28 -07005260 if (iommu->pasid_state_table)
5261 context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
Jacob Pan65ca7f52016-12-06 10:14:23 -08005262 context[1].lo = (u64)virt_to_phys(iommu->pasid_table) |
5263 intel_iommu_get_pts(iommu);
5264
David Woodhouse2f26e0a2015-09-09 11:40:47 +01005265 wmb();
5266 /* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
5267 * extended to permit requests-with-PASID if the PASIDE bit
5268 * is set. which makes sense. For CONTEXT_TT_PASS_THROUGH,
5269	 * is set, which makes sense. For CONTEXT_TT_PASS_THROUGH,
5270 * are unconditionally blocked. Which makes less sense.
5271 * So convert from CONTEXT_TT_PASS_THROUGH to one of the new
5272 * "guest mode" translation types depending on whether ATS
5273 * is available or not. Annoyingly, we can't use the new
5274 * modes *unless* PASIDE is set. */
5275 if ((ctx_lo & CONTEXT_TT_MASK) == (CONTEXT_TT_PASS_THROUGH << 2)) {
5276 ctx_lo &= ~CONTEXT_TT_MASK;
5277 if (info->ats_supported)
5278 ctx_lo |= CONTEXT_TT_PT_PASID_DEV_IOTLB << 2;
5279 else
5280 ctx_lo |= CONTEXT_TT_PT_PASID << 2;
5281 }
5282 ctx_lo |= CONTEXT_PASIDE;
David Woodhouse907fea32015-10-13 14:11:13 +01005283 if (iommu->pasid_state_table)
5284 ctx_lo |= CONTEXT_DINVE;
David Woodhousea222a7f2015-10-07 23:35:18 +01005285 if (info->pri_supported)
5286 ctx_lo |= CONTEXT_PRS;
David Woodhouse2f26e0a2015-09-09 11:40:47 +01005287 context[0].lo = ctx_lo;
5288 wmb();
5289 iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
5290 DMA_CCMD_MASK_NOBIT,
5291 DMA_CCMD_DEVICE_INVL);
5292 }
5293
5294 /* Enable PASID support in the device, if it wasn't already */
5295 if (!info->pasid_enabled)
5296 iommu_enable_dev_iotlb(info);
5297
5298 if (info->ats_enabled) {
5299 sdev->dev_iotlb = 1;
5300 sdev->qdep = info->ats_qdep;
5301 if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
5302 sdev->qdep = 0;
5303 }
5304 ret = 0;
5305
5306 out:
5307 spin_unlock(&iommu->lock);
5308 spin_unlock_irqrestore(&device_domain_lock, flags);
5309
5310 return ret;
5311}
5312
5313struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
5314{
5315 struct intel_iommu *iommu;
5316 u8 bus, devfn;
5317
5318 if (iommu_dummy(dev)) {
5319 dev_warn(dev,
5320 "No IOMMU translation for device; cannot enable SVM\n");
5321 return NULL;
5322 }
5323
5324 iommu = device_to_iommu(dev, &bus, &devfn);
5325	if (!iommu) {
Sudeep Duttb9997e32015-10-18 20:54:37 -07005326 dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
David Woodhouse2f26e0a2015-09-09 11:40:47 +01005327 return NULL;
5328 }
5329
5330 if (!iommu->pasid_table) {
Sudeep Duttb9997e32015-10-18 20:54:37 -07005331 dev_err(dev, "PASID not enabled on IOMMU; cannot enable SVM\n");
David Woodhouse2f26e0a2015-09-09 11:40:47 +01005332 return NULL;
5333 }
5334
5335 return iommu;
5336}
5337#endif /* CONFIG_INTEL_IOMMU_SVM */
5338
Joerg Roedelb0119e82017-02-01 13:23:08 +01005339const struct iommu_ops intel_iommu_ops = {
Eric Auger0659b8d2017-01-19 20:57:53 +00005340 .capable = intel_iommu_capable,
5341 .domain_alloc = intel_iommu_domain_alloc,
5342 .domain_free = intel_iommu_domain_free,
5343 .attach_dev = intel_iommu_attach_device,
5344 .detach_dev = intel_iommu_detach_device,
5345 .map = intel_iommu_map,
5346 .unmap = intel_iommu_unmap,
5347 .map_sg = default_iommu_map_sg,
5348 .iova_to_phys = intel_iommu_iova_to_phys,
5349 .add_device = intel_iommu_add_device,
5350 .remove_device = intel_iommu_remove_device,
5351 .get_resv_regions = intel_iommu_get_resv_regions,
5352 .put_resv_regions = intel_iommu_put_resv_regions,
5353 .device_group = pci_device_group,
5354 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01005355};
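/*
 * Illustrative sketch, not part of the upstream file: how a kernel
 * consumer (VFIO, for instance) ends up exercising the callbacks above
 * through the generic IOMMU API.  The function name, the device pointer
 * and the IOVA/physical addresses are made up for the example.
 */
static int __maybe_unused example_map_one_page(struct device *dev,
					       phys_addr_t paddr)
{
	struct iommu_domain *domain;
	int ret;

	/* Ends up in intel_iommu_domain_alloc() via intel_iommu_ops. */
	domain = iommu_domain_alloc(&pci_bus_type);
	if (!domain)
		return -ENOMEM;

	/* Ends up in intel_iommu_attach_device(). */
	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto out_free;

	/* Ends up in intel_iommu_map(); prot becomes DMA_PTE_READ/WRITE. */
	ret = iommu_map(domain, 0x100000, paddr, PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE);
	if (!ret)
		/* Ends up in intel_iommu_unmap(); returns the unmapped size. */
		iommu_unmap(domain, 0x100000, PAGE_SIZE);

	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}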
David Woodhouse9af88142009-02-13 23:18:03 +00005356
Daniel Vetter94526182013-01-20 23:50:13 +01005357static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
5358{
5359 /* G4x/GM45 integrated gfx dmar support is totally busted. */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02005360 pr_info("Disabling IOMMU for graphics on this chipset\n");
Daniel Vetter94526182013-01-20 23:50:13 +01005361 dmar_map_gfx = 0;
5362}
5363
5364DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
5365DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
5366DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
5367DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
5368DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
5369DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
5370DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
5371
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08005372static void quirk_iommu_rwbf(struct pci_dev *dev)
David Woodhouse9af88142009-02-13 23:18:03 +00005373{
5374 /*
5375 * Mobile 4 Series Chipset neglects to set RWBF capability,
Daniel Vetter210561f2013-01-21 19:48:59 +01005376 * but needs it. Same seems to hold for the desktop versions.
David Woodhouse9af88142009-02-13 23:18:03 +00005377 */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02005378 pr_info("Forcing write-buffer flush capability\n");
David Woodhouse9af88142009-02-13 23:18:03 +00005379 rwbf_quirk = 1;
5380}
5381
5382DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
Daniel Vetter210561f2013-01-21 19:48:59 +01005383DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
5384DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
5385DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
5386DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
5387DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
5388DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
David Woodhousee0fc7e02009-09-30 09:12:17 -07005389
Adam Jacksoneecfd572010-08-25 21:17:34 +01005390#define GGC 0x52
5391#define GGC_MEMORY_SIZE_MASK (0xf << 8)
5392#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
5393#define GGC_MEMORY_SIZE_1M (0x1 << 8)
5394#define GGC_MEMORY_SIZE_2M (0x3 << 8)
5395#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
5396#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
5397#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
5398#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
5399
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08005400static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
David Woodhouse9eecabc2010-09-21 22:28:23 +01005401{
5402 unsigned short ggc;
5403
Adam Jacksoneecfd572010-08-25 21:17:34 +01005404 if (pci_read_config_word(dev, GGC, &ggc))
David Woodhouse9eecabc2010-09-21 22:28:23 +01005405 return;
5406
Adam Jacksoneecfd572010-08-25 21:17:34 +01005407 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02005408 pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
David Woodhouse9eecabc2010-09-21 22:28:23 +01005409 dmar_map_gfx = 0;
David Woodhouse6fbcfb32011-09-25 19:11:14 -07005410 } else if (dmar_map_gfx) {
5411 /* we have to ensure the gfx device is idle before we flush */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02005412 pr_info("Disabling batched IOTLB flush on Ironlake\n");
David Woodhouse6fbcfb32011-09-25 19:11:14 -07005413 intel_iommu_strict = 1;
5414 }
David Woodhouse9eecabc2010-09-21 22:28:23 +01005415}
5416DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
5417DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
5418DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
5419DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
5420
David Woodhousee0fc7e02009-09-30 09:12:17 -07005421/* On Tylersburg chipsets, some BIOSes have been known to enable the
5422 ISOCH DMAR unit for the Azalia sound device, but not give it any
5423 TLB entries, which causes it to deadlock. Check for that. We do
5424 this in a function called from init_dmars(), instead of in a PCI
5425 quirk, because we don't want to print the obnoxious "BIOS broken"
5426 message if VT-d is actually disabled.
5427*/
5428static void __init check_tylersburg_isoch(void)
5429{
5430 struct pci_dev *pdev;
5431 uint32_t vtisochctrl;
5432
5433 /* If there's no Azalia in the system anyway, forget it. */
5434 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
5435 if (!pdev)
5436 return;
5437 pci_dev_put(pdev);
5438
5439 /* System Management Registers. Might be hidden, in which case
5440 we can't do the sanity check. But that's OK, because the
5441 known-broken BIOSes _don't_ actually hide it, so far. */
5442 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
5443 if (!pdev)
5444 return;
5445
5446 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
5447 pci_dev_put(pdev);
5448 return;
5449 }
5450
5451 pci_dev_put(pdev);
5452
5453 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
5454 if (vtisochctrl & 1)
5455 return;
5456
5457 /* Drop all bits other than the number of TLB entries */
5458 vtisochctrl &= 0x1c;
5459
5460 /* If we have the recommended number of TLB entries (16), fine. */
5461 if (vtisochctrl == 0x10)
5462 return;
5463
5464 /* Zero TLB entries? You get to ride the short bus to school. */
5465 if (!vtisochctrl) {
5466 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
5467 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
5468 dmi_get_system_info(DMI_BIOS_VENDOR),
5469 dmi_get_system_info(DMI_BIOS_VERSION),
5470 dmi_get_system_info(DMI_PRODUCT_VERSION));
5471 iommu_identity_mapping |= IDENTMAP_AZALIA;
5472 return;
5473 }
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02005474
5475 pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
David Woodhousee0fc7e02009-09-30 09:12:17 -07005476 vtisochctrl);
5477}