/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 *          Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"DMAR: " fmt

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/cpu.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/crash_dump.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

/* IO virtual address start page frame number */
#define IOVA_START_PFN		(1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)

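/*
 * An "adjusted guest address width" (AGAW) encodes the page-table depth:
 * width = 30 + agaw * LEVEL_STRIDE and level = agaw + 2, so e.g. agaw 1
 * is a 39-bit, 3-level table and agaw 2 is a 48-bit, 4-level table.
 */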
static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	lo;
	u64	hi;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

/*
 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
 * if marked present.
 */
static phys_addr_t root_entry_lctp(struct root_entry *re)
{
	if (!(re->lo & 1))
		return 0;

	return re->lo & VTD_PAGE_MASK;
}

/*
 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
 * if marked present.
 */
static phys_addr_t root_entry_uctp(struct root_entry *re)
{
	if (!(re->hi & 1))
		return 0;

	return re->hi & VTD_PAGE_MASK;
}
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline void context_clear_pasid_enable(struct context_entry *context)
{
	context->lo &= ~(1ULL << 11);
}

static inline bool context_pasid_enabled(struct context_entry *context)
{
	return !!(context->lo & (1ULL << 11));
}

static inline void context_set_copied(struct context_entry *context)
{
	context->hi |= (1ull << 3);
}

static inline bool context_copied(struct context_entry *context)
{
	return !!(context->hi & (1ULL << 3));
}

static inline bool __context_present(struct context_entry *context)
{
	return (context->lo & 1);
}

static inline bool context_present(struct context_entry *context)
{
	return context_pasid_enabled(context) ?
	     __context_present(context) :
	     __context_present(context) && !context_copied(context);
}

static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline int context_domain_id(struct context_entry *c)
{
	return((c->hi >> 8) & 0xffff);
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

/*
 * This domain is a static identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/*
 * Domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)

#define for_each_domain_iommu(idx, domain)			\
	for (idx = 0; idx < g_num_of_iommus; idx++)		\
		if (domain->iommu_refcnt[idx])

struct dmar_domain {
	int	nid;			/* node id */

	unsigned	iommu_refcnt[DMAR_UNITS_SUPPORTED];
					/* Refcount of devices per iommu */


	u16		iommu_did[DMAR_UNITS_SUPPORTED];
					/* Domain ids per IOMMU. Use u16 since
					 * domain ids are 16 bit wide according
					 * to VT-d spec, section 9.3 */

	bool has_iotlb_device;
	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	u64		max_addr;	/* maximum mapped address */

	struct iommu_domain domain;	/* generic domain data structure for
					   iommu core */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	u8 pasid_supported:3;
	u8 pasid_enabled:1;
	u8 pri_supported:1;
	u8 pri_enabled:1;
	u8 ats_supported:1;
	u8 ats_enabled:1;
	u8 ats_qdep;
	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units	*/
	struct acpi_dmar_header *hdr;	/* ACPI header		*/
	u64	base_address;		/* reserved base address*/
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
	struct iommu_resv_region *resv;	/* reserved region handle */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void flush_unmaps_timeout(unsigned long data);

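/*
 * In non-strict mode, unmapped IOVAs are not flushed from the IOTLB
 * immediately; they are queued in these per-CPU tables and flushed in
 * batches, either when a table fills up (HIGH_WATER_MARK entries) or
 * from the deferred-flush timer.
 */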
struct deferred_flush_entry {
	unsigned long iova_pfn;
	unsigned long nrpages;
	struct dmar_domain *domain;
	struct page *freelist;
};

#define HIGH_WATER_MARK 250
struct deferred_flush_table {
	int next;
	struct deferred_flush_entry entries[HIGH_WATER_MARK];
};

struct deferred_flush_data {
	spinlock_t lock;
	int timer_on;
	struct timer_list timer;
	long size;
	struct deferred_flush_table *tables;
};

DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct dmar_domain *domain,
				     struct device *dev);
static void __dmar_remove_one_dev_info(struct device_domain_info *info);
static void domain_context_clear(struct intel_iommu *iommu,
				 struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int intel_iommu_ecs = 1;
static int intel_iommu_pasid28;
static int iommu_identity_mapping;

#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

/* Broadwell and Skylake have broken ECS support — normal so-called "second
 * level" translation of DMA requests-without-PASID doesn't actually happen
 * unless you also set the NESTE bit in an extended context-entry. Which of
 * course means that SVM doesn't work because it's trying to do nested
 * translation of the physical addresses it finds in the process page tables,
 * through the IOVA->phys mapping found in the "second level" page tables.
 *
 * The VT-d specification was retroactively changed to change the definition
 * of the capability bits and pretend that Broadwell/Skylake never happened...
 * but unfortunately the wrong bit was changed. It's ECS which is broken, but
 * for some reason it was the PASID capability bit which was redefined (from
 * bit 28 on BDW/SKL to bit 40 in future).
 *
 * So our test for ECS needs to eschew those implementations which set the old
 * PASID capability bit 28, since those are the ones on which ECS is broken.
 * Unless we are working around the 'pasid28' limitations, that is, by putting
 * the device into passthrough mode for normal DMA and thus masking the bug.
 */
#define ecs_enabled(iommu)	(intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
				 (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
/* PASID support is thus enabled if ECS is enabled and *either* of the old
 * or new capability bits are set. */
#define pasid_enabled(iommu)	(ecs_enabled(iommu) &&			\
				 (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static const struct iommu_ops intel_iommu_ops;

static bool translation_pre_enabled(struct intel_iommu *iommu)
{
	return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
}

static void clear_translation_pre_enabled(struct intel_iommu *iommu)
{
	iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct intel_iommu *iommu)
{
	u32 gsts;

	gsts = readl(iommu->reg + DMAR_GSTS_REG);
	if (gsts & DMA_GSTS_TES)
		iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
}

/* Convert generic 'struct iommu_domain to private struct dmar_domain */
static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct dmar_domain, domain);
}

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			pr_info("IOMMU enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			pr_info("IOMMU disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			pr_info("Disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			pr_info("Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			pr_info("Disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			pr_info("Disable supported super page\n");
			intel_iommu_superpage = 0;
		} else if (!strncmp(str, "ecs_off", 7)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable extended context table support\n");
			intel_iommu_ecs = 0;
		} else if (!strncmp(str, "pasid28", 7)) {
			printk(KERN_INFO
				"Intel-IOMMU: enable pre-production PASID support\n");
			intel_iommu_pasid28 = 1;
			iommu_identity_mapping |= IDENTMAP_GFX;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;

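/*
 * Domain-id to domain lookup: DIDs index a two-level table hanging off
 * iommu->domains, with 256 domain pointers per second-level page.
 */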
static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
{
	struct dmar_domain **domains;
	int idx = did >> 8;

	domains = iommu->domains[idx];
	if (!domains)
		return NULL;

	return domains[did & 0xff];
}

static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
			     struct dmar_domain *domain)
{
	struct dmar_domain **domains;
	int idx = did >> 8;

	if (!iommu->domains[idx]) {
		size_t size = 256 * sizeof(struct dmar_domain *);
		iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
	}

	domains = iommu->domains[idx];
	if (WARN_ON(!domains))
		return;
	else
		domains[did & 0xff] = domain;
}

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void * alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

static inline int domain_type_is_vm(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}

static inline int domain_type_is_si(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
}

static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
	return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
				DOMAIN_FLAG_STATIC_IDENTITY);
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
					unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}

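/*
 * Pick the highest AGAW, no larger than the one implied by max_gaw, that
 * this IOMMU actually supports according to its SAGAW capability field.
 * Returns -1 if none of the supported widths fit.
 */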
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support it.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain_type_is_vm_or_si(domain));
	for_each_domain_iommu(iommu_id, domain)
		break;

	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

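/*
 * Recompute domain->iommu_coherency: coherent only if every IOMMU the
 * domain is attached to (or every active IOMMU, if none are attached yet)
 * reports coherent page-walks in its extended capability register.
 */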
static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool found = false;
	int i;

	domain->iommu_coherency = 1;

	for_each_domain_iommu(i, domain) {
		found = true;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	if (found)
		return;

	/* No hardware attached; use lowest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_coherent(iommu->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	rcu_read_unlock();
}

static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ret = 1;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			if (!ecap_sc_support(iommu->ecap)) {
				ret = 0;
				break;
			}
		}
	}
	rcu_read_unlock();

	return ret;
}

static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		return 0;
	}

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			mask &= cap_super_page_val(iommu->cap);
			if (!mask)
				break;
		}
	}
	rcu_read_unlock();

	return fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain->iommu_snooping = domain_update_iommu_snooping(NULL);
	domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}

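/*
 * Return the context entry for (bus, devfn), optionally allocating the
 * context table it lives in. With extended context support the root entry
 * carries two table pointers ('lo' for devfn 0x00-0x7f, 'hi' for devfn
 * 0x80-0xff) and each extended entry occupies two slots, hence devfn * 2.
 */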
static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
						       u8 bus, u8 devfn, int alloc)
{
	struct root_entry *root = &iommu->root_entry[bus];
	struct context_entry *context;
	u64 *entry;

	entry = &root->lo;
	if (ecs_enabled(iommu)) {
		if (devfn >= 0x80) {
			devfn -= 0x80;
			entry = &root->hi;
		}
		devfn *= 2;
	}
	if (*entry & 1)
		context = phys_to_virt(*entry & VTD_PAGE_MASK);
	else {
		unsigned long phy_addr;
		if (!alloc)
			return NULL;

		context = alloc_pgtable_page(iommu->node);
		if (!context)
			return NULL;

		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		*entry = phy_addr | 1;
		__iommu_flush_cache(iommu, entry, sizeof(*entry));
	}
	return &context[devfn];
}

static int iommu_dummy(struct device *dev)
{
	return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

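/*
 * Walk the DMAR scope tables to find the IOMMU that covers @dev, and
 * report the bus/devfn to use when programming context entries for it.
 * VFs are looked up via their PF; devices behind a listed PCI bridge
 * and catch-all (include_all) units are handled here as well.
 */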
static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	struct pci_dev *ptmp, *pdev = NULL;
	u16 segment = 0;
	int i;

	if (iommu_dummy(dev))
		return NULL;

	if (dev_is_pci(dev)) {
		struct pci_dev *pf_pdev;

		pdev = to_pci_dev(dev);
		/* VFs aren't listed in scope tables; we need to look up
		 * the PF instead to find the IOMMU. */
		pf_pdev = pci_physfn(pdev);
		dev = &pf_pdev->dev;
		segment = pci_domain_nr(pdev->bus);
	} else if (has_acpi_companion(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				/* For a VF use its original BDF# not that of the PF
				 * which we used for the IOMMU lookup. Strictly speaking
				 * we could do this for all PCI devices; we only need to
				 * get the BDF# from the scope table for ACPI matches. */
				if (pdev->is_virtfn)
					goto got_pdev;

				*bus = drhd->devices[i].bus;
				*devfn = drhd->devices[i].devfn;
				goto out;
			}

			if (!pdev || !dev_is_pci(tmp))
				continue;

			ptmp = to_pci_dev(tmp);
			if (ptmp->subordinate &&
			    ptmp->subordinate->number <= pdev->bus->number &&
			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
		got_pdev:
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;
			goto out;
		}
	}
	iommu = NULL;
 out:
	rcu_read_unlock();

	return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (context)
		ret = context_present(context);
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (context) {
		context_clear_entry(context);
		__iommu_flush_cache(iommu, context, sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

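/*
 * Free the context tables referenced by the root table (both the lower
 * and, with extended context support, the upper halves), then free the
 * root table itself.
 */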
static void free_context_table(struct intel_iommu *iommu)
{
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		context = iommu_context_addr(iommu, i, 0, 0);
		if (context)
			free_pgtable_page(context);

		if (!ecs_enabled(iommu))
			continue;

		context = iommu_context_addr(iommu, i, 0x80, 0);
		if (context)
			free_pgtable_page(context);

	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

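/*
 * Walk the page table to the PTE covering @pfn at *target_level, allocating
 * missing intermediate levels on the way down. A *target_level of 0 means
 * "don't allocate": stop at the first superpage or non-present entry and
 * report the level reached back through *target_level.
 */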
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (!domain_pfn_supported(domain, pfn))
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}


/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (dma_pte_superpage(pte)) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}

static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level - 1);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/* clear last level (leaf) ptes and free page table pages. */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	pte = page_address(pg);
	do {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
		pte++;
	} while (!first_pte_in_page(pte));

	return freelist;
}

static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
1243 if (level > 1 && !dma_pte_superpage(pte))
1244 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1245
1246 dma_clear_pte(pte);
1247 if (!first_pte)
1248 first_pte = pte;
1249 last_pte = pte;
1250 } else if (level > 1) {
1251 /* Recurse down into a level that isn't *entirely* obsolete */
1252 freelist = dma_pte_clear_level(domain, level - 1,
1253 phys_to_virt(dma_pte_addr(pte)),
1254 level_pfn, start_pfn, last_pfn,
1255 freelist);
1256 }
1257next:
1258 pfn += level_size(level);
1259 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1260
1261 if (first_pte)
1262 domain_flush_cache(domain, first_pte,
1263 (void *)++last_pte - (void *)first_pte);
1264
1265 return freelist;
1266}
1267
1268/* We can't just free the pages because the IOMMU may still be walking
1269 the page tables, and may have cached the intermediate levels. The
1270 pages can only be freed after the IOTLB flush has been done. */
Joerg Roedelb6904202015-08-13 11:32:18 +02001271static struct page *domain_unmap(struct dmar_domain *domain,
1272 unsigned long start_pfn,
1273 unsigned long last_pfn)
David Woodhouseea8ea462014-03-05 17:09:32 +00001274{
David Woodhouseea8ea462014-03-05 17:09:32 +00001275 struct page *freelist = NULL;
1276
Jiang Liu162d1b12014-07-11 14:19:35 +08001277 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1278 BUG_ON(!domain_pfn_supported(domain, last_pfn));
David Woodhouseea8ea462014-03-05 17:09:32 +00001279 BUG_ON(start_pfn > last_pfn);
1280
1281 /* we don't need lock here; nobody else touches the iova range */
1282 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1283 domain->pgd, 0, start_pfn, last_pfn, NULL);
1284
1285 /* free pgd */
1286 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1287 struct page *pgd_page = virt_to_page(domain->pgd);
1288 pgd_page->freelist = freelist;
1289 freelist = pgd_page;
1290
1291 domain->pgd = NULL;
1292 }
1293
1294 return freelist;
1295}
1296
Joerg Roedelb6904202015-08-13 11:32:18 +02001297static void dma_free_pagelist(struct page *freelist)
David Woodhouseea8ea462014-03-05 17:09:32 +00001298{
1299 struct page *pg;
1300
1301 while ((pg = freelist)) {
1302 freelist = pg->freelist;
1303 free_pgtable_page(page_address(pg));
1304 }
1305}
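
/*
 * Illustrative sketch (not part of the original file): the intended
 * ordering for tearing down a mapped range is unmap -> IOTLB flush ->
 * free the collected page-table pages, matching the comment above
 * domain_unmap().  The wrapper below is hypothetical; it only calls
 * helpers defined in this file (iommu_flush_iotlb_psi() appears later),
 * and is kept inside #if 0 so it is never built.
 */
#if 0
static void example_unmap_and_free(struct dmar_domain *domain,
				   struct intel_iommu *iommu,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	struct page *freelist;

	/* Detach the page tables; hardware may still hold cached copies. */
	freelist = domain_unmap(domain, start_pfn, last_pfn);

	/* Invalidate the IOTLB so the hardware page-walk can no longer
	   reach the detached pages. */
	iommu_flush_iotlb_psi(iommu, domain, start_pfn,
			      last_pfn - start_pfn + 1, 0, 0);

	/* Only now is it safe to hand the pages back to the allocator. */
	dma_free_pagelist(freelist);
}
#endif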
1306
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001307/* iommu handling */
1308static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1309{
1310 struct root_entry *root;
1311 unsigned long flags;
1312
Suresh Siddha4c923d42009-10-02 11:01:24 -07001313 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
Jiang Liuffebeb42014-11-09 22:48:02 +08001314 if (!root) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001315 pr_err("Allocating root entry for %s failed\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08001316 iommu->name);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001317 return -ENOMEM;
Jiang Liuffebeb42014-11-09 22:48:02 +08001318 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001319
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001320 __iommu_flush_cache(iommu, root, ROOT_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001321
1322 spin_lock_irqsave(&iommu->lock, flags);
1323 iommu->root_entry = root;
1324 spin_unlock_irqrestore(&iommu->lock, flags);
1325
1326 return 0;
1327}
1328
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001329static void iommu_set_root_entry(struct intel_iommu *iommu)
1330{
David Woodhouse03ecc322015-02-13 14:35:21 +00001331 u64 addr;
David Woodhousec416daa2009-05-10 20:30:58 +01001332 u32 sts;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001333 unsigned long flag;
1334
David Woodhouse03ecc322015-02-13 14:35:21 +00001335 addr = virt_to_phys(iommu->root_entry);
David Woodhousec83b2f22015-06-12 10:15:49 +01001336 if (ecs_enabled(iommu))
David Woodhouse03ecc322015-02-13 14:35:21 +00001337 addr |= DMA_RTADDR_RTT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001338
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001339 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse03ecc322015-02-13 14:35:21 +00001340 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001341
David Woodhousec416daa2009-05-10 20:30:58 +01001342 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001343
1344	/* Make sure hardware completes it */
1345 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001346 readl, (sts & DMA_GSTS_RTPS), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001347
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001348 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001349}
1350
1351static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1352{
1353 u32 val;
1354 unsigned long flag;
1355
David Woodhouse9af88142009-02-13 23:18:03 +00001356 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001357 return;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001358
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001359 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse462b60f2009-05-10 20:18:18 +01001360 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001361
1362	/* Make sure hardware completes it */
1363 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001364 readl, (!(val & DMA_GSTS_WBFS)), val);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001365
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001366 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001367}
1368
1369/* return value determines if we need a write buffer flush */
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001370static void __iommu_flush_context(struct intel_iommu *iommu,
1371 u16 did, u16 source_id, u8 function_mask,
1372 u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001373{
1374 u64 val = 0;
1375 unsigned long flag;
1376
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001377 switch (type) {
1378 case DMA_CCMD_GLOBAL_INVL:
1379 val = DMA_CCMD_GLOBAL_INVL;
1380 break;
1381 case DMA_CCMD_DOMAIN_INVL:
1382 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1383 break;
1384 case DMA_CCMD_DEVICE_INVL:
1385 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1386 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1387 break;
1388 default:
1389 BUG();
1390 }
1391 val |= DMA_CCMD_ICC;
1392
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001393 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001394 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1395
1396	/* Make sure hardware completes it */
1397 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1398 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1399
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001400 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001401}
1402
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001403/* return value determines if we need a write buffer flush */
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001404static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1405 u64 addr, unsigned int size_order, u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001406{
1407 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1408 u64 val = 0, val_iva = 0;
1409 unsigned long flag;
1410
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001411 switch (type) {
1412 case DMA_TLB_GLOBAL_FLUSH:
1413		/* global flush doesn't need to set IVA_REG */
1414 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1415 break;
1416 case DMA_TLB_DSI_FLUSH:
1417 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1418 break;
1419 case DMA_TLB_PSI_FLUSH:
1420 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
David Woodhouseea8ea462014-03-05 17:09:32 +00001421 /* IH bit is passed in as part of address */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001422 val_iva = size_order | addr;
1423 break;
1424 default:
1425 BUG();
1426 }
1427 /* Note: set drain read/write */
1428#if 0
1429 /*
1430	 * This is probably meant to be extra safe; it looks like we can
1431 * ignore it without any impact.
1432 */
1433 if (cap_read_drain(iommu->cap))
1434 val |= DMA_TLB_READ_DRAIN;
1435#endif
1436 if (cap_write_drain(iommu->cap))
1437 val |= DMA_TLB_WRITE_DRAIN;
1438
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001439 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001440 /* Note: Only uses first TLB reg currently */
1441 if (val_iva)
1442 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1443 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1444
1445	/* Make sure hardware completes it */
1446 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1447 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1448
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001449 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001450
1451 /* check IOTLB invalidation granularity */
1452 if (DMA_TLB_IAIG(val) == 0)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001453 pr_err("Flush IOTLB failed\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001454 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001455 pr_debug("TLB flush request %Lx, actual %Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001456 (unsigned long long)DMA_TLB_IIRG(type),
1457 (unsigned long long)DMA_TLB_IAIG(val));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001458}
1459
David Woodhouse64ae8922014-03-09 12:52:30 -07001460static struct device_domain_info *
1461iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1462 u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001463{
Yu Zhao93a23a72009-05-18 13:51:37 +08001464 struct device_domain_info *info;
Yu Zhao93a23a72009-05-18 13:51:37 +08001465
Joerg Roedel55d94042015-07-22 16:50:40 +02001466 assert_spin_locked(&device_domain_lock);
1467
Yu Zhao93a23a72009-05-18 13:51:37 +08001468 if (!iommu->qi)
1469 return NULL;
1470
Yu Zhao93a23a72009-05-18 13:51:37 +08001471 list_for_each_entry(info, &domain->devices, link)
Jiang Liuc3b497c2014-07-11 14:19:25 +08001472 if (info->iommu == iommu && info->bus == bus &&
1473 info->devfn == devfn) {
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001474 if (info->ats_supported && info->dev)
1475 return info;
Yu Zhao93a23a72009-05-18 13:51:37 +08001476 break;
1477 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001478
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001479 return NULL;
Yu Zhao93a23a72009-05-18 13:51:37 +08001480}
1481
Omer Peleg0824c592016-04-20 19:03:35 +03001482static void domain_update_iotlb(struct dmar_domain *domain)
1483{
1484 struct device_domain_info *info;
1485 bool has_iotlb_device = false;
1486
1487 assert_spin_locked(&device_domain_lock);
1488
1489 list_for_each_entry(info, &domain->devices, link) {
1490 struct pci_dev *pdev;
1491
1492 if (!info->dev || !dev_is_pci(info->dev))
1493 continue;
1494
1495 pdev = to_pci_dev(info->dev);
1496 if (pdev->ats_enabled) {
1497 has_iotlb_device = true;
1498 break;
1499 }
1500 }
1501
1502 domain->has_iotlb_device = has_iotlb_device;
1503}
1504
Yu Zhao93a23a72009-05-18 13:51:37 +08001505static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1506{
Bjorn Helgaasfb0cc3a2015-07-20 09:10:36 -05001507 struct pci_dev *pdev;
1508
Omer Peleg0824c592016-04-20 19:03:35 +03001509 assert_spin_locked(&device_domain_lock);
1510
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001511 if (!info || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001512 return;
1513
Bjorn Helgaasfb0cc3a2015-07-20 09:10:36 -05001514 pdev = to_pci_dev(info->dev);
Bjorn Helgaasfb0cc3a2015-07-20 09:10:36 -05001515
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001516#ifdef CONFIG_INTEL_IOMMU_SVM
1517 /* The PCIe spec, in its wisdom, declares that the behaviour of
1518 the device if you enable PASID support after ATS support is
1519 undefined. So always enable PASID support on devices which
1520 have it, even if we can't yet know if we're ever going to
1521 use it. */
1522 if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
1523 info->pasid_enabled = 1;
1524
1525 if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
1526 info->pri_enabled = 1;
1527#endif
1528 if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
1529 info->ats_enabled = 1;
Omer Peleg0824c592016-04-20 19:03:35 +03001530 domain_update_iotlb(info->domain);
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001531 info->ats_qdep = pci_ats_queue_depth(pdev);
1532 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001533}
1534
1535static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1536{
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001537 struct pci_dev *pdev;
1538
Omer Peleg0824c592016-04-20 19:03:35 +03001539 assert_spin_locked(&device_domain_lock);
1540
Jeremy McNicollda972fb2016-01-14 21:33:06 -08001541 if (!dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001542 return;
1543
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001544 pdev = to_pci_dev(info->dev);
1545
1546 if (info->ats_enabled) {
1547 pci_disable_ats(pdev);
1548 info->ats_enabled = 0;
Omer Peleg0824c592016-04-20 19:03:35 +03001549 domain_update_iotlb(info->domain);
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001550 }
1551#ifdef CONFIG_INTEL_IOMMU_SVM
1552 if (info->pri_enabled) {
1553 pci_disable_pri(pdev);
1554 info->pri_enabled = 0;
1555 }
1556 if (info->pasid_enabled) {
1557 pci_disable_pasid(pdev);
1558 info->pasid_enabled = 0;
1559 }
1560#endif
Yu Zhao93a23a72009-05-18 13:51:37 +08001561}
1562
1563static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1564 u64 addr, unsigned mask)
1565{
1566 u16 sid, qdep;
1567 unsigned long flags;
1568 struct device_domain_info *info;
1569
Omer Peleg0824c592016-04-20 19:03:35 +03001570 if (!domain->has_iotlb_device)
1571 return;
1572
Yu Zhao93a23a72009-05-18 13:51:37 +08001573 spin_lock_irqsave(&device_domain_lock, flags);
1574 list_for_each_entry(info, &domain->devices, link) {
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001575 if (!info->ats_enabled)
Yu Zhao93a23a72009-05-18 13:51:37 +08001576 continue;
1577
1578 sid = info->bus << 8 | info->devfn;
David Woodhouseb16d0cb2015-10-12 14:17:37 +01001579 qdep = info->ats_qdep;
Yu Zhao93a23a72009-05-18 13:51:37 +08001580 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1581 }
1582 spin_unlock_irqrestore(&device_domain_lock, flags);
1583}
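
/*
 * Worked example (illustrative): for a device at 02:01.3 the source-id
 * used above is sid = bus << 8 | devfn = 0x02 << 8 | 0x0b = 0x020b,
 * since devfn packs slot 1 and function 3 as (1 << 3) | 3 = 0x0b.
 */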
1584
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02001585static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1586 struct dmar_domain *domain,
1587 unsigned long pfn, unsigned int pages,
1588 int ih, int map)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001589{
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001590 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
David Woodhouse03d6a242009-06-28 15:33:46 +01001591 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02001592 u16 did = domain->iommu_did[iommu->seq_id];
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001593
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001594 BUG_ON(pages == 0);
1595
David Woodhouseea8ea462014-03-05 17:09:32 +00001596 if (ih)
1597 ih = 1 << 6;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001598 /*
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001599	 * Fall back to domain-selective flush if no PSI support or the size is
1600 * too big.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001601 * PSI requires page size to be 2 ^ x, and the base address is naturally
1602 * aligned to the size
1603 */
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001604 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1605 iommu->flush.flush_iotlb(iommu, did, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001606 DMA_TLB_DSI_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001607 else
David Woodhouseea8ea462014-03-05 17:09:32 +00001608 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001609 DMA_TLB_PSI_FLUSH);
Yu Zhaobf92df32009-06-29 11:31:45 +08001610
1611 /*
Nadav Amit82653632010-04-01 13:24:40 +03001612 * In caching mode, changes of pages from non-present to present require
1613	 * a flush. However, the device IOTLB doesn't need to be flushed in this case.
Yu Zhaobf92df32009-06-29 11:31:45 +08001614 */
Nadav Amit82653632010-04-01 13:24:40 +03001615 if (!cap_caching_mode(iommu->cap) || !map)
Joerg Roedel9452d5b2015-07-21 10:00:56 +02001616 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1617 addr, mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001618}
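
/*
 * Worked example (illustrative) for the mask computation above: a
 * request covering pages = 3 is rounded up to 4, so mask = ilog2(4) = 2
 * and the hardware invalidates a naturally aligned 4-page (16KiB)
 * window that contains the range.
 */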
1619
mark grossf8bab732008-02-08 04:18:38 -08001620static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1621{
1622 u32 pmen;
1623 unsigned long flags;
1624
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001625 raw_spin_lock_irqsave(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001626 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1627 pmen &= ~DMA_PMEN_EPM;
1628 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1629
1630 /* wait for the protected region status bit to clear */
1631 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1632 readl, !(pmen & DMA_PMEN_PRS), pmen);
1633
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001634 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001635}
1636
Jiang Liu2a41cce2014-07-11 14:19:33 +08001637static void iommu_enable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001638{
1639 u32 sts;
1640 unsigned long flags;
1641
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001642 raw_spin_lock_irqsave(&iommu->register_lock, flags);
David Woodhousec416daa2009-05-10 20:30:58 +01001643 iommu->gcmd |= DMA_GCMD_TE;
1644 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001645
1646	/* Make sure hardware completes it */
1647 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001648 readl, (sts & DMA_GSTS_TES), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001649
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001650 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001651}
1652
Jiang Liu2a41cce2014-07-11 14:19:33 +08001653static void iommu_disable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001654{
1655 u32 sts;
1656 unsigned long flag;
1657
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001658 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001659 iommu->gcmd &= ~DMA_GCMD_TE;
1660 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1661
1662	/* Make sure hardware completes it */
1663 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001664 readl, (!(sts & DMA_GSTS_TES)), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001665
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001666 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001667}
1668
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001669
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001670static int iommu_init_domains(struct intel_iommu *iommu)
1671{
Joerg Roedel8bf47812015-07-21 10:41:21 +02001672 u32 ndomains, nlongs;
1673 size_t size;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001674
1675 ndomains = cap_ndoms(iommu->cap);
Joerg Roedel8bf47812015-07-21 10:41:21 +02001676 pr_debug("%s: Number of Domains supported <%d>\n",
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001677 iommu->name, ndomains);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001678 nlongs = BITS_TO_LONGS(ndomains);
1679
Donald Dutile94a91b52009-08-20 16:51:34 -04001680 spin_lock_init(&iommu->lock);
1681
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001682 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1683 if (!iommu->domain_ids) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001684 pr_err("%s: Allocating domain id array failed\n",
1685 iommu->name);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001686 return -ENOMEM;
1687 }
Joerg Roedel8bf47812015-07-21 10:41:21 +02001688
Wei Yang86f004c2016-05-21 02:41:51 +00001689 size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
Joerg Roedel8bf47812015-07-21 10:41:21 +02001690 iommu->domains = kzalloc(size, GFP_KERNEL);
1691
1692 if (iommu->domains) {
1693 size = 256 * sizeof(struct dmar_domain *);
1694 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1695 }
1696
1697 if (!iommu->domains || !iommu->domains[0]) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001698 pr_err("%s: Allocating domain array failed\n",
1699 iommu->name);
Jiang Liu852bdb02014-01-06 14:18:11 +08001700 kfree(iommu->domain_ids);
Joerg Roedel8bf47812015-07-21 10:41:21 +02001701 kfree(iommu->domains);
Jiang Liu852bdb02014-01-06 14:18:11 +08001702 iommu->domain_ids = NULL;
Joerg Roedel8bf47812015-07-21 10:41:21 +02001703 iommu->domains = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001704 return -ENOMEM;
1705 }
1706
Joerg Roedel8bf47812015-07-21 10:41:21 +02001707
1708
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001709 /*
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001710 * If Caching mode is set, then invalid translations are tagged
1711 * with domain-id 0, hence we need to pre-allocate it. We also
1712 * use domain-id 0 as a marker for non-allocated domain-id, so
1713 * make sure it is not used for a real domain.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001714 */
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001715 set_bit(0, iommu->domain_ids);
1716
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001717 return 0;
1718}
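
/*
 * Sizing example (illustrative): with e.g. cap_ndoms() = 65536
 * domain-ids, the bitmap above needs 65536 / BITS_PER_LONG longs, and
 * the two-level domains array needs 65536 / 256 = 256 pointer slots, of
 * which only domains[0] (256 dmar_domain pointers) is allocated up
 * front; the other slots are left NULL here.
 */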
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001719
Jiang Liuffebeb42014-11-09 22:48:02 +08001720static void disable_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001721{
Joerg Roedel29a27712015-07-21 17:17:12 +02001722 struct device_domain_info *info, *tmp;
Joerg Roedel55d94042015-07-22 16:50:40 +02001723 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001724
Joerg Roedel29a27712015-07-21 17:17:12 +02001725 if (!iommu->domains || !iommu->domain_ids)
1726 return;
Jiang Liua4eaa862014-02-19 14:07:30 +08001727
Joerg Roedelbea64032016-11-08 15:08:26 +01001728again:
Joerg Roedel55d94042015-07-22 16:50:40 +02001729 spin_lock_irqsave(&device_domain_lock, flags);
Joerg Roedel29a27712015-07-21 17:17:12 +02001730 list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1731 struct dmar_domain *domain;
1732
1733 if (info->iommu != iommu)
1734 continue;
1735
1736 if (!info->dev || !info->domain)
1737 continue;
1738
1739 domain = info->domain;
1740
Joerg Roedelbea64032016-11-08 15:08:26 +01001741 __dmar_remove_one_dev_info(info);
Joerg Roedel29a27712015-07-21 17:17:12 +02001742
Joerg Roedelbea64032016-11-08 15:08:26 +01001743 if (!domain_type_is_vm_or_si(domain)) {
1744 /*
1745 * The domain_exit() function can't be called under
1746 * device_domain_lock, as it takes this lock itself.
1747 * So release the lock here and re-run the loop
1748 * afterwards.
1749 */
1750 spin_unlock_irqrestore(&device_domain_lock, flags);
Joerg Roedel29a27712015-07-21 17:17:12 +02001751 domain_exit(domain);
Joerg Roedelbea64032016-11-08 15:08:26 +01001752 goto again;
1753 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001754 }
Joerg Roedel55d94042015-07-22 16:50:40 +02001755 spin_unlock_irqrestore(&device_domain_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001756
1757 if (iommu->gcmd & DMA_GCMD_TE)
1758 iommu_disable_translation(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08001759}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001760
Jiang Liuffebeb42014-11-09 22:48:02 +08001761static void free_dmar_iommu(struct intel_iommu *iommu)
1762{
1763 if ((iommu->domains) && (iommu->domain_ids)) {
Wei Yang86f004c2016-05-21 02:41:51 +00001764 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
Joerg Roedel8bf47812015-07-21 10:41:21 +02001765 int i;
1766
1767 for (i = 0; i < elems; i++)
1768 kfree(iommu->domains[i]);
Jiang Liuffebeb42014-11-09 22:48:02 +08001769 kfree(iommu->domains);
1770 kfree(iommu->domain_ids);
1771 iommu->domains = NULL;
1772 iommu->domain_ids = NULL;
1773 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001774
Weidong Hand9630fe2008-12-08 11:06:32 +08001775 g_iommus[iommu->seq_id] = NULL;
1776
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001777 /* free context mapping */
1778 free_context_table(iommu);
David Woodhouse8a94ade2015-03-24 14:54:56 +00001779
1780#ifdef CONFIG_INTEL_IOMMU_SVM
David Woodhousea222a7f2015-10-07 23:35:18 +01001781 if (pasid_enabled(iommu)) {
1782 if (ecap_prs(iommu->ecap))
1783 intel_svm_finish_prq(iommu);
David Woodhouse8a94ade2015-03-24 14:54:56 +00001784 intel_svm_free_pasid_tables(iommu);
David Woodhousea222a7f2015-10-07 23:35:18 +01001785 }
David Woodhouse8a94ade2015-03-24 14:54:56 +00001786#endif
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001787}
1788
Jiang Liuab8dfe22014-07-11 14:19:27 +08001789static struct dmar_domain *alloc_domain(int flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001790{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001791 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001792
1793 domain = alloc_domain_mem();
1794 if (!domain)
1795 return NULL;
1796
Jiang Liuab8dfe22014-07-11 14:19:27 +08001797 memset(domain, 0, sizeof(*domain));
Suresh Siddha4c923d42009-10-02 11:01:24 -07001798 domain->nid = -1;
Jiang Liuab8dfe22014-07-11 14:19:27 +08001799 domain->flags = flags;
Omer Peleg0824c592016-04-20 19:03:35 +03001800 domain->has_iotlb_device = false;
Jiang Liu92d03cc2014-02-19 14:07:28 +08001801 INIT_LIST_HEAD(&domain->devices);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001802
1803 return domain;
1804}
1805
Joerg Roedeld160aca2015-07-22 11:52:53 +02001806/* Must be called with iommu->lock */
1807static int domain_attach_iommu(struct dmar_domain *domain,
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001808 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001809{
Jiang Liu44bde612014-07-11 14:19:29 +08001810 unsigned long ndomains;
Joerg Roedel55d94042015-07-22 16:50:40 +02001811 int num;
Jiang Liu44bde612014-07-11 14:19:29 +08001812
Joerg Roedel55d94042015-07-22 16:50:40 +02001813 assert_spin_locked(&device_domain_lock);
Joerg Roedeld160aca2015-07-22 11:52:53 +02001814 assert_spin_locked(&iommu->lock);
Jiang Liu44bde612014-07-11 14:19:29 +08001815
Joerg Roedel29a27712015-07-21 17:17:12 +02001816 domain->iommu_refcnt[iommu->seq_id] += 1;
1817 domain->iommu_count += 1;
1818 if (domain->iommu_refcnt[iommu->seq_id] == 1) {
Jiang Liufb170fb2014-07-11 14:19:28 +08001819 ndomains = cap_ndoms(iommu->cap);
Joerg Roedeld160aca2015-07-22 11:52:53 +02001820 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1821
1822 if (num >= ndomains) {
1823 pr_err("%s: No free domain ids\n", iommu->name);
1824 domain->iommu_refcnt[iommu->seq_id] -= 1;
1825 domain->iommu_count -= 1;
Joerg Roedel55d94042015-07-22 16:50:40 +02001826 return -ENOSPC;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001827 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001828
Joerg Roedeld160aca2015-07-22 11:52:53 +02001829 set_bit(num, iommu->domain_ids);
1830 set_iommu_domain(iommu, num, domain);
Jiang Liufb170fb2014-07-11 14:19:28 +08001831
Joerg Roedeld160aca2015-07-22 11:52:53 +02001832 domain->iommu_did[iommu->seq_id] = num;
1833 domain->nid = iommu->node;
1834
Jiang Liufb170fb2014-07-11 14:19:28 +08001835 domain_update_iommu_cap(domain);
1836 }
Joerg Roedeld160aca2015-07-22 11:52:53 +02001837
Joerg Roedel55d94042015-07-22 16:50:40 +02001838 return 0;
Jiang Liufb170fb2014-07-11 14:19:28 +08001839}
1840
1841static int domain_detach_iommu(struct dmar_domain *domain,
1842 struct intel_iommu *iommu)
1843{
Joerg Roedeld160aca2015-07-22 11:52:53 +02001844 int num, count = INT_MAX;
Jiang Liufb170fb2014-07-11 14:19:28 +08001845
Joerg Roedel55d94042015-07-22 16:50:40 +02001846 assert_spin_locked(&device_domain_lock);
Joerg Roedeld160aca2015-07-22 11:52:53 +02001847 assert_spin_locked(&iommu->lock);
Jiang Liufb170fb2014-07-11 14:19:28 +08001848
Joerg Roedel29a27712015-07-21 17:17:12 +02001849 domain->iommu_refcnt[iommu->seq_id] -= 1;
1850 count = --domain->iommu_count;
1851 if (domain->iommu_refcnt[iommu->seq_id] == 0) {
Joerg Roedeld160aca2015-07-22 11:52:53 +02001852 num = domain->iommu_did[iommu->seq_id];
1853 clear_bit(num, iommu->domain_ids);
1854 set_iommu_domain(iommu, num, NULL);
1855
Jiang Liufb170fb2014-07-11 14:19:28 +08001856 domain_update_iommu_cap(domain);
Joerg Roedelc0e8a6c2015-07-21 09:39:46 +02001857 domain->iommu_did[iommu->seq_id] = 0;
Jiang Liufb170fb2014-07-11 14:19:28 +08001858 }
Jiang Liufb170fb2014-07-11 14:19:28 +08001859
1860 return count;
1861}
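
/*
 * Illustrative note: a domain referenced by two devices behind the same
 * IOMMU has iommu_refcnt[iommu->seq_id] == 2; the domain-id in
 * iommu->domain_ids is allocated on the first attach and released again
 * only when the last of those devices detaches.
 */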
1862
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001863static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001864static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001865
Joseph Cihula51a63e62011-03-21 11:04:24 -07001866static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001867{
1868 struct pci_dev *pdev = NULL;
1869 struct iova *iova;
1870 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001871
Robin Murphy0fb5fe82015-01-12 17:51:16 +00001872 init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
1873 DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001874
Mark Gross8a443df2008-03-04 14:59:31 -08001875 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1876 &reserved_rbtree_key);
1877
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001878 /* IOAPIC ranges shouldn't be accessed by DMA */
1879 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1880 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001881 if (!iova) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001882 pr_err("Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001883 return -ENODEV;
1884 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001885
1886 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1887 for_each_pci_dev(pdev) {
1888 struct resource *r;
1889
1890 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1891 r = &pdev->resource[i];
1892 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1893 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001894 iova = reserve_iova(&reserved_iova_list,
1895 IOVA_PFN(r->start),
1896 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001897 if (!iova) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001898 pr_err("Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001899 return -ENODEV;
1900 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001901 }
1902 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001903 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001904}
1905
1906static void domain_reserve_special_ranges(struct dmar_domain *domain)
1907{
1908 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1909}
1910
1911static inline int guestwidth_to_adjustwidth(int gaw)
1912{
1913 int agaw;
1914 int r = (gaw - 12) % 9;
1915
1916 if (r == 0)
1917 agaw = gaw;
1918 else
1919 agaw = gaw + 9 - r;
1920 if (agaw > 64)
1921 agaw = 64;
1922 return agaw;
1923}
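
/*
 * Examples (illustrative): gaw = 48 gives r = (48 - 12) % 9 = 0 and is
 * returned unchanged, while gaw = 40 gives r = 1 and is rounded up to
 * 40 + 9 - 1 = 48; results above 64 are clamped to 64.
 */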
1924
Joerg Roedeldc534b22015-07-22 12:44:02 +02001925static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
1926 int guest_width)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001927{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001928 int adjust_width, agaw;
1929 unsigned long sagaw;
1930
Robin Murphy0fb5fe82015-01-12 17:51:16 +00001931 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
1932 DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001933 domain_reserve_special_ranges(domain);
1934
1935 /* calculate AGAW */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001936 if (guest_width > cap_mgaw(iommu->cap))
1937 guest_width = cap_mgaw(iommu->cap);
1938 domain->gaw = guest_width;
1939 adjust_width = guestwidth_to_adjustwidth(guest_width);
1940 agaw = width_to_agaw(adjust_width);
1941 sagaw = cap_sagaw(iommu->cap);
1942 if (!test_bit(agaw, &sagaw)) {
1943 /* hardware doesn't support it, choose a bigger one */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001944 pr_debug("Hardware doesn't support agaw %d\n", agaw);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001945 agaw = find_next_bit(&sagaw, 5, agaw);
1946 if (agaw >= 5)
1947 return -ENODEV;
1948 }
1949 domain->agaw = agaw;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001950
Weidong Han8e6040972008-12-08 15:49:06 +08001951 if (ecap_coherent(iommu->ecap))
1952 domain->iommu_coherency = 1;
1953 else
1954 domain->iommu_coherency = 0;
1955
Sheng Yang58c610b2009-03-18 15:33:05 +08001956 if (ecap_sc_support(iommu->ecap))
1957 domain->iommu_snooping = 1;
1958 else
1959 domain->iommu_snooping = 0;
1960
David Woodhouse214e39a2014-03-19 10:38:49 +00001961 if (intel_iommu_superpage)
1962 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1963 else
1964 domain->iommu_superpage = 0;
1965
Suresh Siddha4c923d42009-10-02 11:01:24 -07001966 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001967
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001968 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001969 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001970 if (!domain->pgd)
1971 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001972 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001973 return 0;
1974}
1975
1976static void domain_exit(struct dmar_domain *domain)
1977{
David Woodhouseea8ea462014-03-05 17:09:32 +00001978 struct page *freelist = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001979
1980	/* Domain 0 is reserved, so don't process it */
1981 if (!domain)
1982 return;
1983
Alex Williamson7b668352011-05-24 12:02:41 +01001984 /* Flush any lazy unmaps that may reference this domain */
Omer Pelegaa473242016-04-20 11:33:02 +03001985 if (!intel_iommu_strict) {
1986 int cpu;
1987
1988 for_each_possible_cpu(cpu)
1989 flush_unmaps_timeout(cpu);
1990 }
Alex Williamson7b668352011-05-24 12:02:41 +01001991
Joerg Roedeld160aca2015-07-22 11:52:53 +02001992 /* Remove associated devices and clear attached or cached domains */
1993 rcu_read_lock();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001994 domain_remove_dev_info(domain);
Joerg Roedeld160aca2015-07-22 11:52:53 +02001995 rcu_read_unlock();
Jiang Liu92d03cc2014-02-19 14:07:28 +08001996
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001997 /* destroy iovas */
1998 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001999
David Woodhouseea8ea462014-03-05 17:09:32 +00002000 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002001
David Woodhouseea8ea462014-03-05 17:09:32 +00002002 dma_free_pagelist(freelist);
2003
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002004 free_domain_mem(domain);
2005}
2006
David Woodhouse64ae8922014-03-09 12:52:30 -07002007static int domain_context_mapping_one(struct dmar_domain *domain,
2008 struct intel_iommu *iommu,
Joerg Roedel28ccce02015-07-21 14:45:31 +02002009 u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002010{
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002011 u16 did = domain->iommu_did[iommu->seq_id];
Joerg Roedel28ccce02015-07-21 14:45:31 +02002012 int translation = CONTEXT_TT_MULTI_LEVEL;
2013 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002014 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002015 unsigned long flags;
Weidong Hanea6606b2008-12-08 23:08:15 +08002016 struct dma_pte *pgd;
Joerg Roedel55d94042015-07-22 16:50:40 +02002017 int ret, agaw;
Joerg Roedel28ccce02015-07-21 14:45:31 +02002018
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002019 WARN_ON(did == 0);
2020
Joerg Roedel28ccce02015-07-21 14:45:31 +02002021 if (hw_pass_through && domain_type_is_si(domain))
2022 translation = CONTEXT_TT_PASS_THROUGH;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002023
2024 pr_debug("Set context mapping for %02x:%02x.%d\n",
2025 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002026
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002027 BUG_ON(!domain->pgd);
Weidong Han5331fe62008-12-08 23:00:00 +08002028
Joerg Roedel55d94042015-07-22 16:50:40 +02002029 spin_lock_irqsave(&device_domain_lock, flags);
2030 spin_lock(&iommu->lock);
2031
2032 ret = -ENOMEM;
David Woodhouse03ecc322015-02-13 14:35:21 +00002033 context = iommu_context_addr(iommu, bus, devfn, 1);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002034 if (!context)
Joerg Roedel55d94042015-07-22 16:50:40 +02002035 goto out_unlock;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002036
Joerg Roedel55d94042015-07-22 16:50:40 +02002037 ret = 0;
2038 if (context_present(context))
2039 goto out_unlock;
Joerg Roedelcf484d02015-06-12 12:21:46 +02002040
Xunlei Pangaec0e862016-12-05 20:09:07 +08002041 /*
2042 * For kdump cases, old valid entries may be cached due to the
2043 * in-flight DMA and copied pgtable, but there is no unmapping
2044 * behaviour for them, thus we need an explicit cache flush for
2045 * the newly-mapped device. For kdump, at this point, the device
2046 * is supposed to finish reset at its driver probe stage, so no
2047	 * in-flight DMA will exist, and we don't need to worry about it
2048	 * hereafter.
2049 */
2050 if (context_copied(context)) {
2051 u16 did_old = context_domain_id(context);
2052
2053 if (did_old >= 0 && did_old < cap_ndoms(iommu->cap))
2054 iommu->flush.flush_context(iommu, did_old,
2055 (((u16)bus) << 8) | devfn,
2056 DMA_CCMD_MASK_NOBIT,
2057 DMA_CCMD_DEVICE_INVL);
2058 }
2059
Weidong Hanea6606b2008-12-08 23:08:15 +08002060 pgd = domain->pgd;
2061
Joerg Roedelde24e552015-07-21 14:53:04 +02002062 context_clear_entry(context);
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002063 context_set_domain_id(context, did);
Weidong Hanea6606b2008-12-08 23:08:15 +08002064
Joerg Roedelde24e552015-07-21 14:53:04 +02002065 /*
2066	 * Skip top levels of page tables for an IOMMU whose agaw is smaller
2067	 * than the default. Unnecessary for pass-through (PT) mode.
2068 */
Yu Zhao93a23a72009-05-18 13:51:37 +08002069 if (translation != CONTEXT_TT_PASS_THROUGH) {
Joerg Roedelde24e552015-07-21 14:53:04 +02002070 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
Joerg Roedel55d94042015-07-22 16:50:40 +02002071 ret = -ENOMEM;
Joerg Roedelde24e552015-07-21 14:53:04 +02002072 pgd = phys_to_virt(dma_pte_addr(pgd));
Joerg Roedel55d94042015-07-22 16:50:40 +02002073 if (!dma_pte_present(pgd))
2074 goto out_unlock;
Joerg Roedelde24e552015-07-21 14:53:04 +02002075 }
2076
David Woodhouse64ae8922014-03-09 12:52:30 -07002077 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
David Woodhouseb16d0cb2015-10-12 14:17:37 +01002078 if (info && info->ats_supported)
2079 translation = CONTEXT_TT_DEV_IOTLB;
2080 else
2081 translation = CONTEXT_TT_MULTI_LEVEL;
Joerg Roedelde24e552015-07-21 14:53:04 +02002082
Yu Zhao93a23a72009-05-18 13:51:37 +08002083 context_set_address_root(context, virt_to_phys(pgd));
2084 context_set_address_width(context, iommu->agaw);
Joerg Roedelde24e552015-07-21 14:53:04 +02002085 } else {
2086 /*
2087 * In pass through mode, AW must be programmed to
2088 * indicate the largest AGAW value supported by
2089 * hardware. And ASR is ignored by hardware.
2090 */
2091 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08002092 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002093
2094 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00002095 context_set_fault_enable(context);
2096 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08002097 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002098
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002099 /*
2100 * It's a non-present to present mapping. If hardware doesn't cache
2101	 * non-present entries we only need to flush the write-buffer. If it
2102 * _does_ cache non-present entries, then it does so in the special
2103 * domain #0, which we have to flush:
2104 */
2105 if (cap_caching_mode(iommu->cap)) {
2106 iommu->flush.flush_context(iommu, 0,
2107 (((u16)bus) << 8) | devfn,
2108 DMA_CCMD_MASK_NOBIT,
2109 DMA_CCMD_DEVICE_INVL);
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002110 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002111 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002112 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002113 }
Yu Zhao93a23a72009-05-18 13:51:37 +08002114 iommu_enable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08002115
Joerg Roedel55d94042015-07-22 16:50:40 +02002116 ret = 0;
2117
2118out_unlock:
2119 spin_unlock(&iommu->lock);
2120 spin_unlock_irqrestore(&device_domain_lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08002121
Wei Yang5c365d12016-07-13 13:53:21 +00002122 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002123}
2124
Alex Williamson579305f2014-07-03 09:51:43 -06002125struct domain_context_mapping_data {
2126 struct dmar_domain *domain;
2127 struct intel_iommu *iommu;
Alex Williamson579305f2014-07-03 09:51:43 -06002128};
2129
2130static int domain_context_mapping_cb(struct pci_dev *pdev,
2131 u16 alias, void *opaque)
2132{
2133 struct domain_context_mapping_data *data = opaque;
2134
2135 return domain_context_mapping_one(data->domain, data->iommu,
Joerg Roedel28ccce02015-07-21 14:45:31 +02002136 PCI_BUS_NUM(alias), alias & 0xff);
Alex Williamson579305f2014-07-03 09:51:43 -06002137}
2138
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002139static int
Joerg Roedel28ccce02015-07-21 14:45:31 +02002140domain_context_mapping(struct dmar_domain *domain, struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002141{
David Woodhouse64ae8922014-03-09 12:52:30 -07002142 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002143 u8 bus, devfn;
Alex Williamson579305f2014-07-03 09:51:43 -06002144 struct domain_context_mapping_data data;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002145
David Woodhousee1f167f2014-03-09 15:24:46 -07002146 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse64ae8922014-03-09 12:52:30 -07002147 if (!iommu)
2148 return -ENODEV;
2149
Alex Williamson579305f2014-07-03 09:51:43 -06002150 if (!dev_is_pci(dev))
Joerg Roedel28ccce02015-07-21 14:45:31 +02002151 return domain_context_mapping_one(domain, iommu, bus, devfn);
Alex Williamson579305f2014-07-03 09:51:43 -06002152
2153 data.domain = domain;
2154 data.iommu = iommu;
Alex Williamson579305f2014-07-03 09:51:43 -06002155
2156 return pci_for_each_dma_alias(to_pci_dev(dev),
2157 &domain_context_mapping_cb, &data);
2158}
2159
2160static int domain_context_mapped_cb(struct pci_dev *pdev,
2161 u16 alias, void *opaque)
2162{
2163 struct intel_iommu *iommu = opaque;
2164
2165 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002166}
2167
David Woodhousee1f167f2014-03-09 15:24:46 -07002168static int domain_context_mapped(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002169{
Weidong Han5331fe62008-12-08 23:00:00 +08002170 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002171 u8 bus, devfn;
Weidong Han5331fe62008-12-08 23:00:00 +08002172
David Woodhousee1f167f2014-03-09 15:24:46 -07002173 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08002174 if (!iommu)
2175 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002176
Alex Williamson579305f2014-07-03 09:51:43 -06002177 if (!dev_is_pci(dev))
2178 return device_context_mapped(iommu, bus, devfn);
David Woodhousee1f167f2014-03-09 15:24:46 -07002179
Alex Williamson579305f2014-07-03 09:51:43 -06002180 return !pci_for_each_dma_alias(to_pci_dev(dev),
2181 domain_context_mapped_cb, iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002182}
2183
Fenghua Yuf5329592009-08-04 15:09:37 -07002184/* Returns a number of VTD pages, but aligned to MM page size */
2185static inline unsigned long aligned_nrpages(unsigned long host_addr,
2186 size_t size)
2187{
2188 host_addr &= ~PAGE_MASK;
2189 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
2190}
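
/*
 * Worked example (illustrative, 4KiB VT-d pages): host_addr = 0x1234
 * and size = 0x2000 keep an in-page offset of 0x234, so
 * PAGE_ALIGN(0x234 + 0x2000) = 0x3000 and the helper reports 3 pages
 * even though the size alone only spans 2.
 */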
2191
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002192/* Return largest possible superpage level for a given mapping */
2193static inline int hardware_largepage_caps(struct dmar_domain *domain,
2194 unsigned long iov_pfn,
2195 unsigned long phy_pfn,
2196 unsigned long pages)
2197{
2198 int support, level = 1;
2199 unsigned long pfnmerge;
2200
2201 support = domain->iommu_superpage;
2202
2203 /* To use a large page, the virtual *and* physical addresses
2204 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2205 of them will mean we have to use smaller pages. So just
2206 merge them and check both at once. */
2207 pfnmerge = iov_pfn | phy_pfn;
2208
2209 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2210 pages >>= VTD_STRIDE_SHIFT;
2211 if (!pages)
2212 break;
2213 pfnmerge >>= VTD_STRIDE_SHIFT;
2214 level++;
2215 support--;
2216 }
2217 return level;
2218}
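
/*
 * Worked example (illustrative): iov_pfn = 0x200 and phy_pfn = 0x400
 * merge to 0x600, whose low 9 bits are clear, so with pages = 512 and
 * one level of superpage support the function returns level 2 and a
 * 2MiB mapping can be used.
 */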
2219
David Woodhouse9051aa02009-06-29 12:30:54 +01002220static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2221 struct scatterlist *sg, unsigned long phys_pfn,
2222 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01002223{
2224 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01002225 phys_addr_t uninitialized_var(pteval);
Jiang Liucc4f14a2014-11-26 09:42:10 +08002226 unsigned long sg_res = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002227 unsigned int largepage_lvl = 0;
2228 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01002229
Jiang Liu162d1b12014-07-11 14:19:35 +08002230 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
David Woodhousee1605492009-06-29 11:17:38 +01002231
2232 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2233 return -EINVAL;
2234
2235 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2236
Jiang Liucc4f14a2014-11-26 09:42:10 +08002237 if (!sg) {
2238 sg_res = nr_pages;
David Woodhouse9051aa02009-06-29 12:30:54 +01002239 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2240 }
2241
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002242 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01002243 uint64_t tmp;
2244
David Woodhousee1605492009-06-29 11:17:38 +01002245 if (!sg_res) {
Fenghua Yuf5329592009-08-04 15:09:37 -07002246 sg_res = aligned_nrpages(sg->offset, sg->length);
David Woodhousee1605492009-06-29 11:17:38 +01002247 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2248 sg->dma_length = sg->length;
Dan Williams3e6110f2015-12-15 12:54:06 -08002249 pteval = page_to_phys(sg_page(sg)) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002250 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01002251 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002252
David Woodhousee1605492009-06-29 11:17:38 +01002253 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002254 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2255
David Woodhouse5cf0a762014-03-19 16:07:49 +00002256 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01002257 if (!pte)
2258 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002259			/* It is a large page */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002260 if (largepage_lvl > 1) {
Christian Zanderba2374f2015-06-10 09:41:45 -07002261 unsigned long nr_superpages, end_pfn;
2262
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002263 pteval |= DMA_PTE_LARGE_PAGE;
Jiang Liud41a4ad2014-07-11 14:19:34 +08002264 lvl_pages = lvl_to_nr_pages(largepage_lvl);
Christian Zanderba2374f2015-06-10 09:41:45 -07002265
2266 nr_superpages = sg_res / lvl_pages;
2267 end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
2268
Jiang Liud41a4ad2014-07-11 14:19:34 +08002269 /*
2270 * Ensure that old small page tables are
Christian Zanderba2374f2015-06-10 09:41:45 -07002271 * removed to make room for superpage(s).
Jiang Liud41a4ad2014-07-11 14:19:34 +08002272 */
Christian Zanderba2374f2015-06-10 09:41:45 -07002273 dma_pte_free_pagetable(domain, iov_pfn, end_pfn);
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002274 } else {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002275 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002276 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002277
David Woodhousee1605492009-06-29 11:17:38 +01002278 }
2279		/* We don't need a lock here; nobody else
2280 * touches the iova range
2281 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01002282 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01002283 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01002284 static int dumps = 5;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002285 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2286 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01002287 if (dumps) {
2288 dumps--;
2289 debug_dma_dump_mappings(NULL);
2290 }
2291 WARN_ON(1);
2292 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002293
2294 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2295
2296 BUG_ON(nr_pages < lvl_pages);
2297 BUG_ON(sg_res < lvl_pages);
2298
2299 nr_pages -= lvl_pages;
2300 iov_pfn += lvl_pages;
2301 phys_pfn += lvl_pages;
2302 pteval += lvl_pages * VTD_PAGE_SIZE;
2303 sg_res -= lvl_pages;
2304
2305 /* If the next PTE would be the first in a new page, then we
2306 need to flush the cache on the entries we've just written.
2307 And then we'll need to recalculate 'pte', so clear it and
2308 let it get set again in the if (!pte) block above.
2309
2310 If we're done (!nr_pages) we need to flush the cache too.
2311
2312 Also if we've been setting superpages, we may need to
2313 recalculate 'pte' and switch back to smaller pages for the
2314 end of the mapping, if the trailing size is not enough to
2315 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01002316 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002317 if (!nr_pages || first_pte_in_page(pte) ||
2318 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01002319 domain_flush_cache(domain, first_pte,
2320 (void *)pte - (void *)first_pte);
2321 pte = NULL;
2322 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002323
2324 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01002325 sg = sg_next(sg);
2326 }
2327 return 0;
2328}
2329
David Woodhouse9051aa02009-06-29 12:30:54 +01002330static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2331 struct scatterlist *sg, unsigned long nr_pages,
2332 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002333{
David Woodhouse9051aa02009-06-29 12:30:54 +01002334 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2335}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002336
David Woodhouse9051aa02009-06-29 12:30:54 +01002337static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2338 unsigned long phys_pfn, unsigned long nr_pages,
2339 int prot)
2340{
2341 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002342}
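
/*
 * Illustrative sketch (hypothetical helper, kept inside #if 0): mapping
 * a contiguous physical range with the wrappers above.  The pfn values
 * are made up; the return value follows the usual -errno convention of
 * __domain_mapping().
 */
#if 0
static int example_map_contiguous(struct dmar_domain *domain)
{
	/* Map 16 pages at IOVA pfn 0x10000 to physical pfn 0x80000, RW. */
	return domain_pfn_mapping(domain, 0x10000, 0x80000, 16,
				  DMA_PTE_READ | DMA_PTE_WRITE);
}
#endif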
2343
Joerg Roedel2452d9d2015-07-23 16:20:14 +02002344static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002345{
Weidong Hanc7151a82008-12-08 22:51:37 +08002346 if (!iommu)
2347 return;
Weidong Han8c11e792008-12-08 15:29:22 +08002348
2349 clear_context_table(iommu, bus, devfn);
2350 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002351 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002352 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002353}
2354
David Woodhouse109b9b02012-05-25 17:43:02 +01002355static inline void unlink_domain_info(struct device_domain_info *info)
2356{
2357 assert_spin_locked(&device_domain_lock);
2358 list_del(&info->link);
2359 list_del(&info->global);
2360 if (info->dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002361 info->dev->archdata.iommu = NULL;
David Woodhouse109b9b02012-05-25 17:43:02 +01002362}
2363
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002364static void domain_remove_dev_info(struct dmar_domain *domain)
2365{
Yijing Wang3a74ca02014-05-20 20:37:47 +08002366 struct device_domain_info *info, *tmp;
Jiang Liufb170fb2014-07-11 14:19:28 +08002367 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002368
2369 spin_lock_irqsave(&device_domain_lock, flags);
Joerg Roedel76f45fe2015-07-21 18:25:11 +02002370 list_for_each_entry_safe(info, tmp, &domain->devices, link)
Joerg Roedel127c7612015-07-23 17:44:46 +02002371 __dmar_remove_one_dev_info(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002372 spin_unlock_irqrestore(&device_domain_lock, flags);
2373}
2374
2375/*
2376 * find_domain
David Woodhouse1525a292014-03-06 16:19:30 +00002377 * Note: struct device->archdata.iommu stores the info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002378 */
David Woodhouse1525a292014-03-06 16:19:30 +00002379static struct dmar_domain *find_domain(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002380{
2381 struct device_domain_info *info;
2382
2383 /* No lock here, assumes no domain exit in normal case */
David Woodhouse1525a292014-03-06 16:19:30 +00002384 info = dev->archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002385 if (info)
2386 return info->domain;
2387 return NULL;
2388}
2389
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002390static inline struct device_domain_info *
Jiang Liu745f2582014-02-19 14:07:26 +08002391dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2392{
2393 struct device_domain_info *info;
2394
2395 list_for_each_entry(info, &device_domain_list, global)
David Woodhouse41e80dca2014-03-09 13:55:54 -07002396 if (info->iommu->segment == segment && info->bus == bus &&
Jiang Liu745f2582014-02-19 14:07:26 +08002397 info->devfn == devfn)
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002398 return info;
Jiang Liu745f2582014-02-19 14:07:26 +08002399
2400 return NULL;
2401}
2402
Joerg Roedel5db31562015-07-22 12:40:43 +02002403static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2404 int bus, int devfn,
2405 struct device *dev,
2406 struct dmar_domain *domain)
Jiang Liu745f2582014-02-19 14:07:26 +08002407{
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002408 struct dmar_domain *found = NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002409 struct device_domain_info *info;
2410 unsigned long flags;
Joerg Roedeld160aca2015-07-22 11:52:53 +02002411 int ret;
Jiang Liu745f2582014-02-19 14:07:26 +08002412
2413 info = alloc_devinfo_mem();
2414 if (!info)
David Woodhouseb718cd32014-03-09 13:11:33 -07002415 return NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002416
Jiang Liu745f2582014-02-19 14:07:26 +08002417 info->bus = bus;
2418 info->devfn = devfn;
David Woodhouseb16d0cb2015-10-12 14:17:37 +01002419 info->ats_supported = info->pasid_supported = info->pri_supported = 0;
2420 info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
2421 info->ats_qdep = 0;
Jiang Liu745f2582014-02-19 14:07:26 +08002422 info->dev = dev;
2423 info->domain = domain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002424 info->iommu = iommu;
Jiang Liu745f2582014-02-19 14:07:26 +08002425
David Woodhouseb16d0cb2015-10-12 14:17:37 +01002426 if (dev && dev_is_pci(dev)) {
2427 struct pci_dev *pdev = to_pci_dev(info->dev);
2428
2429 if (ecap_dev_iotlb_support(iommu->ecap) &&
2430 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
2431 dmar_find_matched_atsr_unit(pdev))
2432 info->ats_supported = 1;
2433
2434 if (ecs_enabled(iommu)) {
2435 if (pasid_enabled(iommu)) {
2436 int features = pci_pasid_features(pdev);
2437 if (features >= 0)
2438 info->pasid_supported = features | 1;
2439 }
2440
2441 if (info->ats_supported && ecap_prs(iommu->ecap) &&
2442 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
2443 info->pri_supported = 1;
2444 }
2445 }
2446
Jiang Liu745f2582014-02-19 14:07:26 +08002447 spin_lock_irqsave(&device_domain_lock, flags);
2448 if (dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002449 found = find_domain(dev);
Joerg Roedelf303e502015-07-23 18:37:13 +02002450
2451 if (!found) {
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002452 struct device_domain_info *info2;
David Woodhouse41e80dca2014-03-09 13:55:54 -07002453 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
Joerg Roedelf303e502015-07-23 18:37:13 +02002454 if (info2) {
2455 found = info2->domain;
2456 info2->dev = dev;
2457 }
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002458 }
Joerg Roedelf303e502015-07-23 18:37:13 +02002459
Jiang Liu745f2582014-02-19 14:07:26 +08002460 if (found) {
2461 spin_unlock_irqrestore(&device_domain_lock, flags);
2462 free_devinfo_mem(info);
David Woodhouseb718cd32014-03-09 13:11:33 -07002463 /* Caller must free the original domain */
2464 return found;
Jiang Liu745f2582014-02-19 14:07:26 +08002465 }
2466
Joerg Roedeld160aca2015-07-22 11:52:53 +02002467 spin_lock(&iommu->lock);
2468 ret = domain_attach_iommu(domain, iommu);
2469 spin_unlock(&iommu->lock);
2470
2471 if (ret) {
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002472 spin_unlock_irqrestore(&device_domain_lock, flags);
Sudip Mukherjee499f3aa2015-09-18 16:27:07 +05302473 free_devinfo_mem(info);
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002474 return NULL;
2475 }
Joerg Roedelc6c2ceb2015-07-22 13:11:53 +02002476
David Woodhouseb718cd32014-03-09 13:11:33 -07002477 list_add(&info->link, &domain->devices);
2478 list_add(&info->global, &device_domain_list);
2479 if (dev)
2480 dev->archdata.iommu = info;
2481 spin_unlock_irqrestore(&device_domain_lock, flags);
2482
Joerg Roedelcc4e2572015-07-22 10:04:36 +02002483 if (dev && domain_context_mapping(domain, dev)) {
2484 pr_err("Domain context map for %s failed\n", dev_name(dev));
Joerg Roedele6de0f82015-07-22 16:30:36 +02002485 dmar_remove_one_dev_info(domain, dev);
Joerg Roedelcc4e2572015-07-22 10:04:36 +02002486 return NULL;
2487 }
2488
David Woodhouseb718cd32014-03-09 13:11:33 -07002489 return domain;
Jiang Liu745f2582014-02-19 14:07:26 +08002490}
2491
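/*
 * pci_for_each_dma_alias() callback: record each alias as it is visited,
 * leaving the caller with the last alias reported for the device.
 */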
Alex Williamson579305f2014-07-03 09:51:43 -06002492static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2493{
2494 *(u16 *)opaque = alias;
2495 return 0;
2496}
2497
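/*
 * Find the domain already used by the device's DMA alias, or allocate and
 * initialise a new one with the requested guest address width.  The domain
 * returned here is not yet bound to the device itself; that is done by
 * set_domain_for_dev().
 */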
Joerg Roedel76208352016-08-25 14:25:12 +02002498static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002499{
Joerg Roedelcc4e2572015-07-22 10:04:36 +02002500 struct device_domain_info *info = NULL;
Joerg Roedel76208352016-08-25 14:25:12 +02002501 struct dmar_domain *domain = NULL;
Alex Williamson579305f2014-07-03 09:51:43 -06002502 struct intel_iommu *iommu;
Joerg Roedel08a7f452015-07-23 18:09:11 +02002503 u16 req_id, dma_alias;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002504 unsigned long flags;
Yijing Wangaa4d0662014-05-26 20:14:06 +08002505 u8 bus, devfn;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002506
David Woodhouse146922e2014-03-09 15:44:17 -07002507 iommu = device_to_iommu(dev, &bus, &devfn);
2508 if (!iommu)
Alex Williamson579305f2014-07-03 09:51:43 -06002509 return NULL;
2510
Joerg Roedel08a7f452015-07-23 18:09:11 +02002511 req_id = ((u16)bus << 8) | devfn;
2512
Alex Williamson579305f2014-07-03 09:51:43 -06002513 if (dev_is_pci(dev)) {
2514 struct pci_dev *pdev = to_pci_dev(dev);
2515
2516 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2517
2518 spin_lock_irqsave(&device_domain_lock, flags);
2519 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2520 PCI_BUS_NUM(dma_alias),
2521 dma_alias & 0xff);
2522 if (info) {
2523 iommu = info->iommu;
2524 domain = info->domain;
2525 }
2526 spin_unlock_irqrestore(&device_domain_lock, flags);
2527
Joerg Roedel76208352016-08-25 14:25:12 +02002528 /* DMA alias already has a domain, use it */
Alex Williamson579305f2014-07-03 09:51:43 -06002529 if (info)
Joerg Roedel76208352016-08-25 14:25:12 +02002530 goto out;
Alex Williamson579305f2014-07-03 09:51:43 -06002531 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002532
David Woodhouse146922e2014-03-09 15:44:17 -07002533 /* Allocate and initialize new domain for the device */
Jiang Liuab8dfe22014-07-11 14:19:27 +08002534 domain = alloc_domain(0);
Jiang Liu745f2582014-02-19 14:07:26 +08002535 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002536 return NULL;
Joerg Roedeldc534b22015-07-22 12:44:02 +02002537 if (domain_init(domain, iommu, gaw)) {
Alex Williamson579305f2014-07-03 09:51:43 -06002538 domain_exit(domain);
2539 return NULL;
2540 }
2541
Joerg Roedel76208352016-08-25 14:25:12 +02002542out:
Alex Williamson579305f2014-07-03 09:51:43 -06002543
Joerg Roedel76208352016-08-25 14:25:12 +02002544 return domain;
2545}
2546
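/*
 * Attach @domain to @dev: insert device_domain_info entries first for the
 * device's PCI DMA alias (when it differs from the device's own requester
 * ID) and then for the device itself.  Returns the domain actually in use,
 * which may be an existing one found by dmar_insert_one_dev_info().
 */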
2547static struct dmar_domain *set_domain_for_dev(struct device *dev,
2548 struct dmar_domain *domain)
2549{
2550 struct intel_iommu *iommu;
2551 struct dmar_domain *tmp;
2552 u16 req_id, dma_alias;
2553 u8 bus, devfn;
2554
2555 iommu = device_to_iommu(dev, &bus, &devfn);
2556 if (!iommu)
2557 return NULL;
2558
2559 req_id = ((u16)bus << 8) | devfn;
2560
2561 if (dev_is_pci(dev)) {
2562 struct pci_dev *pdev = to_pci_dev(dev);
2563
2564 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2565
2566 /* register PCI DMA alias device */
2567 if (req_id != dma_alias) {
2568 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2569 dma_alias & 0xff, NULL, domain);
2570
2571 if (!tmp || tmp != domain)
2572 return tmp;
Alex Williamson579305f2014-07-03 09:51:43 -06002573 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002574 }
2575
Joerg Roedel5db31562015-07-22 12:40:43 +02002576 tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
Joerg Roedel76208352016-08-25 14:25:12 +02002577 if (!tmp || tmp != domain)
2578 return tmp;
Alex Williamson579305f2014-07-03 09:51:43 -06002579
Joerg Roedel76208352016-08-25 14:25:12 +02002580 return domain;
2581}
2582
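/*
 * Return the DMA domain for @dev, reusing an existing one when possible and
 * otherwise allocating a fresh domain and binding it to the device.
 */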
2583static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2584{
2585 struct dmar_domain *domain, *tmp;
2586
2587 domain = find_domain(dev);
2588 if (domain)
2589 goto out;
2590
2591 domain = find_or_alloc_domain(dev, gaw);
2592 if (!domain)
2593 goto out;
2594
2595 tmp = set_domain_for_dev(dev, domain);
2596 if (!tmp || domain != tmp) {
Alex Williamson579305f2014-07-03 09:51:43 -06002597 domain_exit(domain);
2598 domain = tmp;
2599 }
David Woodhouseb718cd32014-03-09 13:11:33 -07002600
Joerg Roedel76208352016-08-25 14:25:12 +02002601out:
2602
David Woodhouseb718cd32014-03-09 13:11:33 -07002603 return domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002604}
2605
David Woodhouseb2132032009-06-26 18:50:28 +01002606static int iommu_domain_identity_map(struct dmar_domain *domain,
2607 unsigned long long start,
2608 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002609{
David Woodhousec5395d52009-06-28 16:35:56 +01002610 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2611 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002612
David Woodhousec5395d52009-06-28 16:35:56 +01002613 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2614 dma_to_mm_pfn(last_vpfn))) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002615 pr_err("Reserving iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002616 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002617 }
2618
Joerg Roedelaf1089c2015-07-21 15:45:19 +02002619 pr_debug("Mapping reserved region %llx-%llx\n", start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002620 /*
2621	 * The RMRR range might overlap with a physical memory range,
2622	 * so clear any existing mappings there first
2623 */
David Woodhousec5395d52009-06-28 16:35:56 +01002624 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002625
David Woodhousec5395d52009-06-28 16:35:56 +01002626 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2627 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002628 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002629}
2630
Joerg Roedeld66ce542015-09-23 19:00:10 +02002631static int domain_prepare_identity_map(struct device *dev,
2632 struct dmar_domain *domain,
2633 unsigned long long start,
2634 unsigned long long end)
David Woodhouseb2132032009-06-26 18:50:28 +01002635{
David Woodhouse19943b02009-08-04 16:19:20 +01002636 /* For _hardware_ passthrough, don't bother. But for software
2637 passthrough, we do it anyway -- it may indicate a memory
2638	   range which is reserved in E820 and therefore didn't get set
2639 up to start with in si_domain */
2640 if (domain == si_domain && hw_pass_through) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002641 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2642 dev_name(dev), start, end);
David Woodhouse19943b02009-08-04 16:19:20 +01002643 return 0;
2644 }
2645
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002646 pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2647 dev_name(dev), start, end);
2648
David Woodhouse5595b522009-12-02 09:21:55 +00002649 if (end < start) {
2650 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2651 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2652 dmi_get_system_info(DMI_BIOS_VENDOR),
2653 dmi_get_system_info(DMI_BIOS_VERSION),
2654 dmi_get_system_info(DMI_PRODUCT_VERSION));
Joerg Roedeld66ce542015-09-23 19:00:10 +02002655 return -EIO;
David Woodhouse5595b522009-12-02 09:21:55 +00002656 }
2657
David Woodhouse2ff729f2009-08-26 14:25:41 +01002658 if (end >> agaw_to_width(domain->agaw)) {
2659 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2660 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2661 agaw_to_width(domain->agaw),
2662 dmi_get_system_info(DMI_BIOS_VENDOR),
2663 dmi_get_system_info(DMI_BIOS_VERSION),
2664 dmi_get_system_info(DMI_PRODUCT_VERSION));
Joerg Roedeld66ce542015-09-23 19:00:10 +02002665 return -EIO;
David Woodhouse2ff729f2009-08-26 14:25:41 +01002666 }
David Woodhouse19943b02009-08-04 16:19:20 +01002667
Joerg Roedeld66ce542015-09-23 19:00:10 +02002668 return iommu_domain_identity_map(domain, start, end);
2669}
2670
2671static int iommu_prepare_identity_map(struct device *dev,
2672 unsigned long long start,
2673 unsigned long long end)
2674{
2675 struct dmar_domain *domain;
2676 int ret;
2677
2678 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2679 if (!domain)
2680 return -ENOMEM;
2681
2682 ret = domain_prepare_identity_map(dev, domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002683 if (ret)
Joerg Roedeld66ce542015-09-23 19:00:10 +02002684 domain_exit(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002685
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002686 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002687}
2688
2689static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
David Woodhouse0b9d9752014-03-09 15:48:15 -07002690 struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002691{
David Woodhouse0b9d9752014-03-09 15:48:15 -07002692 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002693 return 0;
David Woodhouse0b9d9752014-03-09 15:48:15 -07002694 return iommu_prepare_identity_map(dev, rmrr->base_address,
2695 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002696}
2697
Suresh Siddhad3f13812011-08-23 17:05:25 -07002698#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002699static inline void iommu_prepare_isa(void)
2700{
2701 struct pci_dev *pdev;
2702 int ret;
2703
2704 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2705 if (!pdev)
2706 return;
2707
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002708 pr_info("Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse0b9d9752014-03-09 15:48:15 -07002709 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002710
2711 if (ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002712 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002713
Yijing Wang9b27e822014-05-20 20:37:52 +08002714 pci_dev_put(pdev);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002715}
2716#else
2717static inline void iommu_prepare_isa(void)
2718{
2719 return;
2720}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002721#endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002722
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002723static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002724
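/*
 * Set up the static identity (si) domain used for 1:1 mappings.  For
 * hardware pass-through nothing needs to be mapped here; for software
 * pass-through the memory ranges of every online node are identity
 * mapped into the domain.
 */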
Matt Kraai071e1372009-08-23 22:30:22 -07002725static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002726{
David Woodhousec7ab48d2009-06-26 19:10:36 +01002727 int nid, ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002728
Jiang Liuab8dfe22014-07-11 14:19:27 +08002729 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002730 if (!si_domain)
2731 return -EFAULT;
2732
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002733 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2734 domain_exit(si_domain);
2735 return -EFAULT;
2736 }
2737
Joerg Roedel0dc79712015-07-21 15:40:06 +02002738 pr_debug("Identity mapping domain allocated\n");
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002739
David Woodhouse19943b02009-08-04 16:19:20 +01002740 if (hw)
2741 return 0;
2742
David Woodhousec7ab48d2009-06-26 19:10:36 +01002743 for_each_online_node(nid) {
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002744 unsigned long start_pfn, end_pfn;
2745 int i;
2746
2747 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2748 ret = iommu_domain_identity_map(si_domain,
2749 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2750 if (ret)
2751 return ret;
2752 }
David Woodhousec7ab48d2009-06-26 19:10:36 +01002753 }
2754
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002755 return 0;
2756}
2757
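/* Return 1 if @dev is currently attached to the static identity domain. */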
David Woodhouse9b226622014-03-09 14:03:28 -07002758static int identity_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002759{
2760 struct device_domain_info *info;
2761
2762 if (likely(!iommu_identity_mapping))
2763 return 0;
2764
David Woodhouse9b226622014-03-09 14:03:28 -07002765 info = dev->archdata.iommu;
Mike Traviscb452a42011-05-28 13:15:03 -05002766 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2767 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002768
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002769 return 0;
2770}
2771
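/*
 * Attach @dev to @domain.  Returns -ENODEV if no IOMMU covers the device,
 * or -EBUSY if the device ends up in a different domain.
 */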
Joerg Roedel28ccce02015-07-21 14:45:31 +02002772static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002773{
David Woodhouse0ac72662014-03-09 13:19:22 -07002774 struct dmar_domain *ndomain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002775 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002776 u8 bus, devfn;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002777
David Woodhouse5913c9b2014-03-09 16:27:31 -07002778 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002779 if (!iommu)
2780 return -ENODEV;
2781
Joerg Roedel5db31562015-07-22 12:40:43 +02002782 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
David Woodhouse0ac72662014-03-09 13:19:22 -07002783 if (ndomain != domain)
2784 return -EBUSY;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002785
2786 return 0;
2787}
2788
David Woodhouse0b9d9752014-03-09 15:48:15 -07002789static bool device_has_rmrr(struct device *dev)
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002790{
2791 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002792 struct device *tmp;
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002793 int i;
2794
Jiang Liu0e242612014-02-19 14:07:34 +08002795 rcu_read_lock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002796 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002797 /*
2798 * Return TRUE if this RMRR contains the device that
2799 * is passed in.
2800 */
2801 for_each_active_dev_scope(rmrr->devices,
2802 rmrr->devices_cnt, i, tmp)
David Woodhouse0b9d9752014-03-09 15:48:15 -07002803 if (tmp == dev) {
Jiang Liu0e242612014-02-19 14:07:34 +08002804 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002805 return true;
Jiang Liub683b232014-02-19 14:07:32 +08002806 }
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002807 }
Jiang Liu0e242612014-02-19 14:07:34 +08002808 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002809 return false;
2810}
2811
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002812/*
2813 * There are a couple cases where we need to restrict the functionality of
2814 * devices associated with RMRRs. The first is when evaluating a device for
2815 * identity mapping because problems exist when devices are moved in and out
2816 * of domains and their respective RMRR information is lost. This means that
2817 * a device with associated RMRRs will never be in a "passthrough" domain.
2818 * The second is use of the device through the IOMMU API. This interface
2819 * expects to have full control of the IOVA space for the device. We cannot
2820 * satisfy both the requirement that RMRR access is maintained and have an
2821 * unencumbered IOVA space. We also have no ability to quiesce the device's
2822 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2823 * We therefore prevent devices associated with an RMRR from participating in
2824 * the IOMMU API, which eliminates them from device assignment.
2825 *
2826 * In both cases we assume that PCI USB devices with RMRRs have them largely
2827 * for historical reasons and that the RMRR space is not actively used post
2828 * boot. This exclusion may change if vendors begin to abuse it.
David Woodhouse18436af2015-03-25 15:05:47 +00002829 *
2830 * The same exception is made for graphics devices, with the requirement that
2831 * any use of the RMRR regions will be torn down before assigning the device
2832 * to a guest.
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002833 */
2834static bool device_is_rmrr_locked(struct device *dev)
2835{
2836 if (!device_has_rmrr(dev))
2837 return false;
2838
2839 if (dev_is_pci(dev)) {
2840 struct pci_dev *pdev = to_pci_dev(dev);
2841
David Woodhouse18436af2015-03-25 15:05:47 +00002842 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002843 return false;
2844 }
2845
2846 return true;
2847}
2848
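/*
 * Decide whether @dev should get a 1:1 (identity) mapping.  RMRR-locked
 * devices never do; Azalia and graphics devices may be forced to by the
 * IDENTMAP_AZALIA/IDENTMAP_GFX flags; otherwise the decision depends on
 * IDENTMAP_ALL, the device's position in the PCI topology and, once boot
 * is complete, on whether its DMA mask covers all of memory.
 */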
David Woodhouse3bdb2592014-03-09 16:03:08 -07002849static int iommu_should_identity_map(struct device *dev, int startup)
David Woodhouse6941af22009-07-04 18:24:27 +01002850{
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002851
David Woodhouse3bdb2592014-03-09 16:03:08 -07002852 if (dev_is_pci(dev)) {
2853 struct pci_dev *pdev = to_pci_dev(dev);
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002854
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002855 if (device_is_rmrr_locked(dev))
David Woodhouse3bdb2592014-03-09 16:03:08 -07002856 return 0;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002857
David Woodhouse3bdb2592014-03-09 16:03:08 -07002858 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2859 return 1;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002860
David Woodhouse3bdb2592014-03-09 16:03:08 -07002861 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2862 return 1;
2863
2864 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2865 return 0;
2866
2867 /*
2868 * We want to start off with all devices in the 1:1 domain, and
2869 * take them out later if we find they can't access all of memory.
2870 *
2871 * However, we can't do this for PCI devices behind bridges,
2872 * because all PCI devices behind the same bridge will end up
2873 * with the same source-id on their transactions.
2874 *
2875 * Practically speaking, we can't change things around for these
2876 * devices at run-time, because we can't be sure there'll be no
2877 * DMA transactions in flight for any of their siblings.
2878 *
2879 * So PCI devices (unless they're on the root bus) as well as
2880 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2881 * the 1:1 domain, just in _case_ one of their siblings turns out
2882 * not to be able to map all of memory.
2883 */
2884 if (!pci_is_pcie(pdev)) {
2885 if (!pci_is_root_bus(pdev->bus))
2886 return 0;
2887 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2888 return 0;
2889 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2890 return 0;
2891 } else {
2892 if (device_has_rmrr(dev))
2893 return 0;
2894 }
David Woodhouse6941af22009-07-04 18:24:27 +01002895
David Woodhouse3dfc8132009-07-04 19:11:08 +01002896 /*
David Woodhouse3dfc8132009-07-04 19:11:08 +01002897 * At boot time, we don't yet know if devices will be 64-bit capable.
David Woodhouse3bdb2592014-03-09 16:03:08 -07002898 * Assume that they will — if they turn out not to be, then we can
David Woodhouse3dfc8132009-07-04 19:11:08 +01002899 * take them out of the 1:1 domain later.
2900 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002901 if (!startup) {
2902 /*
2903 * If the device's dma_mask is less than the system's memory
2904 * size then this is not a candidate for identity mapping.
2905 */
David Woodhouse3bdb2592014-03-09 16:03:08 -07002906 u64 dma_mask = *dev->dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002907
David Woodhouse3bdb2592014-03-09 16:03:08 -07002908 if (dev->coherent_dma_mask &&
2909 dev->coherent_dma_mask < dma_mask)
2910 dma_mask = dev->coherent_dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002911
David Woodhouse3bdb2592014-03-09 16:03:08 -07002912 return dma_mask >= dma_get_required_mask(dev);
Chris Wright8fcc5372011-05-28 13:15:02 -05002913 }
David Woodhouse6941af22009-07-04 18:24:27 +01002914
2915 return 1;
2916}
2917
David Woodhousecf04eee2014-03-21 16:49:04 +00002918static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2919{
2920 int ret;
2921
2922 if (!iommu_should_identity_map(dev, 1))
2923 return 0;
2924
Joerg Roedel28ccce02015-07-21 14:45:31 +02002925 ret = domain_add_dev_info(si_domain, dev);
David Woodhousecf04eee2014-03-21 16:49:04 +00002926 if (!ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002927 pr_info("%s identity mapping for device %s\n",
2928 hw ? "Hardware" : "Software", dev_name(dev));
David Woodhousecf04eee2014-03-21 16:49:04 +00002929 else if (ret == -ENODEV)
2930 /* device not associated with an iommu */
2931 ret = 0;
2932
2933 return ret;
2934}
2935
2936
Matt Kraai071e1372009-08-23 22:30:22 -07002937static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002938{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002939 struct pci_dev *pdev = NULL;
David Woodhousecf04eee2014-03-21 16:49:04 +00002940 struct dmar_drhd_unit *drhd;
2941 struct intel_iommu *iommu;
2942 struct device *dev;
2943 int i;
2944 int ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002945
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002946 for_each_pci_dev(pdev) {
David Woodhousecf04eee2014-03-21 16:49:04 +00002947 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2948 if (ret)
2949 return ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002950 }
2951
David Woodhousecf04eee2014-03-21 16:49:04 +00002952 for_each_active_iommu(iommu, drhd)
2953 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2954 struct acpi_device_physical_node *pn;
2955 struct acpi_device *adev;
2956
2957 if (dev->bus != &acpi_bus_type)
2958 continue;
Joerg Roedel86080cc2015-06-12 12:27:16 +02002959
David Woodhousecf04eee2014-03-21 16:49:04 +00002960		adev = to_acpi_device(dev);
2961 mutex_lock(&adev->physical_node_lock);
2962 list_for_each_entry(pn, &adev->physical_node_list, node) {
2963 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2964 if (ret)
2965 break;
2966 }
2967 mutex_unlock(&adev->physical_node_lock);
2968 if (ret)
2969 return ret;
2970 }
2971
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002972 return 0;
2973}
2974
Jiang Liuffebeb42014-11-09 22:48:02 +08002975static void intel_iommu_init_qi(struct intel_iommu *iommu)
2976{
2977 /*
2978 * Start from the sane iommu hardware state.
2979 * If the queued invalidation is already initialized by us
2980 * (for example, while enabling interrupt-remapping) then
2981	 * things are already rolling from a sane state.
2982 */
2983 if (!iommu->qi) {
2984 /*
2985 * Clear any previous faults.
2986 */
2987 dmar_fault(-1, iommu);
2988 /*
2989 * Disable queued invalidation if supported and already enabled
2990 * before OS handover.
2991 */
2992 dmar_disable_qi(iommu);
2993 }
2994
2995 if (dmar_enable_qi(iommu)) {
2996 /*
2997 * Queued Invalidate not enabled, use Register Based Invalidate
2998 */
2999 iommu->flush.flush_context = __iommu_flush_context;
3000 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003001 pr_info("%s: Using Register based invalidation\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08003002 iommu->name);
3003 } else {
3004 iommu->flush.flush_context = qi_flush_context;
3005 iommu->flush.flush_iotlb = qi_flush_iotlb;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003006 pr_info("%s: Using Queued invalidation\n", iommu->name);
Jiang Liuffebeb42014-11-09 22:48:02 +08003007 }
3008}
3009
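/*
 * Copy one bus's worth of context entries from the previous kernel's
 * tables (@old_re) into freshly allocated pages.  Present entries are
 * marked as copied and have PASIDs disabled so the new kernel can tell
 * them apart from entries it created itself, and any domain IDs found
 * are reserved in the IOMMU's domain_ids bitmap.
 */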
Joerg Roedel091d42e2015-06-12 11:56:10 +02003010static int copy_context_table(struct intel_iommu *iommu,
Dan Williamsdfddb9692015-10-09 18:16:46 -04003011 struct root_entry *old_re,
Joerg Roedel091d42e2015-06-12 11:56:10 +02003012 struct context_entry **tbl,
3013 int bus, bool ext)
3014{
Joerg Roedeldbcd8612015-06-12 12:02:09 +02003015 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
Joerg Roedel543c8dc2015-08-13 11:56:59 +02003016 struct context_entry *new_ce = NULL, ce;
Dan Williamsdfddb9692015-10-09 18:16:46 -04003017 struct context_entry *old_ce = NULL;
Joerg Roedel543c8dc2015-08-13 11:56:59 +02003018 struct root_entry re;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003019 phys_addr_t old_ce_phys;
3020
3021 tbl_idx = ext ? bus * 2 : bus;
Dan Williamsdfddb9692015-10-09 18:16:46 -04003022 memcpy(&re, old_re, sizeof(re));
Joerg Roedel091d42e2015-06-12 11:56:10 +02003023
3024 for (devfn = 0; devfn < 256; devfn++) {
3025 /* First calculate the correct index */
3026 idx = (ext ? devfn * 2 : devfn) % 256;
3027
3028 if (idx == 0) {
3029 /* First save what we may have and clean up */
3030 if (new_ce) {
3031 tbl[tbl_idx] = new_ce;
3032 __iommu_flush_cache(iommu, new_ce,
3033 VTD_PAGE_SIZE);
3034 pos = 1;
3035 }
3036
3037 if (old_ce)
3038 iounmap(old_ce);
3039
3040 ret = 0;
3041 if (devfn < 0x80)
Joerg Roedel543c8dc2015-08-13 11:56:59 +02003042 old_ce_phys = root_entry_lctp(&re);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003043 else
Joerg Roedel543c8dc2015-08-13 11:56:59 +02003044 old_ce_phys = root_entry_uctp(&re);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003045
3046 if (!old_ce_phys) {
3047 if (ext && devfn == 0) {
3048 /* No LCTP, try UCTP */
3049 devfn = 0x7f;
3050 continue;
3051 } else {
3052 goto out;
3053 }
3054 }
3055
3056 ret = -ENOMEM;
Dan Williamsdfddb9692015-10-09 18:16:46 -04003057 old_ce = memremap(old_ce_phys, PAGE_SIZE,
3058 MEMREMAP_WB);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003059 if (!old_ce)
3060 goto out;
3061
3062 new_ce = alloc_pgtable_page(iommu->node);
3063 if (!new_ce)
3064 goto out_unmap;
3065
3066 ret = 0;
3067 }
3068
3069 /* Now copy the context entry */
Dan Williamsdfddb9692015-10-09 18:16:46 -04003070 memcpy(&ce, old_ce + idx, sizeof(ce));
Joerg Roedel091d42e2015-06-12 11:56:10 +02003071
Joerg Roedelcf484d02015-06-12 12:21:46 +02003072 if (!__context_present(&ce))
Joerg Roedel091d42e2015-06-12 11:56:10 +02003073 continue;
3074
Joerg Roedeldbcd8612015-06-12 12:02:09 +02003075 did = context_domain_id(&ce);
3076 if (did >= 0 && did < cap_ndoms(iommu->cap))
3077 set_bit(did, iommu->domain_ids);
3078
Joerg Roedelcf484d02015-06-12 12:21:46 +02003079 /*
3080 * We need a marker for copied context entries. This
3081 * marker needs to work for the old format as well as
3082 * for extended context entries.
3083 *
3084 * Bit 67 of the context entry is used. In the old
3085 * format this bit is available to software, in the
3086 * extended format it is the PGE bit, but PGE is ignored
3087 * by HW if PASIDs are disabled (and thus still
3088 * available).
3089 *
3090 * So disable PASIDs first and then mark the entry
3091 * copied. This means that we don't copy PASID
3092 * translations from the old kernel, but this is fine as
3093 * faults there are not fatal.
3094 */
3095 context_clear_pasid_enable(&ce);
3096 context_set_copied(&ce);
3097
Joerg Roedel091d42e2015-06-12 11:56:10 +02003098 new_ce[idx] = ce;
3099 }
3100
3101 tbl[tbl_idx + pos] = new_ce;
3102
3103 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
3104
3105out_unmap:
Dan Williamsdfddb9692015-10-09 18:16:46 -04003106 memunmap(old_ce);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003107
3108out:
3109 return ret;
3110}
3111
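/*
 * In the kdump case, take over the translation structures left behind by
 * the previous kernel: map its root table, copy each per-bus context
 * table, and point the new root entries at the copies.  Bails out if the
 * old and new root table formats (extended vs. legacy) differ, since the
 * RTT bit cannot be changed while translation is enabled.
 */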
3112static int copy_translation_tables(struct intel_iommu *iommu)
3113{
3114 struct context_entry **ctxt_tbls;
Dan Williamsdfddb9692015-10-09 18:16:46 -04003115 struct root_entry *old_rt;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003116 phys_addr_t old_rt_phys;
3117 int ctxt_table_entries;
3118 unsigned long flags;
3119 u64 rtaddr_reg;
3120 int bus, ret;
Joerg Roedelc3361f22015-06-12 12:39:25 +02003121 bool new_ext, ext;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003122
3123 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3124 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
Joerg Roedelc3361f22015-06-12 12:39:25 +02003125 new_ext = !!ecap_ecs(iommu->ecap);
3126
3127 /*
3128 * The RTT bit can only be changed when translation is disabled,
3129	 * but disabling translation would open a window for data
3130 * corruption. So bail out and don't copy anything if we would
3131 * have to change the bit.
3132 */
3133 if (new_ext != ext)
3134 return -EINVAL;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003135
3136 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
3137 if (!old_rt_phys)
3138 return -EINVAL;
3139
Dan Williamsdfddb9692015-10-09 18:16:46 -04003140 old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003141 if (!old_rt)
3142 return -ENOMEM;
3143
3144 /* This is too big for the stack - allocate it from slab */
3145 ctxt_table_entries = ext ? 512 : 256;
3146 ret = -ENOMEM;
3147 ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
3148 if (!ctxt_tbls)
3149 goto out_unmap;
3150
3151 for (bus = 0; bus < 256; bus++) {
3152 ret = copy_context_table(iommu, &old_rt[bus],
3153 ctxt_tbls, bus, ext);
3154 if (ret) {
3155 pr_err("%s: Failed to copy context table for bus %d\n",
3156 iommu->name, bus);
3157 continue;
3158 }
3159 }
3160
3161 spin_lock_irqsave(&iommu->lock, flags);
3162
3163 /* Context tables are copied, now write them to the root_entry table */
3164 for (bus = 0; bus < 256; bus++) {
3165 int idx = ext ? bus * 2 : bus;
3166 u64 val;
3167
3168 if (ctxt_tbls[idx]) {
3169 val = virt_to_phys(ctxt_tbls[idx]) | 1;
3170 iommu->root_entry[bus].lo = val;
3171 }
3172
3173 if (!ext || !ctxt_tbls[idx + 1])
3174 continue;
3175
3176 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3177 iommu->root_entry[bus].hi = val;
3178 }
3179
3180 spin_unlock_irqrestore(&iommu->lock, flags);
3181
3182 kfree(ctxt_tbls);
3183
3184 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
3185
3186 ret = 0;
3187
3188out_unmap:
Dan Williamsdfddb9692015-10-09 18:16:46 -04003189 memunmap(old_rt);
Joerg Roedel091d42e2015-06-12 11:56:10 +02003190
3191 return ret;
3192}
3193
Joseph Cihulab7792602011-05-03 00:08:37 -07003194static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003195{
3196 struct dmar_drhd_unit *drhd;
3197 struct dmar_rmrr_unit *rmrr;
Joerg Roedela87f4912015-06-12 12:32:54 +02003198 bool copied_tables = false;
David Woodhouse832bd852014-03-07 15:08:36 +00003199 struct device *dev;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003200 struct intel_iommu *iommu;
Omer Pelegaa473242016-04-20 11:33:02 +03003201 int i, ret, cpu;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003202
3203 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003204 * for each drhd
3205 * allocate root
3206 * initialize and program root entry to not present
3207 * endfor
3208 */
3209 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08003210 /*
3211		 * Lock not needed: this is only incremented in the single-
3212		 * threaded kernel __init code path; all other accesses are
3213		 * read-only.
3214 */
Jiang Liu78d8e702014-11-09 22:47:57 +08003215 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
Mike Travis1b198bb2012-03-05 15:05:16 -08003216 g_num_of_iommus++;
3217 continue;
3218 }
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003219 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08003220 }
3221
Jiang Liuffebeb42014-11-09 22:48:02 +08003222 /* Preallocate enough resources for IOMMU hot-addition */
3223 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3224 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3225
Weidong Hand9630fe2008-12-08 11:06:32 +08003226 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3227 GFP_KERNEL);
3228 if (!g_iommus) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003229 pr_err("Allocating global iommu array failed\n");
Weidong Hand9630fe2008-12-08 11:06:32 +08003230 ret = -ENOMEM;
3231 goto error;
3232 }
3233
Omer Pelegaa473242016-04-20 11:33:02 +03003234 for_each_possible_cpu(cpu) {
3235 struct deferred_flush_data *dfd = per_cpu_ptr(&deferred_flush,
3236 cpu);
3237
3238 dfd->tables = kzalloc(g_num_of_iommus *
3239 sizeof(struct deferred_flush_table),
3240 GFP_KERNEL);
3241 if (!dfd->tables) {
3242 ret = -ENOMEM;
3243 goto free_g_iommus;
3244 }
3245
3246 spin_lock_init(&dfd->lock);
3247 setup_timer(&dfd->timer, flush_unmaps_timeout, cpu);
mark gross5e0d2a62008-03-04 15:22:08 -08003248 }
3249
Jiang Liu7c919772014-01-06 14:18:18 +08003250 for_each_active_iommu(iommu, drhd) {
Weidong Hand9630fe2008-12-08 11:06:32 +08003251 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003252
Joerg Roedelb63d80d2015-06-12 09:14:34 +02003253 intel_iommu_init_qi(iommu);
3254
Suresh Siddhae61d98d2008-07-10 11:16:35 -07003255 ret = iommu_init_domains(iommu);
3256 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003257 goto free_iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07003258
Joerg Roedel4158c2e2015-06-12 10:14:02 +02003259 init_translation_status(iommu);
3260
Joerg Roedel091d42e2015-06-12 11:56:10 +02003261 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3262 iommu_disable_translation(iommu);
3263 clear_translation_pre_enabled(iommu);
3264 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3265 iommu->name);
3266 }
Joerg Roedel4158c2e2015-06-12 10:14:02 +02003267
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003268 /*
3269 * TBD:
3270 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003271	 * among all IOMMUs. Need to split them later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003272 */
3273 ret = iommu_alloc_root_entry(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08003274 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003275 goto free_iommu;
Joerg Roedel5f0a7f72015-06-12 09:18:53 +02003276
Joerg Roedel091d42e2015-06-12 11:56:10 +02003277 if (translation_pre_enabled(iommu)) {
3278 pr_info("Translation already enabled - trying to copy translation structures\n");
3279
3280 ret = copy_translation_tables(iommu);
3281 if (ret) {
3282 /*
3283 * We found the IOMMU with translation
3284 * enabled - but failed to copy over the
3285 * old root-entry table. Try to proceed
3286 * by disabling translation now and
3287 * allocating a clean root-entry table.
3288 * This might cause DMAR faults, but
3289 * probably the dump will still succeed.
3290 */
3291 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3292 iommu->name);
3293 iommu_disable_translation(iommu);
3294 clear_translation_pre_enabled(iommu);
3295 } else {
3296 pr_info("Copied translation tables from previous kernel for %s\n",
3297 iommu->name);
Joerg Roedela87f4912015-06-12 12:32:54 +02003298 copied_tables = true;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003299 }
3300 }
3301
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003302 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01003303 hw_pass_through = 0;
David Woodhouse8a94ade2015-03-24 14:54:56 +00003304#ifdef CONFIG_INTEL_IOMMU_SVM
3305 if (pasid_enabled(iommu))
3306 intel_svm_alloc_pasid_tables(iommu);
3307#endif
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003308 }
3309
Joerg Roedela4c34ff2016-06-17 11:29:48 +02003310 /*
3311 * Now that qi is enabled on all iommus, set the root entry and flush
3312 * caches. This is required on some Intel X58 chipsets, otherwise the
3313 * flush_context function will loop forever and the boot hangs.
3314 */
3315 for_each_active_iommu(iommu, drhd) {
3316 iommu_flush_write_buffer(iommu);
3317 iommu_set_root_entry(iommu);
3318 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3319 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3320 }
3321
David Woodhouse19943b02009-08-04 16:19:20 +01003322 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07003323 iommu_identity_mapping |= IDENTMAP_ALL;
3324
Suresh Siddhad3f13812011-08-23 17:05:25 -07003325#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07003326 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01003327#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07003328
Joerg Roedel86080cc2015-06-12 12:27:16 +02003329 if (iommu_identity_mapping) {
3330 ret = si_domain_init(hw_pass_through);
3331 if (ret)
3332 goto free_iommu;
3333 }
3334
David Woodhousee0fc7e02009-09-30 09:12:17 -07003335 check_tylersburg_isoch();
3336
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003337 /*
Joerg Roedela87f4912015-06-12 12:32:54 +02003338 * If we copied translations from a previous kernel in the kdump
3339 * case, we can not assign the devices to domains now, as that
3340 * would eliminate the old mappings. So skip this part and defer
3341 * the assignment to device driver initialization time.
3342 */
3343 if (copied_tables)
3344 goto domains_done;
3345
3346 /*
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003347	 * If pass through is not set or not enabled, set up context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003348	 * identity mappings for rmrr, gfx, and isa, and possibly fall back to
3349	 * static identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003350 */
David Woodhouse19943b02009-08-04 16:19:20 +01003351 if (iommu_identity_mapping) {
3352 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3353 if (ret) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003354 pr_crit("Failed to setup IOMMU pass-through\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08003355 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003356 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003357 }
David Woodhouse19943b02009-08-04 16:19:20 +01003358 /*
3359 * For each rmrr
3360 * for each dev attached to rmrr
3361 * do
3362 * locate drhd for dev, alloc domain for dev
3363 * allocate free domain
3364 * allocate page table entries for rmrr
3365 * if context not allocated for bus
3366 * allocate and init context
3367 * set present in root table for this bus
3368 * init context with domain, translation etc
3369 * endfor
3370 * endfor
3371 */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003372 pr_info("Setting RMRR:\n");
David Woodhouse19943b02009-08-04 16:19:20 +01003373 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08003374		/* some BIOSes list nonexistent devices in the DMAR table. */
3375 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
David Woodhouse832bd852014-03-07 15:08:36 +00003376 i, dev) {
David Woodhouse0b9d9752014-03-09 15:48:15 -07003377 ret = iommu_prepare_rmrr_dev(rmrr, dev);
David Woodhouse19943b02009-08-04 16:19:20 +01003378 if (ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003379 pr_err("Mapping reserved region failed\n");
David Woodhouse19943b02009-08-04 16:19:20 +01003380 }
3381 }
3382
3383 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07003384
Joerg Roedela87f4912015-06-12 12:32:54 +02003385domains_done:
3386
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003387 /*
3388 * for each drhd
3389 * enable fault log
3390 * global invalidate context cache
3391 * global invalidate iotlb
3392 * enable translation
3393 */
Jiang Liu7c919772014-01-06 14:18:18 +08003394 for_each_iommu(iommu, drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07003395 if (drhd->ignored) {
3396 /*
3397 * we always have to disable PMRs or DMA may fail on
3398 * this device
3399 */
3400 if (force_on)
Jiang Liu7c919772014-01-06 14:18:18 +08003401 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003402 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07003403 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003404
3405 iommu_flush_write_buffer(iommu);
3406
David Woodhousea222a7f2015-10-07 23:35:18 +01003407#ifdef CONFIG_INTEL_IOMMU_SVM
3408 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
3409 ret = intel_svm_enable_prq(iommu);
3410 if (ret)
3411 goto free_iommu;
3412 }
3413#endif
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07003414 ret = dmar_set_interrupt(iommu);
3415 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003416 goto free_iommu;
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07003417
Joerg Roedel8939ddf2015-06-12 14:40:01 +02003418 if (!translation_pre_enabled(iommu))
3419 iommu_enable_translation(iommu);
3420
David Woodhouseb94996c2009-09-19 15:28:12 -07003421 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003422 }
3423
3424 return 0;
Jiang Liu989d51f2014-02-19 14:07:21 +08003425
3426free_iommu:
Jiang Liuffebeb42014-11-09 22:48:02 +08003427 for_each_active_iommu(iommu, drhd) {
3428 disable_dmar_iommu(iommu);
Jiang Liua868e6b2014-01-06 14:18:20 +08003429 free_dmar_iommu(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08003430 }
Jiang Liu989d51f2014-02-19 14:07:21 +08003431free_g_iommus:
Omer Pelegaa473242016-04-20 11:33:02 +03003432 for_each_possible_cpu(cpu)
3433 kfree(per_cpu_ptr(&deferred_flush, cpu)->tables);
Weidong Hand9630fe2008-12-08 11:06:32 +08003434 kfree(g_iommus);
Jiang Liu989d51f2014-02-19 14:07:21 +08003435error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003436 return ret;
3437}
3438
David Woodhouse5a5e02a2009-07-04 09:35:44 +01003439/* This takes a number of _MM_ pages, not VTD pages */
Omer Peleg2aac6302016-04-20 11:33:57 +03003440static unsigned long intel_alloc_iova(struct device *dev,
David Woodhouse875764d2009-06-28 21:20:51 +01003441 struct dmar_domain *domain,
3442 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003443{
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003444 unsigned long iova_pfn = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003445
David Woodhouse875764d2009-06-28 21:20:51 +01003446 /* Restrict dma_mask to the width that the iommu can handle */
3447 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
Robin Murphy8f6429c2015-07-16 19:40:12 +01003448 /* Ensure we reserve the whole size-aligned region */
3449 nrpages = __roundup_pow_of_two(nrpages);
David Woodhouse875764d2009-06-28 21:20:51 +01003450
3451 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003452 /*
3453		 * First try to allocate an I/O virtual address within
Yang Hongyang284901a2009-04-06 19:01:15 -07003454		 * DMA_BIT_MASK(32); if that fails, then try allocating
Joe Perches36098012007-12-17 11:40:11 -08003455		 * from the higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003456 */
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003457 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3458 IOVA_PFN(DMA_BIT_MASK(32)));
3459 if (iova_pfn)
3460 return iova_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003461 }
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003462 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask));
3463 if (unlikely(!iova_pfn)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003464		pr_err("Allocating %ld-page iova for %s failed\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003465 nrpages, dev_name(dev));
Omer Peleg2aac6302016-04-20 11:33:57 +03003466 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003467 }
3468
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003469 return iova_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003470}
3471
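/*
 * Slow path for get_valid_domain_for_dev(): find or allocate a DMA domain
 * for @dev, replay any RMRR identity mappings that cover the device, and
 * then bind the domain to the device.
 */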
David Woodhoused4b709f2014-03-09 16:07:40 -07003472static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003473{
Joerg Roedel1c5ebba2016-08-25 13:52:51 +02003474 struct dmar_domain *domain, *tmp;
Joerg Roedelb1ce5b72015-09-23 19:16:01 +02003475 struct dmar_rmrr_unit *rmrr;
Joerg Roedelb1ce5b72015-09-23 19:16:01 +02003476 struct device *i_dev;
3477 int i, ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003478
Joerg Roedel1c5ebba2016-08-25 13:52:51 +02003479 domain = find_domain(dev);
3480 if (domain)
3481 goto out;
3482
3483 domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
3484 if (!domain)
3485 goto out;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003486
Joerg Roedelb1ce5b72015-09-23 19:16:01 +02003487	/* We have a new domain - set up possible RMRRs for the device */
3488 rcu_read_lock();
3489 for_each_rmrr_units(rmrr) {
3490 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3491 i, i_dev) {
3492 if (i_dev != dev)
3493 continue;
3494
3495 ret = domain_prepare_identity_map(dev, domain,
3496 rmrr->base_address,
3497 rmrr->end_address);
3498 if (ret)
3499 dev_err(dev, "Mapping reserved region failed\n");
3500 }
3501 }
3502 rcu_read_unlock();
3503
Joerg Roedel1c5ebba2016-08-25 13:52:51 +02003504 tmp = set_domain_for_dev(dev, domain);
3505 if (!tmp || domain != tmp) {
3506 domain_exit(domain);
3507 domain = tmp;
3508 }
3509
3510out:
3511
3512 if (!domain)
3513 pr_err("Allocating domain for %s failed\n", dev_name(dev));
3514
3515
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003516 return domain;
3517}
3518
David Woodhoused4b709f2014-03-09 16:07:40 -07003519static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
David Woodhouse147202a2009-07-07 19:43:20 +01003520{
3521 struct device_domain_info *info;
3522
3523 /* No lock here, assumes no domain exit in normal case */
David Woodhoused4b709f2014-03-09 16:07:40 -07003524 info = dev->archdata.iommu;
David Woodhouse147202a2009-07-07 19:43:20 +01003525 if (likely(info))
3526 return info->domain;
3527
3528 return __get_valid_domain_for_dev(dev);
3529}
3530
David Woodhouseecb509e2014-03-09 16:29:55 -07003531/* Check if the dev needs to go through the non-identity map and unmap process. */
David Woodhouse73676832009-07-04 14:08:36 +01003532static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003533{
3534 int found;
3535
David Woodhouse3d891942014-03-06 15:59:26 +00003536 if (iommu_dummy(dev))
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003537 return 1;
3538
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003539 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003540 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003541
David Woodhouse9b226622014-03-09 14:03:28 -07003542 found = identity_mapping(dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003543 if (found) {
David Woodhouseecb509e2014-03-09 16:29:55 -07003544 if (iommu_should_identity_map(dev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003545 return 1;
3546 else {
3547 /*
3548			 * The 32 bit DMA device is removed from si_domain and
3549			 * falls back to non-identity mapping.
3550 */
Joerg Roedele6de0f82015-07-22 16:30:36 +02003551 dmar_remove_one_dev_info(si_domain, dev);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003552 pr_info("32bit %s uses non-identity mapping\n",
3553 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003554 return 0;
3555 }
3556 } else {
3557 /*
3558		 * In the case of a 64 bit DMA device detached from a VM, the
3559		 * device is put into si_domain for identity mapping.
3560 */
David Woodhouseecb509e2014-03-09 16:29:55 -07003561 if (iommu_should_identity_map(dev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003562 int ret;
Joerg Roedel28ccce02015-07-21 14:45:31 +02003563 ret = domain_add_dev_info(si_domain, dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003564 if (!ret) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003565 pr_info("64bit %s uses identity mapping\n",
3566 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003567 return 1;
3568 }
3569 }
3570 }
3571
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003572 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003573}
3574
David Woodhouse5040a912014-03-09 16:14:00 -07003575static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003576 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003577{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003578 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003579 phys_addr_t start_paddr;
Omer Peleg2aac6302016-04-20 11:33:57 +03003580 unsigned long iova_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003581 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003582 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08003583 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07003584 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003585
3586 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003587
David Woodhouse5040a912014-03-09 16:14:00 -07003588 if (iommu_no_mapping(dev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003589 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003590
David Woodhouse5040a912014-03-09 16:14:00 -07003591 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003592 if (!domain)
3593 return 0;
3594
Weidong Han8c11e792008-12-08 15:29:22 +08003595 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01003596 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003597
Omer Peleg2aac6302016-04-20 11:33:57 +03003598 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3599 if (!iova_pfn)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003600 goto error;
3601
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003602 /*
3603	 * Check if DMAR supports zero-length reads on write-only
3604	 * mappings.
3605 */
3606	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
Weidong Han8c11e792008-12-08 15:29:22 +08003607 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003608 prot |= DMA_PTE_READ;
3609 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3610 prot |= DMA_PTE_WRITE;
3611 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003612	 * paddr to (paddr + size) might span a partial page, so we should map the
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003613	 * whole page. Note: if two parts of one page are mapped separately, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003614	 * might have two guest addresses mapping to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003615	 * is not a big problem
3616 */
Omer Peleg2aac6302016-04-20 11:33:57 +03003617 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
Fenghua Yu33041ec2009-08-04 15:10:59 -07003618 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003619 if (ret)
3620 goto error;
3621
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003622 /* it's a non-present to present mapping. Only flush if caching mode */
3623 if (cap_caching_mode(iommu->cap))
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02003624 iommu_flush_iotlb_psi(iommu, domain,
Omer Peleg2aac6302016-04-20 11:33:57 +03003625 mm_to_dma_pfn(iova_pfn),
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02003626 size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003627 else
Weidong Han8c11e792008-12-08 15:29:22 +08003628 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003629
Omer Peleg2aac6302016-04-20 11:33:57 +03003630 start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
David Woodhouse03d6a242009-06-28 15:33:46 +01003631 start_paddr += paddr & ~PAGE_MASK;
3632 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003633
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003634error:
Omer Peleg2aac6302016-04-20 11:33:57 +03003635 if (iova_pfn)
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003636 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003637 pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
David Woodhouse5040a912014-03-09 16:14:00 -07003638 dev_name(dev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003639 return 0;
3640}
3641
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003642static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3643 unsigned long offset, size_t size,
3644 enum dma_data_direction dir,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003645 unsigned long attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003646{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003647 return __intel_map_single(dev, page_to_phys(page) + offset, size,
David Woodhouse46333e32014-03-10 20:01:21 -07003648 dir, *dev->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003649}
3650
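/*
 * Drain one CPU's deferred-unmap queue: issue the required IOTLB and
 * device-IOTLB invalidations, then free the queued IOVA ranges and any
 * page-table pages on their freelists.
 */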
Omer Pelegaa473242016-04-20 11:33:02 +03003651static void flush_unmaps(struct deferred_flush_data *flush_data)
mark gross5e0d2a62008-03-04 15:22:08 -08003652{
mark gross80b20dd2008-04-18 13:53:58 -07003653 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08003654
Omer Pelegaa473242016-04-20 11:33:02 +03003655 flush_data->timer_on = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003656
3657 /* just flush them all */
3658 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08003659 struct intel_iommu *iommu = g_iommus[i];
Omer Pelegaa473242016-04-20 11:33:02 +03003660 struct deferred_flush_table *flush_table =
3661 &flush_data->tables[i];
Weidong Hana2bb8452008-12-08 11:24:12 +08003662 if (!iommu)
3663 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003664
Omer Pelegaa473242016-04-20 11:33:02 +03003665 if (!flush_table->next)
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003666 continue;
3667
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003668 /* In caching mode, global flushes turn emulation expensive */
3669 if (!cap_caching_mode(iommu->cap))
3670 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08003671 DMA_TLB_GLOBAL_FLUSH);
Omer Pelegaa473242016-04-20 11:33:02 +03003672 for (j = 0; j < flush_table->next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08003673 unsigned long mask;
Omer Peleg314f1dc2016-04-20 11:32:45 +03003674 struct deferred_flush_entry *entry =
Omer Pelegaa473242016-04-20 11:33:02 +03003675 &flush_table->entries[j];
Omer Peleg2aac6302016-04-20 11:33:57 +03003676 unsigned long iova_pfn = entry->iova_pfn;
Omer Peleg769530e2016-04-20 11:33:25 +03003677 unsigned long nrpages = entry->nrpages;
Omer Peleg314f1dc2016-04-20 11:32:45 +03003678 struct dmar_domain *domain = entry->domain;
3679 struct page *freelist = entry->freelist;
Yu Zhao93a23a72009-05-18 13:51:37 +08003680
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003681 /* On real hardware multiple invalidations are expensive */
3682 if (cap_caching_mode(iommu->cap))
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02003683 iommu_flush_iotlb_psi(iommu, domain,
Omer Peleg2aac6302016-04-20 11:33:57 +03003684 mm_to_dma_pfn(iova_pfn),
Omer Peleg769530e2016-04-20 11:33:25 +03003685 nrpages, !freelist, 0);
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003686 else {
Omer Peleg769530e2016-04-20 11:33:25 +03003687 mask = ilog2(nrpages);
Omer Peleg314f1dc2016-04-20 11:32:45 +03003688 iommu_flush_dev_iotlb(domain,
Omer Peleg2aac6302016-04-20 11:33:57 +03003689 (uint64_t)iova_pfn << PAGE_SHIFT, mask);
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003690 }
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003691 free_iova_fast(&domain->iovad, iova_pfn, nrpages);
Omer Peleg314f1dc2016-04-20 11:32:45 +03003692 if (freelist)
3693 dma_free_pagelist(freelist);
mark gross80b20dd2008-04-18 13:53:58 -07003694 }
Omer Pelegaa473242016-04-20 11:33:02 +03003695 flush_table->next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003696 }
3697
Omer Pelegaa473242016-04-20 11:33:02 +03003698 flush_data->size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003699}
3700
Omer Pelegaa473242016-04-20 11:33:02 +03003701static void flush_unmaps_timeout(unsigned long cpuid)
mark gross5e0d2a62008-03-04 15:22:08 -08003702{
Omer Pelegaa473242016-04-20 11:33:02 +03003703 struct deferred_flush_data *flush_data = per_cpu_ptr(&deferred_flush, cpuid);
mark gross80b20dd2008-04-18 13:53:58 -07003704 unsigned long flags;
3705
Omer Pelegaa473242016-04-20 11:33:02 +03003706 spin_lock_irqsave(&flush_data->lock, flags);
3707 flush_unmaps(flush_data);
3708 spin_unlock_irqrestore(&flush_data->lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003709}
3710
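/*
 * Queue an unmapped IOVA range (plus the page-table pages to free) on the
 * local CPU's deferred flush table for the IOMMU that owns this domain, and
 * arm the flush timer if it is not already pending.
 */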
Omer Peleg2aac6302016-04-20 11:33:57 +03003711static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
Omer Peleg769530e2016-04-20 11:33:25 +03003712 unsigned long nrpages, struct page *freelist)
mark gross5e0d2a62008-03-04 15:22:08 -08003713{
3714 unsigned long flags;
Omer Peleg314f1dc2016-04-20 11:32:45 +03003715 int entry_id, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08003716 struct intel_iommu *iommu;
Omer Peleg314f1dc2016-04-20 11:32:45 +03003717 struct deferred_flush_entry *entry;
Omer Pelegaa473242016-04-20 11:33:02 +03003718 struct deferred_flush_data *flush_data;
3719 unsigned int cpuid;
mark gross5e0d2a62008-03-04 15:22:08 -08003720
Omer Pelegaa473242016-04-20 11:33:02 +03003721 cpuid = get_cpu();
3722 flush_data = per_cpu_ptr(&deferred_flush, cpuid);
3723
3724	/* Flush all CPUs' entries to avoid deferring too much. If
3725	 * this becomes a bottleneck, we could flush only the local CPU
3726	 * and rely on the flush timer for the rest.
3727 */
3728 if (flush_data->size == HIGH_WATER_MARK) {
3729 int cpu;
3730
3731 for_each_online_cpu(cpu)
3732 flush_unmaps_timeout(cpu);
3733 }
3734
3735 spin_lock_irqsave(&flush_data->lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07003736
Weidong Han8c11e792008-12-08 15:29:22 +08003737 iommu = domain_get_iommu(dom);
3738 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003739
Omer Pelegaa473242016-04-20 11:33:02 +03003740 entry_id = flush_data->tables[iommu_id].next;
3741 ++(flush_data->tables[iommu_id].next);
mark gross5e0d2a62008-03-04 15:22:08 -08003742
Omer Pelegaa473242016-04-20 11:33:02 +03003743 entry = &flush_data->tables[iommu_id].entries[entry_id];
Omer Peleg314f1dc2016-04-20 11:32:45 +03003744 entry->domain = dom;
Omer Peleg2aac6302016-04-20 11:33:57 +03003745 entry->iova_pfn = iova_pfn;
Omer Peleg769530e2016-04-20 11:33:25 +03003746 entry->nrpages = nrpages;
Omer Peleg314f1dc2016-04-20 11:32:45 +03003747 entry->freelist = freelist;
mark gross5e0d2a62008-03-04 15:22:08 -08003748
Omer Pelegaa473242016-04-20 11:33:02 +03003749 if (!flush_data->timer_on) {
3750 mod_timer(&flush_data->timer, jiffies + msecs_to_jiffies(10));
3751 flush_data->timer_on = 1;
mark gross5e0d2a62008-03-04 15:22:08 -08003752 }
Omer Pelegaa473242016-04-20 11:33:02 +03003753 flush_data->size++;
3754 spin_unlock_irqrestore(&flush_data->lock, flags);
3755
3756 put_cpu();
mark gross5e0d2a62008-03-04 15:22:08 -08003757}
3758
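/*
 * Common unmap path for the DMA ops below: tear down the page tables for
 * the range, then either flush the IOTLB and free the IOVA immediately
 * (intel_iommu_strict) or defer both via add_unmap().
 */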
Omer Peleg769530e2016-04-20 11:33:25 +03003759static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003760{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003761 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003762 unsigned long start_pfn, last_pfn;
Omer Peleg769530e2016-04-20 11:33:25 +03003763 unsigned long nrpages;
Omer Peleg2aac6302016-04-20 11:33:57 +03003764 unsigned long iova_pfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003765 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003766 struct page *freelist;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003767
David Woodhouse73676832009-07-04 14:08:36 +01003768 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003769 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003770
David Woodhouse1525a292014-03-06 16:19:30 +00003771 domain = find_domain(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003772 BUG_ON(!domain);
3773
Weidong Han8c11e792008-12-08 15:29:22 +08003774 iommu = domain_get_iommu(domain);
3775
Omer Peleg2aac6302016-04-20 11:33:57 +03003776 iova_pfn = IOVA_PFN(dev_addr);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003777
Omer Peleg769530e2016-04-20 11:33:25 +03003778 nrpages = aligned_nrpages(dev_addr, size);
Omer Peleg2aac6302016-04-20 11:33:57 +03003779 start_pfn = mm_to_dma_pfn(iova_pfn);
Omer Peleg769530e2016-04-20 11:33:25 +03003780 last_pfn = start_pfn + nrpages - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003781
David Woodhoused794dc92009-06-28 00:27:49 +01003782 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003783 dev_name(dev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003784
David Woodhouseea8ea462014-03-05 17:09:32 +00003785 freelist = domain_unmap(domain, start_pfn, last_pfn);
David Woodhoused794dc92009-06-28 00:27:49 +01003786
mark gross5e0d2a62008-03-04 15:22:08 -08003787 if (intel_iommu_strict) {
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02003788 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
Omer Peleg769530e2016-04-20 11:33:25 +03003789 nrpages, !freelist, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08003790 /* free iova */
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003791 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
David Woodhouseea8ea462014-03-05 17:09:32 +00003792 dma_free_pagelist(freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003793 } else {
Omer Peleg2aac6302016-04-20 11:33:57 +03003794 add_unmap(domain, iova_pfn, nrpages, freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003795 /*
3796		 * queue up the release of the unmap to save the roughly 1/6 of
3797		 * CPU time otherwise consumed by the iotlb flush operation...
3798 */
mark gross5e0d2a62008-03-04 15:22:08 -08003799 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003800}
3801
Jiang Liud41a4ad2014-07-11 14:19:34 +08003802static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3803 size_t size, enum dma_data_direction dir,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003804 unsigned long attrs)
Jiang Liud41a4ad2014-07-11 14:19:34 +08003805{
Omer Peleg769530e2016-04-20 11:33:25 +03003806 intel_unmap(dev, dev_addr, size);
Jiang Liud41a4ad2014-07-11 14:19:34 +08003807}
3808
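/*
 * Coherent allocations first try the contiguous (CMA) allocator when the
 * caller may block, fall back to alloc_pages(), and then map the buffer
 * through __intel_map_single() like any other single mapping.
 */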
David Woodhouse5040a912014-03-09 16:14:00 -07003809static void *intel_alloc_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003810 dma_addr_t *dma_handle, gfp_t flags,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003811 unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003812{
Akinobu Mita36746432014-06-04 16:06:51 -07003813 struct page *page = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003814 int order;
3815
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003816 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003817 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003818
David Woodhouse5040a912014-03-09 16:14:00 -07003819 if (!iommu_no_mapping(dev))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003820 flags &= ~(GFP_DMA | GFP_DMA32);
David Woodhouse5040a912014-03-09 16:14:00 -07003821 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3822 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003823 flags |= GFP_DMA;
3824 else
3825 flags |= GFP_DMA32;
3826 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003827
Mel Gormand0164ad2015-11-06 16:28:21 -08003828 if (gfpflags_allow_blocking(flags)) {
Akinobu Mita36746432014-06-04 16:06:51 -07003829 unsigned int count = size >> PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003830
Akinobu Mita36746432014-06-04 16:06:51 -07003831 page = dma_alloc_from_contiguous(dev, count, order);
3832 if (page && iommu_no_mapping(dev) &&
3833 page_to_phys(page) + size > dev->coherent_dma_mask) {
3834 dma_release_from_contiguous(dev, page, count);
3835 page = NULL;
3836 }
3837 }
3838
3839 if (!page)
3840 page = alloc_pages(flags, order);
3841 if (!page)
3842 return NULL;
3843 memset(page_address(page), 0, size);
3844
3845 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003846 DMA_BIDIRECTIONAL,
David Woodhouse5040a912014-03-09 16:14:00 -07003847 dev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003848 if (*dma_handle)
Akinobu Mita36746432014-06-04 16:06:51 -07003849 return page_address(page);
3850 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3851 __free_pages(page, order);
3852
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003853 return NULL;
3854}
3855
David Woodhouse5040a912014-03-09 16:14:00 -07003856static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003857 dma_addr_t dma_handle, unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003858{
3859 int order;
Akinobu Mita36746432014-06-04 16:06:51 -07003860 struct page *page = virt_to_page(vaddr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003861
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003862 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003863 order = get_order(size);
3864
Omer Peleg769530e2016-04-20 11:33:25 +03003865 intel_unmap(dev, dma_handle, size);
Akinobu Mita36746432014-06-04 16:06:51 -07003866 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3867 __free_pages(page, order);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003868}
3869
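/*
 * intel_map_sg() below maps the whole scatterlist into one contiguous IOVA
 * range, so unmapping only needs the starting address and the total page
 * count summed over the list.
 */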
David Woodhouse5040a912014-03-09 16:14:00 -07003870static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003871 int nelems, enum dma_data_direction dir,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003872 unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003873{
Omer Peleg769530e2016-04-20 11:33:25 +03003874 dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
3875 unsigned long nrpages = 0;
3876 struct scatterlist *sg;
3877 int i;
3878
3879 for_each_sg(sglist, sg, nelems, i) {
3880 nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
3881 }
3882
3883 intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003884}
3885
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003886static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003887 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003888{
3889 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003890 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003891
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003892 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003893 BUG_ON(!sg_page(sg));
Dan Williams3e6110f2015-12-15 12:54:06 -08003894 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003895 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003896 }
3897 return nelems;
3898}
3899
David Woodhouse5040a912014-03-09 16:14:00 -07003900static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07003901 enum dma_data_direction dir, unsigned long attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003902{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003903 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003904 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003905 size_t size = 0;
3906 int prot = 0;
Omer Peleg2aac6302016-04-20 11:33:57 +03003907 unsigned long iova_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003908 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003909 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003910 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003911 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003912
3913 BUG_ON(dir == DMA_NONE);
David Woodhouse5040a912014-03-09 16:14:00 -07003914 if (iommu_no_mapping(dev))
3915 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003916
David Woodhouse5040a912014-03-09 16:14:00 -07003917 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003918 if (!domain)
3919 return 0;
3920
Weidong Han8c11e792008-12-08 15:29:22 +08003921 iommu = domain_get_iommu(domain);
3922
David Woodhouseb536d242009-06-28 14:49:31 +01003923 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003924 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003925
Omer Peleg2aac6302016-04-20 11:33:57 +03003926 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
David Woodhouse5040a912014-03-09 16:14:00 -07003927 *dev->dma_mask);
Omer Peleg2aac6302016-04-20 11:33:57 +03003928 if (!iova_pfn) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003929 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003930 return 0;
3931 }
3932
3933 /*
3934	 * Check if DMAR supports zero-length reads on write-only
3935	 * mappings.
3936 */
3937 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003938 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003939 prot |= DMA_PTE_READ;
3940 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3941 prot |= DMA_PTE_WRITE;
3942
Omer Peleg2aac6302016-04-20 11:33:57 +03003943 start_vpfn = mm_to_dma_pfn(iova_pfn);
David Woodhousee1605492009-06-29 11:17:38 +01003944
Fenghua Yuf5329592009-08-04 15:09:37 -07003945 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003946 if (unlikely(ret)) {
David Woodhousee1605492009-06-29 11:17:38 +01003947 dma_pte_free_pagetable(domain, start_vpfn,
3948 start_vpfn + size - 1);
Omer Peleg22e2f9f2016-04-20 11:34:11 +03003949 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
David Woodhousee1605492009-06-29 11:17:38 +01003950 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003951 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003952
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003953	/* it's a non-present to present mapping. Only flush if in caching mode */
3954 if (cap_caching_mode(iommu->cap))
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02003955 iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003956 else
Weidong Han8c11e792008-12-08 15:29:22 +08003957 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003958
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003959 return nelems;
3960}
3961
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003962static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3963{
3964 return !dma_addr;
3965}
3966
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003967struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003968 .alloc = intel_alloc_coherent,
3969 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003970 .map_sg = intel_map_sg,
3971 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003972 .map_page = intel_map_page,
3973 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003974 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003975};
3976
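/*
 * Illustrative call path only, not part of the driver itself: once
 * intel_dma_ops is installed as a device's dma_ops, a typical driver
 * sequence such as
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *
 * is routed by the generic DMA API to intel_map_page()/intel_unmap_page()
 * above (assuming the device is not on the identity/pass-through path).
 */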
3977static inline int iommu_domain_cache_init(void)
3978{
3979 int ret = 0;
3980
3981 iommu_domain_cache = kmem_cache_create("iommu_domain",
3982 sizeof(struct dmar_domain),
3983 0,
3984 SLAB_HWCACHE_ALIGN,
3985						NULL);
3987 if (!iommu_domain_cache) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003988 pr_err("Couldn't create iommu_domain cache\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003989 ret = -ENOMEM;
3990 }
3991
3992 return ret;
3993}
3994
3995static inline int iommu_devinfo_cache_init(void)
3996{
3997 int ret = 0;
3998
3999 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
4000 sizeof(struct device_domain_info),
4001 0,
4002 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004003 NULL);
4004 if (!iommu_devinfo_cache) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004005 pr_err("Couldn't create devinfo cache\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004006 ret = -ENOMEM;
4007 }
4008
4009 return ret;
4010}
4011
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004012static int __init iommu_init_mempool(void)
4013{
4014 int ret;
Sakari Ailusae1ff3d2015-07-13 14:31:28 +03004015 ret = iova_cache_get();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004016 if (ret)
4017 return ret;
4018
4019 ret = iommu_domain_cache_init();
4020 if (ret)
4021 goto domain_error;
4022
4023 ret = iommu_devinfo_cache_init();
4024 if (!ret)
4025 return ret;
4026
4027 kmem_cache_destroy(iommu_domain_cache);
4028domain_error:
Sakari Ailusae1ff3d2015-07-13 14:31:28 +03004029 iova_cache_put();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004030
4031 return -ENOMEM;
4032}
4033
4034static void __init iommu_exit_mempool(void)
4035{
4036 kmem_cache_destroy(iommu_devinfo_cache);
4037 kmem_cache_destroy(iommu_domain_cache);
Sakari Ailusae1ff3d2015-07-13 14:31:28 +03004038 iova_cache_put();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004039}
4040
Dan Williams556ab452010-07-23 15:47:56 -07004041static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
4042{
4043 struct dmar_drhd_unit *drhd;
4044 u32 vtbar;
4045 int rc;
4046
4047 /* We know that this device on this chipset has its own IOMMU.
4048 * If we find it under a different IOMMU, then the BIOS is lying
4049 * to us. Hope that the IOMMU for this device is actually
4050 * disabled, and it needs no translation...
4051 */
4052 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
4053 if (rc) {
4054 /* "can't" happen */
4055 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
4056 return;
4057 }
4058 vtbar &= 0xffff0000;
4059
4060	/* we know that this iommu should be at offset 0xa000 from vtbar */
4061 drhd = dmar_find_matched_drhd_unit(pdev);
4062 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
4063 TAINT_FIRMWARE_WORKAROUND,
4064 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
4065 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
4066}
4067DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
4068
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004069static void __init init_no_remapping_devices(void)
4070{
4071 struct dmar_drhd_unit *drhd;
David Woodhouse832bd852014-03-07 15:08:36 +00004072 struct device *dev;
Jiang Liub683b232014-02-19 14:07:32 +08004073 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004074
4075 for_each_drhd_unit(drhd) {
4076 if (!drhd->include_all) {
Jiang Liub683b232014-02-19 14:07:32 +08004077 for_each_active_dev_scope(drhd->devices,
4078 drhd->devices_cnt, i, dev)
4079 break;
David Woodhouse832bd852014-03-07 15:08:36 +00004080 /* ignore DMAR unit if no devices exist */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004081 if (i == drhd->devices_cnt)
4082 drhd->ignored = 1;
4083 }
4084 }
4085
Jiang Liu7c919772014-01-06 14:18:18 +08004086 for_each_active_drhd_unit(drhd) {
Jiang Liu7c919772014-01-06 14:18:18 +08004087 if (drhd->include_all)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004088 continue;
4089
Jiang Liub683b232014-02-19 14:07:32 +08004090 for_each_active_dev_scope(drhd->devices,
4091 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00004092 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004093 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004094 if (i < drhd->devices_cnt)
4095 continue;
4096
David Woodhousec0771df2011-10-14 20:59:46 +01004097 /* This IOMMU has *only* gfx devices. Either bypass it or
4098 set the gfx_mapped flag, as appropriate */
4099 if (dmar_map_gfx) {
4100 intel_iommu_gfx_mapped = 1;
4101 } else {
4102 drhd->ignored = 1;
Jiang Liub683b232014-02-19 14:07:32 +08004103 for_each_active_dev_scope(drhd->devices,
4104 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00004105 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004106 }
4107 }
4108}
4109
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004110#ifdef CONFIG_SUSPEND
4111static int init_iommu_hw(void)
4112{
4113 struct dmar_drhd_unit *drhd;
4114 struct intel_iommu *iommu = NULL;
4115
4116 for_each_active_iommu(iommu, drhd)
4117 if (iommu->qi)
4118 dmar_reenable_qi(iommu);
4119
Joseph Cihulab7792602011-05-03 00:08:37 -07004120 for_each_iommu(iommu, drhd) {
4121 if (drhd->ignored) {
4122 /*
4123 * we always have to disable PMRs or DMA may fail on
4124 * this device
4125 */
4126 if (force_on)
4127 iommu_disable_protect_mem_regions(iommu);
4128 continue;
4129 }
4130
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004131 iommu_flush_write_buffer(iommu);
4132
4133 iommu_set_root_entry(iommu);
4134
4135 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01004136 DMA_CCMD_GLOBAL_INVL);
Jiang Liu2a41cce2014-07-11 14:19:33 +08004137 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4138 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07004139 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004140 }
4141
4142 return 0;
4143}
4144
4145static void iommu_flush_all(void)
4146{
4147 struct dmar_drhd_unit *drhd;
4148 struct intel_iommu *iommu;
4149
4150 for_each_active_iommu(iommu, drhd) {
4151 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01004152 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004153 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01004154 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004155 }
4156}
4157
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004158static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004159{
4160 struct dmar_drhd_unit *drhd;
4161 struct intel_iommu *iommu = NULL;
4162 unsigned long flag;
4163
4164 for_each_active_iommu(iommu, drhd) {
4165 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
4166 GFP_ATOMIC);
4167 if (!iommu->iommu_state)
4168 goto nomem;
4169 }
4170
4171 iommu_flush_all();
4172
4173 for_each_active_iommu(iommu, drhd) {
4174 iommu_disable_translation(iommu);
4175
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004176 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004177
4178 iommu->iommu_state[SR_DMAR_FECTL_REG] =
4179 readl(iommu->reg + DMAR_FECTL_REG);
4180 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
4181 readl(iommu->reg + DMAR_FEDATA_REG);
4182 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
4183 readl(iommu->reg + DMAR_FEADDR_REG);
4184 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
4185 readl(iommu->reg + DMAR_FEUADDR_REG);
4186
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004187 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004188 }
4189 return 0;
4190
4191nomem:
4192 for_each_active_iommu(iommu, drhd)
4193 kfree(iommu->iommu_state);
4194
4195 return -ENOMEM;
4196}
4197
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004198static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004199{
4200 struct dmar_drhd_unit *drhd;
4201 struct intel_iommu *iommu = NULL;
4202 unsigned long flag;
4203
4204 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07004205 if (force_on)
4206 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
4207 else
4208 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004209 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004210 }
4211
4212 for_each_active_iommu(iommu, drhd) {
4213
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004214 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004215
4216 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
4217 iommu->reg + DMAR_FECTL_REG);
4218 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
4219 iommu->reg + DMAR_FEDATA_REG);
4220 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
4221 iommu->reg + DMAR_FEADDR_REG);
4222 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
4223 iommu->reg + DMAR_FEUADDR_REG);
4224
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02004225 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004226 }
4227
4228 for_each_active_iommu(iommu, drhd)
4229 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004230}
4231
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004232static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004233 .resume = iommu_resume,
4234 .suspend = iommu_suspend,
4235};
4236
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004237static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004238{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004239 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004240}
4241
4242#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02004243static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07004244#endif /* CONFIG_PM */
4245
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004246
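/*
 * Parse one RMRR (Reserved Memory Region Reporting) structure from the DMAR
 * table: record its address range, pre-allocate a direct-mapped reserved
 * region for it, and copy the device scope it applies to.
 */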
Jiang Liuc2a0b532014-11-09 22:47:56 +08004247int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004248{
4249 struct acpi_dmar_reserved_memory *rmrr;
Eric Auger0659b8d2017-01-19 20:57:53 +00004250 int prot = DMA_PTE_READ|DMA_PTE_WRITE;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004251 struct dmar_rmrr_unit *rmrru;
Eric Auger0659b8d2017-01-19 20:57:53 +00004252 size_t length;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004253
4254 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
4255 if (!rmrru)
Eric Auger0659b8d2017-01-19 20:57:53 +00004256 goto out;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004257
4258 rmrru->hdr = header;
4259 rmrr = (struct acpi_dmar_reserved_memory *)header;
4260 rmrru->base_address = rmrr->base_address;
4261 rmrru->end_address = rmrr->end_address;
Eric Auger0659b8d2017-01-19 20:57:53 +00004262
4263 length = rmrr->end_address - rmrr->base_address + 1;
4264 rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
4265 IOMMU_RESV_DIRECT);
4266 if (!rmrru->resv)
4267 goto free_rmrru;
4268
Jiang Liu2e455282014-02-19 14:07:36 +08004269 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
4270 ((void *)rmrr) + rmrr->header.length,
4271 &rmrru->devices_cnt);
Eric Auger0659b8d2017-01-19 20:57:53 +00004272 if (rmrru->devices_cnt && rmrru->devices == NULL)
4273 goto free_all;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004274
Jiang Liu2e455282014-02-19 14:07:36 +08004275 list_add(&rmrru->list, &dmar_rmrr_units);
4276
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004277 return 0;
Eric Auger0659b8d2017-01-19 20:57:53 +00004278free_all:
4279 kfree(rmrru->resv);
4280free_rmrru:
4281 kfree(rmrru);
4282out:
4283 return -ENOMEM;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004284}
4285
Jiang Liu6b197242014-11-09 22:47:58 +08004286static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
4287{
4288 struct dmar_atsr_unit *atsru;
4289 struct acpi_dmar_atsr *tmp;
4290
4291 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4292 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
4293 if (atsr->segment != tmp->segment)
4294 continue;
4295 if (atsr->header.length != tmp->header.length)
4296 continue;
4297 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4298 return atsru;
4299 }
4300
4301 return NULL;
4302}
4303
4304int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004305{
4306 struct acpi_dmar_atsr *atsr;
4307 struct dmar_atsr_unit *atsru;
4308
Jiang Liu6b197242014-11-09 22:47:58 +08004309 if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
4310 return 0;
4311
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004312 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
Jiang Liu6b197242014-11-09 22:47:58 +08004313 atsru = dmar_find_atsr(atsr);
4314 if (atsru)
4315 return 0;
4316
4317 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004318 if (!atsru)
4319 return -ENOMEM;
4320
Jiang Liu6b197242014-11-09 22:47:58 +08004321 /*
4322 * If memory is allocated from slab by ACPI _DSM method, we need to
4323 * copy the memory content because the memory buffer will be freed
4324 * on return.
4325 */
4326 atsru->hdr = (void *)(atsru + 1);
4327 memcpy(atsru->hdr, hdr, hdr->length);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004328 atsru->include_all = atsr->flags & 0x1;
Jiang Liu2e455282014-02-19 14:07:36 +08004329 if (!atsru->include_all) {
4330 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4331 (void *)atsr + atsr->header.length,
4332 &atsru->devices_cnt);
4333 if (atsru->devices_cnt && atsru->devices == NULL) {
4334 kfree(atsru);
4335 return -ENOMEM;
4336 }
4337 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004338
Jiang Liu0e242612014-02-19 14:07:34 +08004339 list_add_rcu(&atsru->list, &dmar_atsr_units);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004340
4341 return 0;
4342}
4343
Jiang Liu9bdc5312014-01-06 14:18:27 +08004344static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4345{
4346 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4347 kfree(atsru);
4348}
4349
Jiang Liu6b197242014-11-09 22:47:58 +08004350int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4351{
4352 struct acpi_dmar_atsr *atsr;
4353 struct dmar_atsr_unit *atsru;
4354
4355 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4356 atsru = dmar_find_atsr(atsr);
4357 if (atsru) {
4358 list_del_rcu(&atsru->list);
4359 synchronize_rcu();
4360 intel_iommu_free_atsr(atsru);
4361 }
4362
4363 return 0;
4364}
4365
4366int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4367{
4368 int i;
4369 struct device *dev;
4370 struct acpi_dmar_atsr *atsr;
4371 struct dmar_atsr_unit *atsru;
4372
4373 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4374 atsru = dmar_find_atsr(atsr);
4375 if (!atsru)
4376 return 0;
4377
Linus Torvalds194dc872016-07-27 20:03:31 -07004378 if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
Jiang Liu6b197242014-11-09 22:47:58 +08004379 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4380 i, dev)
4381 return -EBUSY;
Linus Torvalds194dc872016-07-27 20:03:31 -07004382 }
Jiang Liu6b197242014-11-09 22:47:58 +08004383
4384 return 0;
4385}
4386
Jiang Liuffebeb42014-11-09 22:48:02 +08004387static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4388{
4389 int sp, ret = 0;
4390 struct intel_iommu *iommu = dmaru->iommu;
4391
4392 if (g_iommus[iommu->seq_id])
4393 return 0;
4394
4395 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004396 pr_warn("%s: Doesn't support hardware pass through.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004397 iommu->name);
4398 return -ENXIO;
4399 }
4400 if (!ecap_sc_support(iommu->ecap) &&
4401 domain_update_iommu_snooping(iommu)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004402 pr_warn("%s: Doesn't support snooping.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004403 iommu->name);
4404 return -ENXIO;
4405 }
4406 sp = domain_update_iommu_superpage(iommu) - 1;
4407 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004408 pr_warn("%s: Doesn't support large page.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004409 iommu->name);
4410 return -ENXIO;
4411 }
4412
4413 /*
4414 * Disable translation if already enabled prior to OS handover.
4415 */
4416 if (iommu->gcmd & DMA_GCMD_TE)
4417 iommu_disable_translation(iommu);
4418
4419 g_iommus[iommu->seq_id] = iommu;
4420 ret = iommu_init_domains(iommu);
4421 if (ret == 0)
4422 ret = iommu_alloc_root_entry(iommu);
4423 if (ret)
4424 goto out;
4425
David Woodhouse8a94ade2015-03-24 14:54:56 +00004426#ifdef CONFIG_INTEL_IOMMU_SVM
4427 if (pasid_enabled(iommu))
4428 intel_svm_alloc_pasid_tables(iommu);
4429#endif
4430
Jiang Liuffebeb42014-11-09 22:48:02 +08004431 if (dmaru->ignored) {
4432 /*
4433 * we always have to disable PMRs or DMA may fail on this device
4434 */
4435 if (force_on)
4436 iommu_disable_protect_mem_regions(iommu);
4437 return 0;
4438 }
4439
4440 intel_iommu_init_qi(iommu);
4441 iommu_flush_write_buffer(iommu);
David Woodhousea222a7f2015-10-07 23:35:18 +01004442
4443#ifdef CONFIG_INTEL_IOMMU_SVM
4444 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
4445 ret = intel_svm_enable_prq(iommu);
4446 if (ret)
4447 goto disable_iommu;
4448 }
4449#endif
Jiang Liuffebeb42014-11-09 22:48:02 +08004450 ret = dmar_set_interrupt(iommu);
4451 if (ret)
4452 goto disable_iommu;
4453
4454 iommu_set_root_entry(iommu);
4455 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4456 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4457 iommu_enable_translation(iommu);
4458
Jiang Liuffebeb42014-11-09 22:48:02 +08004459 iommu_disable_protect_mem_regions(iommu);
4460 return 0;
4461
4462disable_iommu:
4463 disable_dmar_iommu(iommu);
4464out:
4465 free_dmar_iommu(iommu);
4466 return ret;
4467}
4468
Jiang Liu6b197242014-11-09 22:47:58 +08004469int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4470{
Jiang Liuffebeb42014-11-09 22:48:02 +08004471 int ret = 0;
4472 struct intel_iommu *iommu = dmaru->iommu;
4473
4474 if (!intel_iommu_enabled)
4475 return 0;
4476 if (iommu == NULL)
4477 return -EINVAL;
4478
4479 if (insert) {
4480 ret = intel_iommu_add(dmaru);
4481 } else {
4482 disable_dmar_iommu(iommu);
4483 free_dmar_iommu(iommu);
4484 }
4485
4486 return ret;
Jiang Liu6b197242014-11-09 22:47:58 +08004487}
4488
Jiang Liu9bdc5312014-01-06 14:18:27 +08004489static void intel_iommu_free_dmars(void)
4490{
4491 struct dmar_rmrr_unit *rmrru, *rmrr_n;
4492 struct dmar_atsr_unit *atsru, *atsr_n;
4493
4494 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4495 list_del(&rmrru->list);
4496 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
Eric Auger0659b8d2017-01-19 20:57:53 +00004497 kfree(rmrru->resv);
Jiang Liu9bdc5312014-01-06 14:18:27 +08004498 kfree(rmrru);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004499 }
4500
Jiang Liu9bdc5312014-01-06 14:18:27 +08004501 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4502 list_del(&atsru->list);
4503 intel_iommu_free_atsr(atsru);
4504 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004505}
4506
4507int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4508{
Jiang Liub683b232014-02-19 14:07:32 +08004509 int i, ret = 1;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004510 struct pci_bus *bus;
David Woodhouse832bd852014-03-07 15:08:36 +00004511 struct pci_dev *bridge = NULL;
4512 struct device *tmp;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004513 struct acpi_dmar_atsr *atsr;
4514 struct dmar_atsr_unit *atsru;
4515
4516 dev = pci_physfn(dev);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004517 for (bus = dev->bus; bus; bus = bus->parent) {
Jiang Liub5f82dd2014-02-19 14:07:31 +08004518 bridge = bus->self;
David Woodhoused14053b32015-10-15 09:28:06 +01004519 /* If it's an integrated device, allow ATS */
4520 if (!bridge)
4521 return 1;
4522 /* Connected via non-PCIe: no ATS */
4523 if (!pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08004524 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004525 return 0;
David Woodhoused14053b32015-10-15 09:28:06 +01004526 /* If we found the root port, look it up in the ATSR */
Jiang Liub5f82dd2014-02-19 14:07:31 +08004527 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004528 break;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004529 }
4530
Jiang Liu0e242612014-02-19 14:07:34 +08004531 rcu_read_lock();
Jiang Liub5f82dd2014-02-19 14:07:31 +08004532 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4533 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4534 if (atsr->segment != pci_domain_nr(dev->bus))
4535 continue;
4536
Jiang Liub683b232014-02-19 14:07:32 +08004537 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
David Woodhouse832bd852014-03-07 15:08:36 +00004538 if (tmp == &bridge->dev)
Jiang Liub683b232014-02-19 14:07:32 +08004539 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08004540
4541 if (atsru->include_all)
Jiang Liub683b232014-02-19 14:07:32 +08004542 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08004543 }
Jiang Liub683b232014-02-19 14:07:32 +08004544 ret = 0;
4545out:
Jiang Liu0e242612014-02-19 14:07:34 +08004546 rcu_read_unlock();
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004547
Jiang Liub683b232014-02-19 14:07:32 +08004548 return ret;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004549}
4550
Jiang Liu59ce0512014-02-19 14:07:35 +08004551int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4552{
4553 int ret = 0;
4554 struct dmar_rmrr_unit *rmrru;
4555 struct dmar_atsr_unit *atsru;
4556 struct acpi_dmar_atsr *atsr;
4557 struct acpi_dmar_reserved_memory *rmrr;
4558
4559 if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
4560 return 0;
4561
4562 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4563 rmrr = container_of(rmrru->hdr,
4564 struct acpi_dmar_reserved_memory, header);
4565 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4566 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4567 ((void *)rmrr) + rmrr->header.length,
4568 rmrr->segment, rmrru->devices,
4569 rmrru->devices_cnt);
Jiang Liu27e24952014-06-20 15:08:06 +08004570			if (ret < 0)
Jiang Liu59ce0512014-02-19 14:07:35 +08004571 return ret;
Joerg Roedele6a8c9b2016-02-29 23:49:47 +01004572 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
Jiang Liu27e24952014-06-20 15:08:06 +08004573 dmar_remove_dev_scope(info, rmrr->segment,
4574 rmrru->devices, rmrru->devices_cnt);
Jiang Liu59ce0512014-02-19 14:07:35 +08004575 }
4576 }
4577
4578 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4579 if (atsru->include_all)
4580 continue;
4581
4582 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4583 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4584 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4585 (void *)atsr + atsr->header.length,
4586 atsr->segment, atsru->devices,
4587 atsru->devices_cnt);
4588 if (ret > 0)
4589 break;
4590			else if (ret < 0)
4591 return ret;
Joerg Roedele6a8c9b2016-02-29 23:49:47 +01004592 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
Jiang Liu59ce0512014-02-19 14:07:35 +08004593 if (dmar_remove_dev_scope(info, atsr->segment,
4594 atsru->devices, atsru->devices_cnt))
4595 break;
4596 }
4597 }
4598
4599 return 0;
4600}
4601
Fenghua Yu99dcade2009-11-11 07:23:06 -08004602/*
4603 * Here we only respond to a device being unbound from its driver.
4604 *
4605 * A newly added device is not attached to its DMAR domain here yet. That
4606 * happens when the device is first mapped to an iova.
4607 */
4608static int device_notifier(struct notifier_block *nb,
4609 unsigned long action, void *data)
4610{
4611 struct device *dev = data;
Fenghua Yu99dcade2009-11-11 07:23:06 -08004612 struct dmar_domain *domain;
4613
David Woodhouse3d891942014-03-06 15:59:26 +00004614 if (iommu_dummy(dev))
David Woodhouse44cd6132009-12-02 10:18:30 +00004615 return 0;
4616
Joerg Roedel1196c2f2014-09-30 13:02:03 +02004617 if (action != BUS_NOTIFY_REMOVED_DEVICE)
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004618 return 0;
4619
David Woodhouse1525a292014-03-06 16:19:30 +00004620 domain = find_domain(dev);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004621 if (!domain)
4622 return 0;
4623
Joerg Roedele6de0f82015-07-22 16:30:36 +02004624 dmar_remove_one_dev_info(domain, dev);
Jiang Liuab8dfe22014-07-11 14:19:27 +08004625 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004626 domain_exit(domain);
Alex Williamsona97590e2011-03-04 14:52:16 -07004627
Fenghua Yu99dcade2009-11-11 07:23:06 -08004628 return 0;
4629}
4630
4631static struct notifier_block device_nb = {
4632 .notifier_call = device_notifier,
4633};
4634
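/*
 * Memory hotplug notifier: keep the static identity (si_domain) map in sync
 * with RAM.  Newly onlined ranges get identity mappings; offlined ranges are
 * unmapped, their IOTLB entries flushed and their IOVAs returned.
 */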
Jiang Liu75f05562014-02-19 14:07:37 +08004635static int intel_iommu_memory_notifier(struct notifier_block *nb,
4636 unsigned long val, void *v)
4637{
4638 struct memory_notify *mhp = v;
4639 unsigned long long start, end;
4640 unsigned long start_vpfn, last_vpfn;
4641
4642 switch (val) {
4643 case MEM_GOING_ONLINE:
4644 start = mhp->start_pfn << PAGE_SHIFT;
4645 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4646 if (iommu_domain_identity_map(si_domain, start, end)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004647 pr_warn("Failed to build identity map for [%llx-%llx]\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004648 start, end);
4649 return NOTIFY_BAD;
4650 }
4651 break;
4652
4653 case MEM_OFFLINE:
4654 case MEM_CANCEL_ONLINE:
4655 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4656 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4657 while (start_vpfn <= last_vpfn) {
4658 struct iova *iova;
4659 struct dmar_drhd_unit *drhd;
4660 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00004661 struct page *freelist;
Jiang Liu75f05562014-02-19 14:07:37 +08004662
4663 iova = find_iova(&si_domain->iovad, start_vpfn);
4664 if (iova == NULL) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004665 pr_debug("Failed get IOVA for PFN %lx\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004666 start_vpfn);
4667 break;
4668 }
4669
4670 iova = split_and_remove_iova(&si_domain->iovad, iova,
4671 start_vpfn, last_vpfn);
4672 if (iova == NULL) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004673 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004674 start_vpfn, last_vpfn);
4675 return NOTIFY_BAD;
4676 }
4677
David Woodhouseea8ea462014-03-05 17:09:32 +00004678 freelist = domain_unmap(si_domain, iova->pfn_lo,
4679 iova->pfn_hi);
4680
Jiang Liu75f05562014-02-19 14:07:37 +08004681 rcu_read_lock();
4682 for_each_active_iommu(iommu, drhd)
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02004683 iommu_flush_iotlb_psi(iommu, si_domain,
Jiang Liua156ef92014-07-11 14:19:36 +08004684 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00004685 !freelist, 0);
Jiang Liu75f05562014-02-19 14:07:37 +08004686 rcu_read_unlock();
David Woodhouseea8ea462014-03-05 17:09:32 +00004687 dma_free_pagelist(freelist);
Jiang Liu75f05562014-02-19 14:07:37 +08004688
4689 start_vpfn = iova->pfn_hi + 1;
4690 free_iova_mem(iova);
4691 }
4692 break;
4693 }
4694
4695 return NOTIFY_OK;
4696}
4697
4698static struct notifier_block intel_iommu_memory_nb = {
4699 .notifier_call = intel_iommu_memory_notifier,
4700 .priority = 0
4701};
4702
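/*
 * CPU hotplug teardown: when a CPU dies, purge its per-cpu IOVA caches for
 * every domain and flush any unmaps it still has queued (see
 * intel_iommu_cpu_dead() below).
 */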
Omer Peleg22e2f9f2016-04-20 11:34:11 +03004703static void free_all_cpu_cached_iovas(unsigned int cpu)
4704{
4705 int i;
4706
4707 for (i = 0; i < g_num_of_iommus; i++) {
4708 struct intel_iommu *iommu = g_iommus[i];
4709 struct dmar_domain *domain;
Aaron Campbell0caa7612016-07-02 21:23:24 -03004710 int did;
Omer Peleg22e2f9f2016-04-20 11:34:11 +03004711
4712 if (!iommu)
4713 continue;
4714
Jan Niehusmann3bd4f912016-06-06 14:20:11 +02004715 for (did = 0; did < cap_ndoms(iommu->cap); did++) {
Aaron Campbell0caa7612016-07-02 21:23:24 -03004716 domain = get_iommu_domain(iommu, (u16)did);
Omer Peleg22e2f9f2016-04-20 11:34:11 +03004717
4718 if (!domain)
4719 continue;
4720 free_cpu_cached_iovas(cpu, &domain->iovad);
4721 }
4722 }
4723}
4724
Anna-Maria Gleixner21647612016-11-27 00:13:41 +01004725static int intel_iommu_cpu_dead(unsigned int cpu)
Omer Pelegaa473242016-04-20 11:33:02 +03004726{
Anna-Maria Gleixner21647612016-11-27 00:13:41 +01004727 free_all_cpu_cached_iovas(cpu);
4728 flush_unmaps_timeout(cpu);
4729 return 0;
Omer Pelegaa473242016-04-20 11:33:02 +03004730}
4731
Alex Williamsona5459cf2014-06-12 16:12:31 -06004732static ssize_t intel_iommu_show_version(struct device *dev,
4733 struct device_attribute *attr,
4734 char *buf)
4735{
4736 struct intel_iommu *iommu = dev_get_drvdata(dev);
4737 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4738 return sprintf(buf, "%d:%d\n",
4739 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4740}
4741static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4742
4743static ssize_t intel_iommu_show_address(struct device *dev,
4744 struct device_attribute *attr,
4745 char *buf)
4746{
4747 struct intel_iommu *iommu = dev_get_drvdata(dev);
4748 return sprintf(buf, "%llx\n", iommu->reg_phys);
4749}
4750static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4751
4752static ssize_t intel_iommu_show_cap(struct device *dev,
4753 struct device_attribute *attr,
4754 char *buf)
4755{
4756 struct intel_iommu *iommu = dev_get_drvdata(dev);
4757 return sprintf(buf, "%llx\n", iommu->cap);
4758}
4759static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4760
4761static ssize_t intel_iommu_show_ecap(struct device *dev,
4762 struct device_attribute *attr,
4763 char *buf)
4764{
4765 struct intel_iommu *iommu = dev_get_drvdata(dev);
4766 return sprintf(buf, "%llx\n", iommu->ecap);
4767}
4768static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4769
Alex Williamson2238c082015-07-14 15:24:53 -06004770static ssize_t intel_iommu_show_ndoms(struct device *dev,
4771 struct device_attribute *attr,
4772 char *buf)
4773{
4774 struct intel_iommu *iommu = dev_get_drvdata(dev);
4775 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4776}
4777static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4778
4779static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4780 struct device_attribute *attr,
4781 char *buf)
4782{
4783 struct intel_iommu *iommu = dev_get_drvdata(dev);
4784 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4785 cap_ndoms(iommu->cap)));
4786}
4787static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4788
Alex Williamsona5459cf2014-06-12 16:12:31 -06004789static struct attribute *intel_iommu_attrs[] = {
4790 &dev_attr_version.attr,
4791 &dev_attr_address.attr,
4792 &dev_attr_cap.attr,
4793 &dev_attr_ecap.attr,
Alex Williamson2238c082015-07-14 15:24:53 -06004794 &dev_attr_domains_supported.attr,
4795 &dev_attr_domains_used.attr,
Alex Williamsona5459cf2014-06-12 16:12:31 -06004796 NULL,
4797};
4798
4799static struct attribute_group intel_iommu_group = {
4800 .name = "intel-iommu",
4801 .attrs = intel_iommu_attrs,
4802};
4803
4804const struct attribute_group *intel_iommu_groups[] = {
4805 &intel_iommu_group,
4806 NULL,
4807};
4808
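/*
 * Late initialization entry point: parse the DMAR table and device scopes,
 * reserve IOVA ranges, set up the IOMMUs via init_dmars(), then install
 * intel_dma_ops and register the bus, memory-hotplug and CPU-hotplug
 * notifiers plus the per-IOMMU sysfs attributes.
 */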
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004809int __init intel_iommu_init(void)
4810{
Jiang Liu9bdc5312014-01-06 14:18:27 +08004811 int ret = -ENODEV;
Takao Indoh3a93c842013-04-23 17:35:03 +09004812 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08004813 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004814
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004815 /* VT-d is required for a TXT/tboot launch, so enforce that */
4816 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004817
Jiang Liu3a5670e2014-02-19 14:07:33 +08004818 if (iommu_init_mempool()) {
4819 if (force_on)
4820 panic("tboot: Failed to initialize iommu memory\n");
4821 return -ENOMEM;
4822 }
4823
4824 down_write(&dmar_global_lock);
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004825 if (dmar_table_init()) {
4826 if (force_on)
4827 panic("tboot: Failed to initialize DMAR table\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004828 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004829 }
4830
Suresh Siddhac2c72862011-08-23 17:05:19 -07004831 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004832 if (force_on)
4833 panic("tboot: Failed to initialize DMAR device scope\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004834 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004835 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07004836
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004837 if (no_iommu || dmar_disabled)
Jiang Liu9bdc5312014-01-06 14:18:27 +08004838 goto out_free_dmar;
Suresh Siddha2ae21012008-07-10 11:16:43 -07004839
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004840 if (list_empty(&dmar_rmrr_units))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004841 pr_info("No RMRR found\n");
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004842
4843 if (list_empty(&dmar_atsr_units))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004844 pr_info("No ATSR found\n");
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004845
Joseph Cihula51a63e62011-03-21 11:04:24 -07004846 if (dmar_init_reserved_ranges()) {
4847 if (force_on)
4848 panic("tboot: Failed to reserve iommu ranges\n");
Jiang Liu3a5670e2014-02-19 14:07:33 +08004849 goto out_free_reserved_range;
Joseph Cihula51a63e62011-03-21 11:04:24 -07004850 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004851
4852 init_no_remapping_devices();
4853
Joseph Cihulab7792602011-05-03 00:08:37 -07004854 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004855 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004856 if (force_on)
4857 panic("tboot: Failed to initialize DMARs\n");
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004858 pr_err("Initialization failed\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004859 goto out_free_reserved_range;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004860 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08004861 up_write(&dmar_global_lock);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004862 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004863
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004864#ifdef CONFIG_SWIOTLB
4865 swiotlb = 0;
4866#endif
David Woodhouse19943b02009-08-04 16:19:20 +01004867 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07004868
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004869 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004870
Alex Williamsona5459cf2014-06-12 16:12:31 -06004871 for_each_active_iommu(iommu, drhd)
4872 iommu->iommu_dev = iommu_device_create(NULL, iommu,
4873 intel_iommu_groups,
Kees Cook2439d4a2015-07-24 16:27:57 -07004874 "%s", iommu->name);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004875
Joerg Roedel4236d97d2011-09-06 17:56:07 +02004876 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004877 bus_register_notifier(&pci_bus_type, &device_nb);
Jiang Liu75f05562014-02-19 14:07:37 +08004878 if (si_domain && !hw_pass_through)
4879 register_memory_notifier(&intel_iommu_memory_nb);
Anna-Maria Gleixner21647612016-11-27 00:13:41 +01004880 cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
4881 intel_iommu_cpu_dead);
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02004882 intel_iommu_enabled = 1;
4883
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004884 return 0;
Jiang Liu9bdc5312014-01-06 14:18:27 +08004885
4886out_free_reserved_range:
4887 put_iova_domain(&reserved_iova_list);
Jiang Liu9bdc5312014-01-06 14:18:27 +08004888out_free_dmar:
4889 intel_iommu_free_dmars();
Jiang Liu3a5670e2014-02-19 14:07:33 +08004890 up_write(&dmar_global_lock);
4891 iommu_exit_mempool();
Jiang Liu9bdc5312014-01-06 14:18:27 +08004892 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004893}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07004894
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004895static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
Alex Williamson579305f2014-07-03 09:51:43 -06004896{
4897 struct intel_iommu *iommu = opaque;
4898
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004899 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
Alex Williamson579305f2014-07-03 09:51:43 -06004900 return 0;
4901}
4902
4903/*
4904 * NB - intel-iommu lacks any sort of reference counting for the users of
4905 * dependent devices. If multiple endpoints have intersecting dependent
4906 * devices, unbinding the driver from any one of them will possibly leave
4907 * devices, unbinding the driver from any one of them may leave
4908 */
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004909static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
Han, Weidong3199aa62009-02-26 17:31:12 +08004910{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004911 if (!iommu || !dev || !dev_is_pci(dev))
Han, Weidong3199aa62009-02-26 17:31:12 +08004912 return;
4913
Joerg Roedel2452d9d2015-07-23 16:20:14 +02004914 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
Han, Weidong3199aa62009-02-26 17:31:12 +08004915}
4916
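/*
 * Tear down one device's attachment: disable its dev-IOTLB, clear the
 * context entries for all of its DMA aliases and detach the domain from the
 * IOMMU.  Caller must hold device_domain_lock.
 */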
Joerg Roedel127c7612015-07-23 17:44:46 +02004917static void __dmar_remove_one_dev_info(struct device_domain_info *info)
Weidong Hanc7151a82008-12-08 22:51:37 +08004918{
Weidong Hanc7151a82008-12-08 22:51:37 +08004919 struct intel_iommu *iommu;
4920 unsigned long flags;
Weidong Hanc7151a82008-12-08 22:51:37 +08004921
Joerg Roedel55d94042015-07-22 16:50:40 +02004922 assert_spin_locked(&device_domain_lock);
4923
Joerg Roedelb608ac32015-07-21 18:19:08 +02004924 if (WARN_ON(!info))
Weidong Hanc7151a82008-12-08 22:51:37 +08004925 return;
4926
Joerg Roedel127c7612015-07-23 17:44:46 +02004927 iommu = info->iommu;
4928
4929 if (info->dev) {
4930 iommu_disable_dev_iotlb(info);
4931 domain_context_clear(iommu, info->dev);
4932 }
4933
Joerg Roedelb608ac32015-07-21 18:19:08 +02004934 unlink_domain_info(info);
Roland Dreier3e7abe22011-07-20 06:22:21 -07004935
Joerg Roedeld160aca2015-07-22 11:52:53 +02004936 spin_lock_irqsave(&iommu->lock, flags);
Joerg Roedel127c7612015-07-23 17:44:46 +02004937 domain_detach_iommu(info->domain, iommu);
Joerg Roedeld160aca2015-07-22 11:52:53 +02004938 spin_unlock_irqrestore(&iommu->lock, flags);
Joerg Roedel127c7612015-07-23 17:44:46 +02004939
4940 free_devinfo_mem(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004941}
4942
Joerg Roedel55d94042015-07-22 16:50:40 +02004943static void dmar_remove_one_dev_info(struct dmar_domain *domain,
4944 struct device *dev)
4945{
Joerg Roedel127c7612015-07-23 17:44:46 +02004946 struct device_domain_info *info;
Joerg Roedel55d94042015-07-22 16:50:40 +02004947 unsigned long flags;
4948
Weidong Hanc7151a82008-12-08 22:51:37 +08004949 spin_lock_irqsave(&device_domain_lock, flags);
Joerg Roedel127c7612015-07-23 17:44:46 +02004950 info = dev->archdata.iommu;
4951 __dmar_remove_one_dev_info(info);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004952 spin_unlock_irqrestore(&device_domain_lock, flags);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004953}
4954
4955static int md_domain_init(struct dmar_domain *domain, int guest_width)
4956{
4957 int adjust_width;
4958
Robin Murphy0fb5fe82015-01-12 17:51:16 +00004959 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
4960 DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004961 domain_reserve_special_ranges(domain);
4962
4963 /* calculate AGAW */
4964 domain->gaw = guest_width;
4965 adjust_width = guestwidth_to_adjustwidth(guest_width);
4966 domain->agaw = width_to_agaw(adjust_width);
4967
Weidong Han5e98c4b2008-12-08 23:03:27 +08004968 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08004969 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01004970 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004971 domain->max_addr = 0;
Weidong Han5e98c4b2008-12-08 23:03:27 +08004972
4973 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07004974 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004975 if (!domain->pgd)
4976 return -ENOMEM;
4977 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4978 return 0;
4979}
4980
Joerg Roedel00a77de2015-03-26 13:43:08 +01004981static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
Kay, Allen M38717942008-09-09 18:37:29 +03004982{
Joerg Roedel5d450802008-12-03 14:52:32 +01004983 struct dmar_domain *dmar_domain;
Joerg Roedel00a77de2015-03-26 13:43:08 +01004984 struct iommu_domain *domain;
4985
4986 if (type != IOMMU_DOMAIN_UNMANAGED)
4987 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004988
Jiang Liuab8dfe22014-07-11 14:19:27 +08004989 dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
Joerg Roedel5d450802008-12-03 14:52:32 +01004990 if (!dmar_domain) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004991 pr_err("Can't allocate dmar_domain\n");
Joerg Roedel00a77de2015-03-26 13:43:08 +01004992 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004993 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004994 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004995 pr_err("Domain initialization failed\n");
Jiang Liu92d03cc2014-02-19 14:07:28 +08004996 domain_exit(dmar_domain);
Joerg Roedel00a77de2015-03-26 13:43:08 +01004997 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004998 }
Allen Kay8140a952011-10-14 12:32:17 -07004999 domain_update_iommu_cap(dmar_domain);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005000
Joerg Roedel00a77de2015-03-26 13:43:08 +01005001 domain = &dmar_domain->domain;
Joerg Roedel8a0e7152012-01-26 19:40:54 +01005002 domain->geometry.aperture_start = 0;
5003 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
5004 domain->geometry.force_aperture = true;
5005
Joerg Roedel00a77de2015-03-26 13:43:08 +01005006 return domain;
Kay, Allen M38717942008-09-09 18:37:29 +03005007}
Kay, Allen M38717942008-09-09 18:37:29 +03005008
Joerg Roedel00a77de2015-03-26 13:43:08 +01005009static void intel_iommu_domain_free(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03005010{
Joerg Roedel00a77de2015-03-26 13:43:08 +01005011 domain_exit(to_dmar_domain(domain));
Kay, Allen M38717942008-09-09 18:37:29 +03005012}
Kay, Allen M38717942008-09-09 18:37:29 +03005013
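/*
 * Attach a device to an IOMMU API domain.  RMRR-locked devices are
 * refused, any domain the device was previously attached to is torn
 * down first, and the domain's address width is clamped to what this
 * device's IOMMU can address, dropping page-table levels if necessary.
 */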
Joerg Roedel4c5478c2008-12-03 14:58:24 +01005014static int intel_iommu_attach_device(struct iommu_domain *domain,
5015 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03005016{
Joerg Roedel00a77de2015-03-26 13:43:08 +01005017 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005018 struct intel_iommu *iommu;
5019 int addr_width;
David Woodhouse156baca2014-03-09 14:00:57 -07005020 u8 bus, devfn;
Kay, Allen M38717942008-09-09 18:37:29 +03005021
Alex Williamsonc875d2c2014-07-03 09:57:02 -06005022 if (device_is_rmrr_locked(dev)) {
5023 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
5024 return -EPERM;
5025 }
5026
David Woodhouse7207d8f2014-03-09 16:31:06 -07005027 /* normally dev is not mapped */
5028 if (unlikely(domain_context_mapped(dev))) {
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005029 struct dmar_domain *old_domain;
5030
David Woodhouse1525a292014-03-06 16:19:30 +00005031 old_domain = find_domain(dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005032 if (old_domain) {
Joerg Roedeld160aca2015-07-22 11:52:53 +02005033 rcu_read_lock();
Joerg Roedelde7e8882015-07-22 11:58:07 +02005034 dmar_remove_one_dev_info(old_domain, dev);
Joerg Roedeld160aca2015-07-22 11:52:53 +02005035 rcu_read_unlock();
Joerg Roedel62c22162014-12-09 12:56:45 +01005036
5037 if (!domain_type_is_vm_or_si(old_domain) &&
5038 list_empty(&old_domain->devices))
5039 domain_exit(old_domain);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005040 }
5041 }
5042
David Woodhouse156baca2014-03-09 14:00:57 -07005043 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005044 if (!iommu)
5045 return -ENODEV;
5046
5047 /* check if this iommu agaw is sufficient for max mapped address */
5048 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01005049 if (addr_width > cap_mgaw(iommu->cap))
5050 addr_width = cap_mgaw(iommu->cap);
5051
5052 if (dmar_domain->max_addr > (1LL << addr_width)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02005053		pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01005055		       __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005056 return -EFAULT;
5057 }
Tom Lyona99c47a2010-05-17 08:20:45 +01005058 dmar_domain->gaw = addr_width;
5059
5060 /*
5061 * Knock out extra levels of page tables if necessary
5062 */
5063 while (iommu->agaw < dmar_domain->agaw) {
5064 struct dma_pte *pte;
5065
5066 pte = dmar_domain->pgd;
5067 if (dma_pte_present(pte)) {
Sheng Yang25cbff12010-06-12 19:21:42 +08005068 dmar_domain->pgd = (struct dma_pte *)
5069 phys_to_virt(dma_pte_addr(pte));
Jan Kiszka7a661012010-11-02 08:05:51 +01005070 free_pgtable_page(pte);
Tom Lyona99c47a2010-05-17 08:20:45 +01005071 }
5072 dmar_domain->agaw--;
5073 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005074
Joerg Roedel28ccce02015-07-21 14:45:31 +02005075 return domain_add_dev_info(dmar_domain, dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005076}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005077
Joerg Roedel4c5478c2008-12-03 14:58:24 +01005078static void intel_iommu_detach_device(struct iommu_domain *domain,
5079 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03005080{
Joerg Roedele6de0f82015-07-22 16:30:36 +02005081 dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
Kay, Allen M38717942008-09-09 18:37:29 +03005082}
Kay, Allen M38717942008-09-09 18:37:29 +03005083
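/*
 * IOMMU API map callback: convert IOMMU_READ/WRITE/CACHE into DMA_PTE
 * bits, grow the domain's max_addr if needed (refusing addresses beyond
 * the domain's gaw), round the size up to whole pages and install the
 * mapping with domain_pfn_mapping().
 */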
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01005084static int intel_iommu_map(struct iommu_domain *domain,
5085 unsigned long iova, phys_addr_t hpa,
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02005086 size_t size, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03005087{
Joerg Roedel00a77de2015-03-26 13:43:08 +01005088 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005089 u64 max_addr;
Joerg Roedeldde57a22008-12-03 15:04:09 +01005090 int prot = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005091 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005092
Joerg Roedeldde57a22008-12-03 15:04:09 +01005093 if (iommu_prot & IOMMU_READ)
5094 prot |= DMA_PTE_READ;
5095 if (iommu_prot & IOMMU_WRITE)
5096 prot |= DMA_PTE_WRITE;
Sheng Yang9cf06692009-03-18 15:33:07 +08005097 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
5098 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01005099
David Woodhouse163cc522009-06-28 00:51:17 +01005100 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01005101 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005102 u64 end;
5103
5104 /* check if minimum agaw is sufficient for mapped address */
Tom Lyon8954da12010-05-17 08:19:52 +01005105 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005106 if (end < max_addr) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02005107			pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
Tom Lyon8954da12010-05-17 08:19:52 +01005109			       __func__, dmar_domain->gaw, max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005110 return -EFAULT;
5111 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01005112 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005113 }
David Woodhousead051222009-06-28 14:22:28 +01005114 /* Round up size to next multiple of PAGE_SIZE, if it and
5115 the low bits of hpa would take us onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01005116 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01005117 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
5118 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005119 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03005120}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005121
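/*
 * IOMMU API unmap callback.  The range is widened to the size of the
 * underlying (possibly large-page) mapping, the page tables for the pfn
 * range are torn down into a freelist, the IOTLB is flushed on every
 * IOMMU serving this domain, and only then are the page-table pages
 * freed.
 */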
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02005122static size_t intel_iommu_unmap(struct iommu_domain *domain,
David Woodhouseea8ea462014-03-05 17:09:32 +00005123 unsigned long iova, size_t size)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005124{
Joerg Roedel00a77de2015-03-26 13:43:08 +01005125 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
David Woodhouseea8ea462014-03-05 17:09:32 +00005126 struct page *freelist = NULL;
5127 struct intel_iommu *iommu;
5128 unsigned long start_pfn, last_pfn;
5129 unsigned int npages;
Joerg Roedel42e8c182015-07-21 15:50:02 +02005130 int iommu_id, level = 0;
Sheng Yang4b99d352009-07-08 11:52:52 +01005131
David Woodhouse5cf0a762014-03-19 16:07:49 +00005132 /* Cope with horrid API which requires us to unmap more than the
5133 size argument if it happens to be a large-page mapping. */
Joerg Roedeldc02e462015-08-13 11:15:13 +02005134 BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
David Woodhouse5cf0a762014-03-19 16:07:49 +00005135
5136 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
5137 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
5138
David Woodhouseea8ea462014-03-05 17:09:32 +00005139 start_pfn = iova >> VTD_PAGE_SHIFT;
5140 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
5141
5142 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
5143
5144 npages = last_pfn - start_pfn + 1;
5145
Joerg Roedel29a27712015-07-21 17:17:12 +02005146 for_each_domain_iommu(iommu_id, dmar_domain) {
Joerg Roedela1ddcbe2015-07-21 15:20:32 +02005147 iommu = g_iommus[iommu_id];
David Woodhouseea8ea462014-03-05 17:09:32 +00005148
Joerg Roedel42e8c182015-07-21 15:50:02 +02005149 iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
5150 start_pfn, npages, !freelist, 0);
David Woodhouseea8ea462014-03-05 17:09:32 +00005151 }
5152
5153 dma_free_pagelist(freelist);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08005154
David Woodhouse163cc522009-06-28 00:51:17 +01005155 if (dmar_domain->max_addr == iova + size)
5156 dmar_domain->max_addr = iova;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01005157
David Woodhouse5cf0a762014-03-19 16:07:49 +00005158 return size;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005159}
Kay, Allen M38717942008-09-09 18:37:29 +03005160
Joerg Roedeld14d6572008-12-03 15:06:57 +01005161static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
Varun Sethibb5547a2013-03-29 01:23:58 +05305162 dma_addr_t iova)
Kay, Allen M38717942008-09-09 18:37:29 +03005163{
Joerg Roedel00a77de2015-03-26 13:43:08 +01005164 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Kay, Allen M38717942008-09-09 18:37:29 +03005165 struct dma_pte *pte;
David Woodhouse5cf0a762014-03-19 16:07:49 +00005166 int level = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005167 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03005168
David Woodhouse5cf0a762014-03-19 16:07:49 +00005169 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
Kay, Allen M38717942008-09-09 18:37:29 +03005170 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005171 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03005172
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08005173 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03005174}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01005175
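/*
 * Report cache coherency (snooping) and interrupt remapping support
 * to the IOMMU core.
 */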
Joerg Roedel5d587b82014-09-05 10:50:45 +02005176static bool intel_iommu_capable(enum iommu_cap cap)
Sheng Yangdbb9fd82009-03-18 15:33:06 +08005177{
Sheng Yangdbb9fd82009-03-18 15:33:06 +08005178 if (cap == IOMMU_CAP_CACHE_COHERENCY)
Joerg Roedel5d587b82014-09-05 10:50:45 +02005179 return domain_update_iommu_snooping(NULL) == 1;
Tom Lyon323f99c2010-07-02 16:56:14 -04005180 if (cap == IOMMU_CAP_INTR_REMAP)
Joerg Roedel5d587b82014-09-05 10:50:45 +02005181 return irq_remapping_enabled == 1;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08005182
Joerg Roedel5d587b82014-09-05 10:50:45 +02005183 return false;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08005184}
5185
Alex Williamsonabdfdde2012-05-30 14:19:19 -06005186static int intel_iommu_add_device(struct device *dev)
Alex Williamson70ae6f02011-10-21 15:56:11 -04005187{
Alex Williamsona5459cf2014-06-12 16:12:31 -06005188 struct intel_iommu *iommu;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06005189 struct iommu_group *group;
David Woodhouse156baca2014-03-09 14:00:57 -07005190 u8 bus, devfn;
Alex Williamson70ae6f02011-10-21 15:56:11 -04005191
Alex Williamsona5459cf2014-06-12 16:12:31 -06005192 iommu = device_to_iommu(dev, &bus, &devfn);
5193 if (!iommu)
Alex Williamson70ae6f02011-10-21 15:56:11 -04005194 return -ENODEV;
5195
Alex Williamsona5459cf2014-06-12 16:12:31 -06005196 iommu_device_link(iommu->iommu_dev, dev);
Alex Williamsonabdfdde2012-05-30 14:19:19 -06005197
Alex Williamsone17f9ff2014-07-03 09:51:37 -06005198 group = iommu_group_get_for_dev(dev);
Alex Williamson783f1572012-05-30 14:19:43 -06005199
Alex Williamsone17f9ff2014-07-03 09:51:37 -06005200 if (IS_ERR(group))
5201 return PTR_ERR(group);
Alex Williamson70ae6f02011-10-21 15:56:11 -04005202
Alex Williamsonabdfdde2012-05-30 14:19:19 -06005203 iommu_group_put(group);
Alex Williamsone17f9ff2014-07-03 09:51:37 -06005204 return 0;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06005205}
5206
5207static void intel_iommu_remove_device(struct device *dev)
5208{
Alex Williamsona5459cf2014-06-12 16:12:31 -06005209 struct intel_iommu *iommu;
5210 u8 bus, devfn;
5211
5212 iommu = device_to_iommu(dev, &bus, &devfn);
5213 if (!iommu)
5214 return;
5215
Alex Williamsonabdfdde2012-05-30 14:19:19 -06005216 iommu_group_remove_device(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06005217
5218 iommu_device_unlink(iommu->iommu_dev, dev);
Alex Williamson70ae6f02011-10-21 15:56:11 -04005219}
5220
Eric Auger0659b8d2017-01-19 20:57:53 +00005221static void intel_iommu_get_resv_regions(struct device *device,
5222 struct list_head *head)
5223{
5224 struct iommu_resv_region *reg;
5225 struct dmar_rmrr_unit *rmrr;
5226 struct device *i_dev;
5227 int i;
5228
5229 rcu_read_lock();
5230 for_each_rmrr_units(rmrr) {
5231 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
5232 i, i_dev) {
5233 if (i_dev != device)
5234 continue;
5235
5236 list_add_tail(&rmrr->resv->list, head);
5237 }
5238 }
5239 rcu_read_unlock();
5240
5241 reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
5242 IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
5243 0, IOMMU_RESV_RESERVED);
5244 if (!reg)
5245 return;
5246 list_add_tail(&reg->list, head);
5247}
5248
5249static void intel_iommu_put_resv_regions(struct device *dev,
5250 struct list_head *head)
5251{
5252 struct iommu_resv_region *entry, *next;
5253
5254 list_for_each_entry_safe(entry, next, head, list) {
5255 if (entry->type == IOMMU_RESV_RESERVED)
5256 kfree(entry);
5257 }
5258}
5259
David Woodhouse2f26e0a2015-09-09 11:40:47 +01005260#ifdef CONFIG_INTEL_IOMMU_SVM
Jacob Pan65ca7f52016-12-06 10:14:23 -08005261#define MAX_NR_PASID_BITS (20)
5262static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
5263{
5264 /*
5265	 * Convert ecap_pss to extended context entry pts encoding, also
5266 * respect the soft pasid_max value set by the iommu.
5267 * - number of PASID bits = ecap_pss + 1
5268 * - number of PASID table entries = 2^(pts + 5)
5269 * Therefore, pts = ecap_pss - 4
5270 * e.g. KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15
5271 */
5272 if (ecap_pss(iommu->ecap) < 5)
5273 return 0;
5274
5275 /* pasid_max is encoded as actual number of entries not the bits */
5276 return find_first_bit((unsigned long *)&iommu->pasid_max,
5277 MAX_NR_PASID_BITS) - 5;
5278}
5279
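/*
 * Enable PASID (SVM) support for one device: locate its extended
 * context entry, point it at this IOMMU's PASID state and PASID tables,
 * set CONTEXT_PASIDE (first converting pass-through entries to a
 * PASID-capable translation type), flush the context cache, and finally
 * enable ATS/PRI state on the device itself.
 */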
David Woodhouse2f26e0a2015-09-09 11:40:47 +01005280int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
5281{
5282 struct device_domain_info *info;
5283 struct context_entry *context;
5284 struct dmar_domain *domain;
5285 unsigned long flags;
5286 u64 ctx_lo;
5287 int ret;
5288
5289 domain = get_valid_domain_for_dev(sdev->dev);
5290 if (!domain)
5291 return -EINVAL;
5292
5293 spin_lock_irqsave(&device_domain_lock, flags);
5294 spin_lock(&iommu->lock);
5295
5296 ret = -EINVAL;
5297 info = sdev->dev->archdata.iommu;
5298 if (!info || !info->pasid_supported)
5299 goto out;
5300
5301 context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
5302 if (WARN_ON(!context))
5303 goto out;
5304
5305 ctx_lo = context[0].lo;
5306
5307 sdev->did = domain->iommu_did[iommu->seq_id];
5308 sdev->sid = PCI_DEVID(info->bus, info->devfn);
5309
5310 if (!(ctx_lo & CONTEXT_PASIDE)) {
5311 context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
Jacob Pan65ca7f52016-12-06 10:14:23 -08005312 context[1].lo = (u64)virt_to_phys(iommu->pasid_table) |
5313 intel_iommu_get_pts(iommu);
5314
David Woodhouse2f26e0a2015-09-09 11:40:47 +01005315 wmb();
5316 /* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
5317 * extended to permit requests-with-PASID if the PASIDE bit
5318		 * is set, which makes sense. For CONTEXT_TT_PASS_THROUGH,
5319 * however, the PASIDE bit is ignored and requests-with-PASID
5320 * are unconditionally blocked. Which makes less sense.
5321 * So convert from CONTEXT_TT_PASS_THROUGH to one of the new
5322 * "guest mode" translation types depending on whether ATS
5323 * is available or not. Annoyingly, we can't use the new
5324 * modes *unless* PASIDE is set. */
5325 if ((ctx_lo & CONTEXT_TT_MASK) == (CONTEXT_TT_PASS_THROUGH << 2)) {
5326 ctx_lo &= ~CONTEXT_TT_MASK;
5327 if (info->ats_supported)
5328 ctx_lo |= CONTEXT_TT_PT_PASID_DEV_IOTLB << 2;
5329 else
5330 ctx_lo |= CONTEXT_TT_PT_PASID << 2;
5331 }
5332 ctx_lo |= CONTEXT_PASIDE;
David Woodhouse907fea32015-10-13 14:11:13 +01005333 if (iommu->pasid_state_table)
5334 ctx_lo |= CONTEXT_DINVE;
David Woodhousea222a7f2015-10-07 23:35:18 +01005335 if (info->pri_supported)
5336 ctx_lo |= CONTEXT_PRS;
David Woodhouse2f26e0a2015-09-09 11:40:47 +01005337 context[0].lo = ctx_lo;
5338 wmb();
5339 iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
5340 DMA_CCMD_MASK_NOBIT,
5341 DMA_CCMD_DEVICE_INVL);
5342 }
5343
5344 /* Enable PASID support in the device, if it wasn't already */
5345 if (!info->pasid_enabled)
5346 iommu_enable_dev_iotlb(info);
5347
5348 if (info->ats_enabled) {
5349 sdev->dev_iotlb = 1;
5350 sdev->qdep = info->ats_qdep;
5351 if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
5352 sdev->qdep = 0;
5353 }
5354 ret = 0;
5355
5356 out:
5357 spin_unlock(&iommu->lock);
5358 spin_unlock_irqrestore(&device_domain_lock, flags);
5359
5360 return ret;
5361}
5362
5363struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
5364{
5365 struct intel_iommu *iommu;
5366 u8 bus, devfn;
5367
5368 if (iommu_dummy(dev)) {
5369 dev_warn(dev,
5370 "No IOMMU translation for device; cannot enable SVM\n");
5371 return NULL;
5372 }
5373
5374 iommu = device_to_iommu(dev, &bus, &devfn);
5375	if (!iommu) {
Sudeep Duttb9997e32015-10-18 20:54:37 -07005376 dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
David Woodhouse2f26e0a2015-09-09 11:40:47 +01005377 return NULL;
5378 }
5379
5380 if (!iommu->pasid_table) {
Sudeep Duttb9997e32015-10-18 20:54:37 -07005381 dev_err(dev, "PASID not enabled on IOMMU; cannot enable SVM\n");
David Woodhouse2f26e0a2015-09-09 11:40:47 +01005382 return NULL;
5383 }
5384
5385 return iommu;
5386}
5387#endif /* CONFIG_INTEL_IOMMU_SVM */
5388
Thierry Redingb22f6432014-06-27 09:03:12 +02005389static const struct iommu_ops intel_iommu_ops = {
Eric Auger0659b8d2017-01-19 20:57:53 +00005390 .capable = intel_iommu_capable,
5391 .domain_alloc = intel_iommu_domain_alloc,
5392 .domain_free = intel_iommu_domain_free,
5393 .attach_dev = intel_iommu_attach_device,
5394 .detach_dev = intel_iommu_detach_device,
5395 .map = intel_iommu_map,
5396 .unmap = intel_iommu_unmap,
5397 .map_sg = default_iommu_map_sg,
5398 .iova_to_phys = intel_iommu_iova_to_phys,
5399 .add_device = intel_iommu_add_device,
5400 .remove_device = intel_iommu_remove_device,
5401 .get_resv_regions = intel_iommu_get_resv_regions,
5402 .put_resv_regions = intel_iommu_put_resv_regions,
5403 .device_group = pci_device_group,
5404 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01005405};
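/*
 * These callbacks are not called directly; the IOMMU core dispatches to
 * them through the generic API.  A minimal sketch of how a caller such
 * as VFIO ends up in the routines above (illustrative only; 'pdev',
 * 'iova' and 'phys' are placeholders):
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *
 *	if (dom && !iommu_attach_device(dom, &pdev->dev)) {
 *		iommu_map(dom, iova, phys, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *		...
 *		iommu_unmap(dom, iova, SZ_4K);
 *		iommu_detach_device(dom, &pdev->dev);
 *	}
 *	if (dom)
 *		iommu_domain_free(dom);
 *
 * iommu_domain_alloc() lands in intel_iommu_domain_alloc(),
 * iommu_map()/iommu_unmap() in intel_iommu_map()/intel_iommu_unmap(),
 * and so on for the other ops.
 */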
David Woodhouse9af88142009-02-13 23:18:03 +00005406
Daniel Vetter94526182013-01-20 23:50:13 +01005407static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
5408{
5409 /* G4x/GM45 integrated gfx dmar support is totally busted. */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02005410 pr_info("Disabling IOMMU for graphics on this chipset\n");
Daniel Vetter94526182013-01-20 23:50:13 +01005411 dmar_map_gfx = 0;
5412}
5413
5414DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
5415DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
5416DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
5417DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
5418DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
5419DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
5420DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
5421
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08005422static void quirk_iommu_rwbf(struct pci_dev *dev)
David Woodhouse9af88142009-02-13 23:18:03 +00005423{
5424 /*
5425 * Mobile 4 Series Chipset neglects to set RWBF capability,
Daniel Vetter210561f2013-01-21 19:48:59 +01005426 * but needs it. Same seems to hold for the desktop versions.
David Woodhouse9af88142009-02-13 23:18:03 +00005427 */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02005428 pr_info("Forcing write-buffer flush capability\n");
David Woodhouse9af88142009-02-13 23:18:03 +00005429 rwbf_quirk = 1;
5430}
5431
5432DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
Daniel Vetter210561f2013-01-21 19:48:59 +01005433DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
5434DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
5435DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
5436DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
5437DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
5438DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
David Woodhousee0fc7e02009-09-30 09:12:17 -07005439
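/*
 * GGC is the graphics control register in the host bridge config space
 * of the Calpella/Ironlake platforms handled below.  If the BIOS left
 * no stolen memory for a shadow GTT (GGC_MEMORY_VT_ENABLED clear),
 * IOMMU support for graphics is disabled; otherwise batched IOTLB
 * flushing is disabled (intel_iommu_strict), since the gfx device must
 * be idle before a flush.
 */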
Adam Jacksoneecfd572010-08-25 21:17:34 +01005440#define GGC 0x52
5441#define GGC_MEMORY_SIZE_MASK (0xf << 8)
5442#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
5443#define GGC_MEMORY_SIZE_1M (0x1 << 8)
5444#define GGC_MEMORY_SIZE_2M (0x3 << 8)
5445#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
5446#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
5447#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
5448#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
5449
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08005450static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
David Woodhouse9eecabc2010-09-21 22:28:23 +01005451{
5452 unsigned short ggc;
5453
Adam Jacksoneecfd572010-08-25 21:17:34 +01005454 if (pci_read_config_word(dev, GGC, &ggc))
David Woodhouse9eecabc2010-09-21 22:28:23 +01005455 return;
5456
Adam Jacksoneecfd572010-08-25 21:17:34 +01005457 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02005458 pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
David Woodhouse9eecabc2010-09-21 22:28:23 +01005459 dmar_map_gfx = 0;
David Woodhouse6fbcfb32011-09-25 19:11:14 -07005460 } else if (dmar_map_gfx) {
5461 /* we have to ensure the gfx device is idle before we flush */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02005462 pr_info("Disabling batched IOTLB flush on Ironlake\n");
David Woodhouse6fbcfb32011-09-25 19:11:14 -07005463 intel_iommu_strict = 1;
5464 }
David Woodhouse9eecabc2010-09-21 22:28:23 +01005465}
5466DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
5467DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
5468DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
5469DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
5470
David Woodhousee0fc7e02009-09-30 09:12:17 -07005471/* On Tylersburg chipsets, some BIOSes have been known to enable the
5472 ISOCH DMAR unit for the Azalia sound device, but not give it any
5473 TLB entries, which causes it to deadlock. Check for that. We do
5474 this in a function called from init_dmars(), instead of in a PCI
5475 quirk, because we don't want to print the obnoxious "BIOS broken"
5476 message if VT-d is actually disabled.
5477*/
5478static void __init check_tylersburg_isoch(void)
5479{
5480 struct pci_dev *pdev;
5481 uint32_t vtisochctrl;
5482
5483 /* If there's no Azalia in the system anyway, forget it. */
5484 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
5485 if (!pdev)
5486 return;
5487 pci_dev_put(pdev);
5488
5489 /* System Management Registers. Might be hidden, in which case
5490 we can't do the sanity check. But that's OK, because the
5491 known-broken BIOSes _don't_ actually hide it, so far. */
5492 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
5493 if (!pdev)
5494 return;
5495
5496 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
5497 pci_dev_put(pdev);
5498 return;
5499 }
5500
5501 pci_dev_put(pdev);
5502
5503 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
5504 if (vtisochctrl & 1)
5505 return;
5506
5507 /* Drop all bits other than the number of TLB entries */
5508 vtisochctrl &= 0x1c;
5509
5510 /* If we have the recommended number of TLB entries (16), fine. */
5511 if (vtisochctrl == 0x10)
5512 return;
5513
5514 /* Zero TLB entries? You get to ride the short bus to school. */
5515 if (!vtisochctrl) {
5516 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
5517 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
5518 dmi_get_system_info(DMI_BIOS_VENDOR),
5519 dmi_get_system_info(DMI_BIOS_VERSION),
5520 dmi_get_system_info(DMI_PRODUCT_VERSION));
5521 iommu_identity_mapping |= IDENTMAP_AZALIA;
5522 return;
5523 }
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02005524
5525 pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
David Woodhousee0fc7e02009-09-30 09:12:17 -07005526 vtisochctrl);
5527}