/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>,
 *          Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"DMAR: " fmt

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/crash_dump.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
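
/*
 * Worked example (illustrative): for the default 48-bit gaw,
 * __DOMAIN_MAX_PFN(48) = 2^36 - 1. On 64-bit builds that is also
 * DOMAIN_MAX_PFN; on 32-bit it would overflow unsigned long, so the
 * min_t() above clamps it to (unsigned long)-1.
 */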

/* IO virtual address start page frame number */
#define IOVA_START_PFN		(1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is a power-of-two multiple of 4KiB and that
 * the mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are a power-of-two multiple of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}
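
/*
 * Worked example (illustrative): a 48-bit domain gives width_to_agaw(48)
 * = DIV_ROUND_UP(18, 9) = 2 and agaw_to_level(2) = 4, i.e. a four-level
 * page table; a 39-bit domain maps to agaw 1 and a three-level table.
 */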

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}
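
/*
 * Worked example (illustrative): a level-2 PTE spans level_size(2) = 512
 * 4KiB pages (2MiB) and a level-3 PTE spans 1GiB; align_to_level(0x1234, 2)
 * rounds pfn 0x1234 up to 0x1400, the next 512-page boundary.
 */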

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
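
/*
 * Note (illustrative): on x86, PAGE_SHIFT == VTD_PAGE_SHIFT == 12, so the
 * conversions above are identity operations; they only do real work if the
 * MM page size ever exceeds the 4KiB VT-d page size.
 */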

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d cannot be enabled successfully
 * (used when the kernel is launched with TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	lo;
	u64	hi;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

/*
 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
 * if marked present.
 */
static phys_addr_t root_entry_lctp(struct root_entry *re)
{
	if (!(re->lo & 1))
		return 0;

	return re->lo & VTD_PAGE_MASK;
}

/*
 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
 * if marked present.
 */
static phys_addr_t root_entry_uctp(struct root_entry *re)
{
	if (!(re->hi & 1))
		return 0;

	return re->hi & VTD_PAGE_MASK;
}
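
/*
 * Note (illustrative): with extended context support (ECS), the 'lo' half
 * of a root entry points to the context table for devfns 0x00-0x7f and
 * the 'hi' half to the table for devfns 0x80-0xff; bit 0 of each half is
 * its present bit. In legacy mode only 'lo' is used.
 */
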
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: available
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline void context_clear_pasid_enable(struct context_entry *context)
{
	context->lo &= ~(1ULL << 11);
}

static inline bool context_pasid_enabled(struct context_entry *context)
{
	return !!(context->lo & (1ULL << 11));
}

static inline void context_set_copied(struct context_entry *context)
{
	context->hi |= (1ull << 3);
}

static inline bool context_copied(struct context_entry *context)
{
	return !!(context->hi & (1ULL << 3));
}

static inline bool __context_present(struct context_entry *context)
{
	return (context->lo & 1);
}

static inline bool context_present(struct context_entry *context)
{
	return context_pasid_enabled(context) ?
	     __context_present(context) :
	     __context_present(context) && !context_copied(context);
}
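
/*
 * Note (hedged): the "copied" bit marks context entries taken over from a
 * previous kernel (e.g. across a kdump kexec). When PASID is not in use,
 * such an entry reads as not present here, so the new kernel will replace
 * it the first time it maps the device.
 */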

static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline int context_domain_id(struct context_entry *c)
{
	return((c->hi >> 8) & 0xffff);
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
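
/*
 * Illustrative sketch (not a verbatim call site): the context-mapping code
 * later in this file builds a present entry roughly as:
 *
 *	context_set_domain_id(context, id);
 *	context_set_address_width(context, domain->agaw);
 *	context_set_address_root(context, virt_to_phys(domain->pgd));
 *	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
 *	context_set_fault_enable(context);
 *	context_set_present(context);
 */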
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000314
Mark McLoughlin622ba122008-11-20 15:49:46 +0000315/*
316 * 0: readable
317 * 1: writable
318 * 2-6: reserved
319 * 7: super page
Sheng Yang9cf066972009-03-18 15:33:07 +0800320 * 8-10: available
321 * 11: snoop behavior
Mark McLoughlin622ba122008-11-20 15:49:46 +0000322 * 12-63: Host physcial address
323 */
324struct dma_pte {
325 u64 val;
326};
Mark McLoughlin622ba122008-11-20 15:49:46 +0000327
static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
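
/*
 * Note (illustrative): a table of PTEs fills exactly one 4KiB VT-d page,
 * so first_pte_in_page() is true when a pte pointer sits at index 0 of
 * its table; the PTE-walking loops below use it to stop at table
 * boundaries.
 */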

/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* A domain can represent a virtual machine; such a domain may own
 * devices across multiple iommus, e.g. for a KVM guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */

	struct iommu_domain domain;	/* generic domain data structure for
					   iommu core */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct device *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	u64	base_address;		/* reserved base address */
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
	struct page *freelist[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev);
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int intel_iommu_ecs = 1;

/* We only actually use ECS when PASID support (on the new bit 40)
 * is also advertised. Some early implementations -- the ones with
 * PASID support on bit 28 -- have issues even when we *only* use
 * extended root/context tables. */
#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
			    ecap_pasid(iommu->ecap))

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static const struct iommu_ops intel_iommu_ops;

static bool translation_pre_enabled(struct intel_iommu *iommu)
{
	return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
}

static void clear_translation_pre_enabled(struct intel_iommu *iommu)
{
	iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct intel_iommu *iommu)
{
	u32 gsts;

	gsts = readl(iommu->reg + DMAR_GSTS_REG);
	if (gsts & DMA_GSTS_TES)
		iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
}

/* Convert generic 'struct iommu_domain' to private struct dmar_domain */
static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct dmar_domain, domain);
}

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			pr_info("IOMMU enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			pr_info("IOMMU disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			pr_info("Disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			pr_info("Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			pr_info("Disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			pr_info("Disable supported super page\n");
			intel_iommu_superpage = 0;
		} else if (!strncmp(str, "ecs_off", 7)) {
			pr_info("Disable extended context table support\n");
			intel_iommu_ecs = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
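
/*
 * Usage example (illustrative): options combine with commas on the kernel
 * command line, e.g.
 *
 *	intel_iommu=on,strict,igfx_off
 *
 * force-enables the IOMMU, disables batched IOTLB flushing and leaves the
 * integrated graphics device unmapped.
 */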

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

static inline int domain_type_is_vm(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}

static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
	return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
				DOMAIN_FLAG_STATIC_IDENTITY);
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
				       unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and fall
 * back to a smaller supported agaw for iommus that don't support the
 * default.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns the single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain_type_is_vm_or_si(domain));
	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool found = false;
	int i;

	domain->iommu_coherency = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		found = true;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	if (found)
		return;

	/* No hardware attached; use lowest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_coherent(iommu->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	rcu_read_unlock();
}

static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ret = 1;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			if (!ecap_sc_support(iommu->ecap)) {
				ret = 0;
				break;
			}
		}
	}
	rcu_read_unlock();

	return ret;
}

static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		return 0;
	}

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			mask &= cap_super_page_val(iommu->cap);
			if (!mask)
				break;
		}
	}
	rcu_read_unlock();

	return fls(mask);
}
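
/*
 * Worked example (illustrative): mask starts at 0xf (up to 1TiB pages).
 * If one iommu only advertises 2MiB superpages (capability value 0x1),
 * the AND reduces mask to 0x1 and fls(0x1) = 1, so the domain is limited
 * to 2MiB superpages; an empty mask gives fls(0) = 0, i.e. 4KiB only.
 */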

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain->iommu_snooping = domain_update_iommu_snooping(NULL);
	domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}

static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
						       u8 bus, u8 devfn, int alloc)
{
	struct root_entry *root = &iommu->root_entry[bus];
	struct context_entry *context;
	u64 *entry;

	entry = &root->lo;
	if (ecs_enabled(iommu)) {
		if (devfn >= 0x80) {
			devfn -= 0x80;
			entry = &root->hi;
		}
		devfn *= 2;
	}
	if (*entry & 1)
		context = phys_to_virt(*entry & VTD_PAGE_MASK);
	else {
		unsigned long phy_addr;
		if (!alloc)
			return NULL;

		context = alloc_pgtable_page(iommu->node);
		if (!context)
			return NULL;

		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		*entry = phy_addr | 1;
		__iommu_flush_cache(iommu, entry, sizeof(*entry));
	}
	return &context[devfn];
}
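
/*
 * Note (illustrative): in ECS mode each root entry covers only half of the
 * devfn space (lo: 0x00-0x7f, hi: 0x80-0xff) and extended context entries
 * are twice the size of legacy ones, hence the devfn *= 2 when indexing.
 * entry must default to &root->lo *before* the ECS block so that the
 * hi-half pointer is not clobbered.
 */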

static int iommu_dummy(struct device *dev)
{
	return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	struct pci_dev *ptmp, *pdev = NULL;
	u16 segment = 0;
	int i;

	if (iommu_dummy(dev))
		return NULL;

	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		segment = pci_domain_nr(pdev->bus);
	} else if (has_acpi_companion(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				*bus = drhd->devices[i].bus;
				*devfn = drhd->devices[i].devfn;
				goto out;
			}

			if (!pdev || !dev_is_pci(tmp))
				continue;

			ptmp = to_pci_dev(tmp);
			if (ptmp->subordinate &&
			    ptmp->subordinate->number <= pdev->bus->number &&
			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
		got_pdev:
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;
			goto out;
		}
	}
	iommu = NULL;
 out:
	rcu_read_unlock();

	return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (context)
		ret = context_present(context);
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (context) {
		context_clear_entry(context);
		__iommu_flush_cache(iommu, context, sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		context = iommu_context_addr(iommu, i, 0, 0);
		if (context)
			free_pgtable_page(context);

		if (!ecs_enabled(iommu))
			continue;

		context = iommu_context_addr(iommu, i, 0x80, 0);
		if (context)
			free_pgtable_page(context);

	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (!domain_pfn_supported(domain, pfn))
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}
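
/*
 * Worked example (illustrative): with a four-level table (agaw 2), looking
 * up pfn 0x12345 with *target_level == 1 takes the 9-bit pfn_level_offset()
 * index at levels 4, 3 and 2 on the way down, allocating any missing
 * intermediate tables, and returns the level-1 PTE. With *target_level == 0
 * the walk also stops early if it hits a superpage PTE.
 */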


/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (dma_pte_superpage(pte)) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte; a TLB flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}

static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level - 1);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	pte = page_address(pg);
	do {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
		pte++;
	} while (!first_pte_in_page(pte));

	return freelist;
}

static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}

/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
struct page *domain_unmap(struct dmar_domain *domain,
			  unsigned long start_pfn,
			  unsigned long last_pfn)
{
	struct page *freelist = NULL;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
				       domain->pgd, 0, start_pfn, last_pfn, NULL);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		struct page *pgd_page = virt_to_page(domain->pgd);
		pgd_page->freelist = freelist;
		freelist = pgd_page;

		domain->pgd = NULL;
	}

	return freelist;
}

void dma_free_pagelist(struct page *freelist)
{
	struct page *pg;

	while ((pg = freelist)) {
		freelist = pg->freelist;
		free_pgtable_page(page_address(pg));
	}
}
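
/*
 * Illustrative flow (hedged sketch): callers unmapping an IOVA range
 * typically do
 *
 *	freelist = domain_unmap(domain, start_pfn, last_pfn);
 *	... flush the IOTLB for the range ...
 *	dma_free_pagelist(freelist);
 *
 * so that no page-table page returns to the allocator while the hardware
 * might still be walking it.
 */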

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root) {
		pr_err("Allocating root entry for %s failed\n",
			iommu->name);
		return -ENOMEM;
	}

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	u64 addr;
	u32 sts;
	unsigned long flag;

	addr = virt_to_phys(iommu->root_entry);
	if (ecs_enabled(iommu))
		addr |= DMA_RTADDR_RTT;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
1262
1263/* return value determine if we need a write buffer flush */
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001264static void __iommu_flush_context(struct intel_iommu *iommu,
1265 u16 did, u16 source_id, u8 function_mask,
1266 u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001267{
1268 u64 val = 0;
1269 unsigned long flag;
1270
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001271 switch (type) {
1272 case DMA_CCMD_GLOBAL_INVL:
1273 val = DMA_CCMD_GLOBAL_INVL;
1274 break;
1275 case DMA_CCMD_DOMAIN_INVL:
1276 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1277 break;
1278 case DMA_CCMD_DEVICE_INVL:
1279 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1280 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1281 break;
1282 default:
1283 BUG();
1284 }
1285 val |= DMA_CCMD_ICC;
1286
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001287 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001288 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1289
1290	/* Make sure hardware completes it */
1291 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1292 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1293
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001294 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001295}
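/*
 * Illustrative usage sketch (hypothetical helper, not in the driver): a
 * device-selective context-cache invalidation builds the source-id the
 * same way domain_context_mapping_one() does below, (bus << 8) | devfn,
 * and masks no source-id bits.
 */
static void example_flush_device_context(struct intel_iommu *iommu,
					 u16 did, u8 bus, u8 devfn)
{
	u16 sid = ((u16)bus << 8) | devfn;

	__iommu_flush_context(iommu, did, sid, DMA_CCMD_MASK_NOBIT,
			      DMA_CCMD_DEVICE_INVL);
}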
1296
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001297/* return value determines whether we need a write buffer flush */
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001298static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1299 u64 addr, unsigned int size_order, u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001300{
1301 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1302 u64 val = 0, val_iva = 0;
1303 unsigned long flag;
1304
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001305 switch (type) {
1306 case DMA_TLB_GLOBAL_FLUSH:
1307		/* global flush doesn't need to set IVA_REG */
1308 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1309 break;
1310 case DMA_TLB_DSI_FLUSH:
1311 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1312 break;
1313 case DMA_TLB_PSI_FLUSH:
1314 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
David Woodhouseea8ea462014-03-05 17:09:32 +00001315 /* IH bit is passed in as part of address */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001316 val_iva = size_order | addr;
1317 break;
1318 default:
1319 BUG();
1320 }
1321 /* Note: set drain read/write */
1322#if 0
1323 /*
1324	 * This is probably intended to be extra safe. It looks like we can
1325 * ignore it without any impact.
1326 */
1327 if (cap_read_drain(iommu->cap))
1328 val |= DMA_TLB_READ_DRAIN;
1329#endif
1330 if (cap_write_drain(iommu->cap))
1331 val |= DMA_TLB_WRITE_DRAIN;
1332
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001333 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001334 /* Note: Only uses first TLB reg currently */
1335 if (val_iva)
1336 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1337 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1338
1339	/* Make sure hardware completes it */
1340 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1341 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1342
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001343 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001344
1345 /* check IOTLB invalidation granularity */
1346 if (DMA_TLB_IAIG(val) == 0)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001347 pr_err("Flush IOTLB failed\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001348 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001349 pr_debug("TLB flush request %Lx, actual %Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001350 (unsigned long long)DMA_TLB_IIRG(type),
1351 (unsigned long long)DMA_TLB_IAIG(val));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001352}
1353
David Woodhouse64ae8922014-03-09 12:52:30 -07001354static struct device_domain_info *
1355iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
1356 u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001357{
Quentin Lambert2f119c72015-02-06 10:59:53 +01001358 bool found = false;
Yu Zhao93a23a72009-05-18 13:51:37 +08001359 unsigned long flags;
1360 struct device_domain_info *info;
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001361 struct pci_dev *pdev;
Yu Zhao93a23a72009-05-18 13:51:37 +08001362
1363 if (!ecap_dev_iotlb_support(iommu->ecap))
1364 return NULL;
1365
1366 if (!iommu->qi)
1367 return NULL;
1368
1369 spin_lock_irqsave(&device_domain_lock, flags);
1370 list_for_each_entry(info, &domain->devices, link)
Jiang Liuc3b497c2014-07-11 14:19:25 +08001371 if (info->iommu == iommu && info->bus == bus &&
1372 info->devfn == devfn) {
Quentin Lambert2f119c72015-02-06 10:59:53 +01001373 found = true;
Yu Zhao93a23a72009-05-18 13:51:37 +08001374 break;
1375 }
1376 spin_unlock_irqrestore(&device_domain_lock, flags);
1377
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001378 if (!found || !info->dev || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001379 return NULL;
1380
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001381 pdev = to_pci_dev(info->dev);
1382
1383 if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
Yu Zhao93a23a72009-05-18 13:51:37 +08001384 return NULL;
1385
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001386 if (!dmar_find_matched_atsr_unit(pdev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001387 return NULL;
1388
Yu Zhao93a23a72009-05-18 13:51:37 +08001389 return info;
1390}
1391
1392static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1393{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001394 if (!info || !dev_is_pci(info->dev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001395 return;
1396
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001397 pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
Yu Zhao93a23a72009-05-18 13:51:37 +08001398}
1399
1400static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1401{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001402 if (!info->dev || !dev_is_pci(info->dev) ||
1403 !pci_ats_enabled(to_pci_dev(info->dev)))
Yu Zhao93a23a72009-05-18 13:51:37 +08001404 return;
1405
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001406 pci_disable_ats(to_pci_dev(info->dev));
Yu Zhao93a23a72009-05-18 13:51:37 +08001407}
1408
1409static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1410 u64 addr, unsigned mask)
1411{
1412 u16 sid, qdep;
1413 unsigned long flags;
1414 struct device_domain_info *info;
1415
1416 spin_lock_irqsave(&device_domain_lock, flags);
1417 list_for_each_entry(info, &domain->devices, link) {
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001418 struct pci_dev *pdev;
1419 if (!info->dev || !dev_is_pci(info->dev))
1420 continue;
1421
1422 pdev = to_pci_dev(info->dev);
1423 if (!pci_ats_enabled(pdev))
Yu Zhao93a23a72009-05-18 13:51:37 +08001424 continue;
1425
1426 sid = info->bus << 8 | info->devfn;
David Woodhouse0bcb3e22014-03-06 17:12:03 +00001427 qdep = pci_ats_queue_depth(pdev);
Yu Zhao93a23a72009-05-18 13:51:37 +08001428 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1429 }
1430 spin_unlock_irqrestore(&device_domain_lock, flags);
1431}
1432
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001433static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
David Woodhouseea8ea462014-03-05 17:09:32 +00001434 unsigned long pfn, unsigned int pages, int ih, int map)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001435{
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001436 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
David Woodhouse03d6a242009-06-28 15:33:46 +01001437 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001438
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001439 BUG_ON(pages == 0);
1440
David Woodhouseea8ea462014-03-05 17:09:32 +00001441 if (ih)
1442 ih = 1 << 6;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001443 /*
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001444	 * Fall back to a domain-selective flush if there is no PSI support or
1445	 * the size is too big.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001446	 * PSI requires the size to be 2 ^ x pages, and the base address to be
1447	 * naturally aligned to the size.
1448 */
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001449 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1450 iommu->flush.flush_iotlb(iommu, did, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001451 DMA_TLB_DSI_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001452 else
David Woodhouseea8ea462014-03-05 17:09:32 +00001453 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001454 DMA_TLB_PSI_FLUSH);
Yu Zhaobf92df32009-06-29 11:31:45 +08001455
1456 /*
Nadav Amit82653632010-04-01 13:24:40 +03001457	 * In caching mode, changing a page from non-present to present requires
1458	 * a flush. However, the device IOTLB doesn't need to be flushed in this case.
Yu Zhaobf92df32009-06-29 11:31:45 +08001459 */
Nadav Amit82653632010-04-01 13:24:40 +03001460 if (!cap_caching_mode(iommu->cap) || !map)
Yu Zhao93a23a72009-05-18 13:51:37 +08001461 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001462}
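/*
 * Worked example for the mask computation above (illustrative numbers):
 * pages = 9 gives mask = ilog2(__roundup_pow_of_two(9)) = ilog2(16) = 4,
 * so the request is widened to an aligned 16-page block, since PSI can
 * only describe power-of-two ranges. If mask exceeded
 * cap_max_amask_val(iommu->cap), the code would fall back to a
 * domain-selective flush instead.
 */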
1463
mark grossf8bab732008-02-08 04:18:38 -08001464static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1465{
1466 u32 pmen;
1467 unsigned long flags;
1468
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001469 raw_spin_lock_irqsave(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001470 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1471 pmen &= ~DMA_PMEN_EPM;
1472 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1473
1474 /* wait for the protected region status bit to clear */
1475 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1476 readl, !(pmen & DMA_PMEN_PRS), pmen);
1477
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001478 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001479}
1480
Jiang Liu2a41cce2014-07-11 14:19:33 +08001481static void iommu_enable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001482{
1483 u32 sts;
1484 unsigned long flags;
1485
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001486 raw_spin_lock_irqsave(&iommu->register_lock, flags);
David Woodhousec416daa2009-05-10 20:30:58 +01001487 iommu->gcmd |= DMA_GCMD_TE;
1488 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001489
1490	/* Make sure hardware completes it */
1491 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001492 readl, (sts & DMA_GSTS_TES), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001493
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001494 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001495}
1496
Jiang Liu2a41cce2014-07-11 14:19:33 +08001497static void iommu_disable_translation(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001498{
1499 u32 sts;
1500 unsigned long flag;
1501
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001502 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001503 iommu->gcmd &= ~DMA_GCMD_TE;
1504 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1505
1506	/* Make sure hardware completes it */
1507 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001508 readl, (!(sts & DMA_GSTS_TES)), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001509
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001510 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001511}
1512
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001513
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001514static int iommu_init_domains(struct intel_iommu *iommu)
1515{
1516 unsigned long ndomains;
1517 unsigned long nlongs;
1518
1519 ndomains = cap_ndoms(iommu->cap);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001520 pr_debug("%s: Number of Domains supported <%ld>\n",
1521 iommu->name, ndomains);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001522 nlongs = BITS_TO_LONGS(ndomains);
1523
Donald Dutile94a91b52009-08-20 16:51:34 -04001524 spin_lock_init(&iommu->lock);
1525
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001526	/* TBD: there might be 64K domains;
1527	 * consider a different allocation scheme for future chips
1528 */
1529 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1530 if (!iommu->domain_ids) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001531 pr_err("%s: Allocating domain id array failed\n",
1532 iommu->name);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001533 return -ENOMEM;
1534 }
1535 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1536 GFP_KERNEL);
1537 if (!iommu->domains) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001538 pr_err("%s: Allocating domain array failed\n",
1539 iommu->name);
Jiang Liu852bdb02014-01-06 14:18:11 +08001540 kfree(iommu->domain_ids);
1541 iommu->domain_ids = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001542 return -ENOMEM;
1543 }
1544
1545 /*
1546	 * If caching mode is set, invalid translations are tagged
1547	 * with domain id 0, so we need to pre-allocate it.
1548 */
1549 if (cap_caching_mode(iommu->cap))
1550 set_bit(0, iommu->domain_ids);
1551 return 0;
1552}
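/*
 * Sizing example (illustrative): with cap_ndoms(iommu->cap) == 256,
 * nlongs = BITS_TO_LONGS(256) == 4 on a 64-bit kernel (256 bits / 64 bits
 * per long), so the domain-id bitmap takes 32 bytes while the
 * iommu->domains pointer array takes 256 * sizeof(void *) == 2048 bytes.
 */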
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001553
Jiang Liuffebeb42014-11-09 22:48:02 +08001554static void disable_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001555{
1556 struct dmar_domain *domain;
Jiang Liu2a46ddf2014-07-11 14:19:30 +08001557 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001558
Donald Dutile94a91b52009-08-20 16:51:34 -04001559 if ((iommu->domains) && (iommu->domain_ids)) {
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001560 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
Jiang Liua4eaa862014-02-19 14:07:30 +08001561 /*
1562	 * Domain id 0 is reserved for invalid translations
1563	 * if the hardware supports caching mode.
1564 */
1565 if (cap_caching_mode(iommu->cap) && i == 0)
1566 continue;
1567
Donald Dutile94a91b52009-08-20 16:51:34 -04001568 domain = iommu->domains[i];
1569 clear_bit(i, iommu->domain_ids);
Jiang Liu129ad282014-07-11 14:19:31 +08001570 if (domain_detach_iommu(domain, iommu) == 0 &&
1571 !domain_type_is_vm(domain))
Jiang Liu92d03cc2014-02-19 14:07:28 +08001572 domain_exit(domain);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001573 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001574 }
1575
1576 if (iommu->gcmd & DMA_GCMD_TE)
1577 iommu_disable_translation(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08001578}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001579
Jiang Liuffebeb42014-11-09 22:48:02 +08001580static void free_dmar_iommu(struct intel_iommu *iommu)
1581{
1582 if ((iommu->domains) && (iommu->domain_ids)) {
1583 kfree(iommu->domains);
1584 kfree(iommu->domain_ids);
1585 iommu->domains = NULL;
1586 iommu->domain_ids = NULL;
1587 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001588
Weidong Hand9630fe2008-12-08 11:06:32 +08001589 g_iommus[iommu->seq_id] = NULL;
1590
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001591 /* free context mapping */
1592 free_context_table(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001593}
1594
Jiang Liuab8dfe22014-07-11 14:19:27 +08001595static struct dmar_domain *alloc_domain(int flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001596{
Jiang Liu92d03cc2014-02-19 14:07:28 +08001597	/* Domain id for virtual machines; it is never set in a context entry */
1598 static atomic_t vm_domid = ATOMIC_INIT(0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001599 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001600
1601 domain = alloc_domain_mem();
1602 if (!domain)
1603 return NULL;
1604
Jiang Liuab8dfe22014-07-11 14:19:27 +08001605 memset(domain, 0, sizeof(*domain));
Suresh Siddha4c923d42009-10-02 11:01:24 -07001606 domain->nid = -1;
Jiang Liuab8dfe22014-07-11 14:19:27 +08001607 domain->flags = flags;
Jiang Liu92d03cc2014-02-19 14:07:28 +08001608 spin_lock_init(&domain->iommu_lock);
1609 INIT_LIST_HEAD(&domain->devices);
Jiang Liuab8dfe22014-07-11 14:19:27 +08001610 if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
Jiang Liu92d03cc2014-02-19 14:07:28 +08001611 domain->id = atomic_inc_return(&vm_domid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001612
1613 return domain;
1614}
1615
Jiang Liufb170fb2014-07-11 14:19:28 +08001616static int __iommu_attach_domain(struct dmar_domain *domain,
1617 struct intel_iommu *iommu)
1618{
1619 int num;
1620 unsigned long ndomains;
1621
1622 ndomains = cap_ndoms(iommu->cap);
1623 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1624 if (num < ndomains) {
1625 set_bit(num, iommu->domain_ids);
1626 iommu->domains[num] = domain;
1627 } else {
1628 num = -ENOSPC;
1629 }
1630
1631 return num;
1632}
1633
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001634static int iommu_attach_domain(struct dmar_domain *domain,
1635 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001636{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001637 int num;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001638 unsigned long flags;
1639
Weidong Han8c11e792008-12-08 15:29:22 +08001640 spin_lock_irqsave(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001641 num = __iommu_attach_domain(domain, iommu);
Jiang Liu44bde612014-07-11 14:19:29 +08001642 spin_unlock_irqrestore(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001643 if (num < 0)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001644 pr_err("%s: No free domain ids\n", iommu->name);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001645
Jiang Liufb170fb2014-07-11 14:19:28 +08001646 return num;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001647}
1648
Jiang Liu44bde612014-07-11 14:19:29 +08001649static int iommu_attach_vm_domain(struct dmar_domain *domain,
1650 struct intel_iommu *iommu)
1651{
1652 int num;
1653 unsigned long ndomains;
1654
1655 ndomains = cap_ndoms(iommu->cap);
1656 for_each_set_bit(num, iommu->domain_ids, ndomains)
1657 if (iommu->domains[num] == domain)
1658 return num;
1659
1660 return __iommu_attach_domain(domain, iommu);
1661}
1662
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001663static void iommu_detach_domain(struct dmar_domain *domain,
1664 struct intel_iommu *iommu)
1665{
1666 unsigned long flags;
1667 int num, ndomains;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001668
1669 spin_lock_irqsave(&iommu->lock, flags);
Jiang Liufb170fb2014-07-11 14:19:28 +08001670 if (domain_type_is_vm_or_si(domain)) {
1671 ndomains = cap_ndoms(iommu->cap);
1672 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1673 if (iommu->domains[num] == domain) {
1674 clear_bit(num, iommu->domain_ids);
1675 iommu->domains[num] = NULL;
1676 break;
1677 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001678 }
Jiang Liufb170fb2014-07-11 14:19:28 +08001679 } else {
1680 clear_bit(domain->id, iommu->domain_ids);
1681 iommu->domains[domain->id] = NULL;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001682 }
Weidong Han8c11e792008-12-08 15:29:22 +08001683 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001684}
1685
Jiang Liufb170fb2014-07-11 14:19:28 +08001686static void domain_attach_iommu(struct dmar_domain *domain,
1687 struct intel_iommu *iommu)
1688{
1689 unsigned long flags;
1690
1691 spin_lock_irqsave(&domain->iommu_lock, flags);
1692 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
1693 domain->iommu_count++;
1694 if (domain->iommu_count == 1)
1695 domain->nid = iommu->node;
1696 domain_update_iommu_cap(domain);
1697 }
1698 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1699}
1700
1701static int domain_detach_iommu(struct dmar_domain *domain,
1702 struct intel_iommu *iommu)
1703{
1704 unsigned long flags;
1705 int count = INT_MAX;
1706
1707 spin_lock_irqsave(&domain->iommu_lock, flags);
1708 if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
1709 count = --domain->iommu_count;
1710 domain_update_iommu_cap(domain);
1711 }
1712 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1713
1714 return count;
1715}
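/*
 * Illustrative sketch (hypothetical helper): domain_attach_iommu() and
 * domain_detach_iommu() maintain a per-domain count of attached IOMMUs,
 * so a caller dropping the last reference can free a non-VM domain,
 * mirroring what disable_dmar_iommu() does above.
 */
static void example_put_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	if (domain_detach_iommu(domain, iommu) == 0 &&
	    !domain_type_is_vm(domain))
		domain_exit(domain);
}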
1716
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001717static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001718static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001719
Joseph Cihula51a63e62011-03-21 11:04:24 -07001720static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001721{
1722 struct pci_dev *pdev = NULL;
1723 struct iova *iova;
1724 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001725
Robin Murphy0fb5fe82015-01-12 17:51:16 +00001726 init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
1727 DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001728
Mark Gross8a443df2008-03-04 14:59:31 -08001729 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1730 &reserved_rbtree_key);
1731
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001732 /* IOAPIC ranges shouldn't be accessed by DMA */
1733 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1734 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001735 if (!iova) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001736 pr_err("Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001737 return -ENODEV;
1738 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001739
1740 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1741 for_each_pci_dev(pdev) {
1742 struct resource *r;
1743
1744 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1745 r = &pdev->resource[i];
1746 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1747 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001748 iova = reserve_iova(&reserved_iova_list,
1749 IOVA_PFN(r->start),
1750 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001751 if (!iova) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001752 pr_err("Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001753 return -ENODEV;
1754 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001755 }
1756 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001757 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001758}
1759
1760static void domain_reserve_special_ranges(struct dmar_domain *domain)
1761{
1762 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1763}
1764
1765static inline int guestwidth_to_adjustwidth(int gaw)
1766{
1767 int agaw;
1768 int r = (gaw - 12) % 9;
1769
1770 if (r == 0)
1771 agaw = gaw;
1772 else
1773 agaw = gaw + 9 - r;
1774 if (agaw > 64)
1775 agaw = 64;
1776 return agaw;
1777}
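/*
 * Worked examples for guestwidth_to_adjustwidth() (illustrative):
 * gaw = 48: r = (48 - 12) % 9 = 0, so agaw = 48.
 * gaw = 40: r = (40 - 12) % 9 = 1, so agaw = 40 + 9 - 1 = 48.
 * gaw = 62: r = (62 - 12) % 9 = 5, so agaw = 62 + 9 - 5 = 66, clamped to 64.
 * Each 9-bit step corresponds to one more page-table level of 512 entries.
 */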
1778
1779static int domain_init(struct dmar_domain *domain, int guest_width)
1780{
1781 struct intel_iommu *iommu;
1782 int adjust_width, agaw;
1783 unsigned long sagaw;
1784
Robin Murphy0fb5fe82015-01-12 17:51:16 +00001785 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
1786 DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001787 domain_reserve_special_ranges(domain);
1788
1789 /* calculate AGAW */
Weidong Han8c11e792008-12-08 15:29:22 +08001790 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001791 if (guest_width > cap_mgaw(iommu->cap))
1792 guest_width = cap_mgaw(iommu->cap);
1793 domain->gaw = guest_width;
1794 adjust_width = guestwidth_to_adjustwidth(guest_width);
1795 agaw = width_to_agaw(adjust_width);
1796 sagaw = cap_sagaw(iommu->cap);
1797 if (!test_bit(agaw, &sagaw)) {
1798 /* hardware doesn't support it, choose a bigger one */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001799 pr_debug("Hardware doesn't support agaw %d\n", agaw);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001800 agaw = find_next_bit(&sagaw, 5, agaw);
1801 if (agaw >= 5)
1802 return -ENODEV;
1803 }
1804 domain->agaw = agaw;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001805
Weidong Han8e6040972008-12-08 15:49:06 +08001806 if (ecap_coherent(iommu->ecap))
1807 domain->iommu_coherency = 1;
1808 else
1809 domain->iommu_coherency = 0;
1810
Sheng Yang58c610b2009-03-18 15:33:05 +08001811 if (ecap_sc_support(iommu->ecap))
1812 domain->iommu_snooping = 1;
1813 else
1814 domain->iommu_snooping = 0;
1815
David Woodhouse214e39a2014-03-19 10:38:49 +00001816 if (intel_iommu_superpage)
1817 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1818 else
1819 domain->iommu_superpage = 0;
1820
Suresh Siddha4c923d42009-10-02 11:01:24 -07001821 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001822
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001823 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001824 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001825 if (!domain->pgd)
1826 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001827 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001828 return 0;
1829}
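/*
 * Illustrative note (an assumption about the capability encoding): if
 * cap_super_page_val(iommu->cap) returns 0x3 (both the 2MiB and 1GiB
 * bits set), fls(0x3) == 2 and domain->iommu_superpage permits two extra
 * page-size levels; with no superpage support it stays 0 and only 4KiB
 * pages are used.
 */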
1830
1831static void domain_exit(struct dmar_domain *domain)
1832{
David Woodhouseea8ea462014-03-05 17:09:32 +00001833 struct page *freelist = NULL;
Alex Williamson71684402015-03-04 11:30:10 -07001834 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001835
1836	/* Domain 0 is reserved, so don't process it */
1837 if (!domain)
1838 return;
1839
Alex Williamson7b668352011-05-24 12:02:41 +01001840 /* Flush any lazy unmaps that may reference this domain */
1841 if (!intel_iommu_strict)
1842 flush_unmaps_timeout(0);
1843
Jiang Liu92d03cc2014-02-19 14:07:28 +08001844 /* remove associated devices */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001845 domain_remove_dev_info(domain);
Jiang Liu92d03cc2014-02-19 14:07:28 +08001846
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001847 /* destroy iovas */
1848 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001849
David Woodhouseea8ea462014-03-05 17:09:32 +00001850 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001851
Jiang Liu92d03cc2014-02-19 14:07:28 +08001852 /* clear attached or cached domains */
Jiang Liu0e242612014-02-19 14:07:34 +08001853 rcu_read_lock();
Alex Williamson71684402015-03-04 11:30:10 -07001854 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
1855 iommu_detach_domain(domain, g_iommus[i]);
Jiang Liu0e242612014-02-19 14:07:34 +08001856 rcu_read_unlock();
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001857
David Woodhouseea8ea462014-03-05 17:09:32 +00001858 dma_free_pagelist(freelist);
1859
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001860 free_domain_mem(domain);
1861}
1862
David Woodhouse64ae8922014-03-09 12:52:30 -07001863static int domain_context_mapping_one(struct dmar_domain *domain,
1864 struct intel_iommu *iommu,
1865 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001866{
1867 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001868 unsigned long flags;
Weidong Hanea6606b2008-12-08 23:08:15 +08001869 struct dma_pte *pgd;
Weidong Hanea6606b2008-12-08 23:08:15 +08001870 int id;
1871 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001872 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001873
1874 pr_debug("Set context mapping for %02x:%02x.%d\n",
1875 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001876
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001877 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001878 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1879 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001880
David Woodhouse03ecc322015-02-13 14:35:21 +00001881 spin_lock_irqsave(&iommu->lock, flags);
1882 context = iommu_context_addr(iommu, bus, devfn, 1);
1883 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001884 if (!context)
1885 return -ENOMEM;
1886 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001887 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001888 spin_unlock_irqrestore(&iommu->lock, flags);
1889 return 0;
1890 }
1891
Joerg Roedelcf484d02015-06-12 12:21:46 +02001892 context_clear_entry(context);
1893
Weidong Hanea6606b2008-12-08 23:08:15 +08001894 id = domain->id;
1895 pgd = domain->pgd;
1896
Jiang Liuab8dfe22014-07-11 14:19:27 +08001897 if (domain_type_is_vm_or_si(domain)) {
Jiang Liu44bde612014-07-11 14:19:29 +08001898 if (domain_type_is_vm(domain)) {
1899 id = iommu_attach_vm_domain(domain, iommu);
Jiang Liufb170fb2014-07-11 14:19:28 +08001900 if (id < 0) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001901 spin_unlock_irqrestore(&iommu->lock, flags);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001902 pr_err("%s: No free domain ids\n", iommu->name);
Weidong Hanea6606b2008-12-08 23:08:15 +08001903 return -EFAULT;
1904 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001905 }
1906
1907		/* Skip the top levels of the page tables for an
1908		 * iommu whose agaw is smaller than the default.
Chris Wright1672af12009-12-02 12:06:34 -08001909 * Unnecessary for PT mode.
Weidong Hanea6606b2008-12-08 23:08:15 +08001910 */
Chris Wright1672af12009-12-02 12:06:34 -08001911 if (translation != CONTEXT_TT_PASS_THROUGH) {
1912 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1913 pgd = phys_to_virt(dma_pte_addr(pgd));
1914 if (!dma_pte_present(pgd)) {
1915 spin_unlock_irqrestore(&iommu->lock, flags);
1916 return -ENOMEM;
1917 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001918 }
1919 }
1920 }
1921
1922 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001923
Yu Zhao93a23a72009-05-18 13:51:37 +08001924 if (translation != CONTEXT_TT_PASS_THROUGH) {
David Woodhouse64ae8922014-03-09 12:52:30 -07001925 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
Yu Zhao93a23a72009-05-18 13:51:37 +08001926 translation = info ? CONTEXT_TT_DEV_IOTLB :
1927 CONTEXT_TT_MULTI_LEVEL;
1928 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001929 /*
1930 * In pass through mode, AW must be programmed to indicate the largest
1931 * AGAW value supported by hardware. And ASR is ignored by hardware.
1932 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001933 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001934 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001935 else {
1936 context_set_address_root(context, virt_to_phys(pgd));
1937 context_set_address_width(context, iommu->agaw);
1938 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001939
1940 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001941 context_set_fault_enable(context);
1942 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001943 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001944
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001945 /*
1946	 * It's a non-present to present mapping. If the hardware doesn't cache
1947	 * non-present entries, we only need to flush the write-buffer. If it
1948	 * _does_ cache non-present entries, then it does so in the special
1949 * domain #0, which we have to flush:
1950 */
1951 if (cap_caching_mode(iommu->cap)) {
1952 iommu->flush.flush_context(iommu, 0,
1953 (((u16)bus) << 8) | devfn,
1954 DMA_CCMD_MASK_NOBIT,
1955 DMA_CCMD_DEVICE_INVL);
Jiang Liu18fd7792014-07-11 14:19:26 +08001956 iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001957 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001958 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001959 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001960 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001961 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001962
Jiang Liufb170fb2014-07-11 14:19:28 +08001963 domain_attach_iommu(domain, iommu);
1964
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001965 return 0;
1966}
1967
Alex Williamson579305f2014-07-03 09:51:43 -06001968struct domain_context_mapping_data {
1969 struct dmar_domain *domain;
1970 struct intel_iommu *iommu;
1971 int translation;
1972};
1973
1974static int domain_context_mapping_cb(struct pci_dev *pdev,
1975 u16 alias, void *opaque)
1976{
1977 struct domain_context_mapping_data *data = opaque;
1978
1979 return domain_context_mapping_one(data->domain, data->iommu,
1980 PCI_BUS_NUM(alias), alias & 0xff,
1981 data->translation);
1982}
1983
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001984static int
David Woodhousee1f167f2014-03-09 15:24:46 -07001985domain_context_mapping(struct dmar_domain *domain, struct device *dev,
1986 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001987{
David Woodhouse64ae8922014-03-09 12:52:30 -07001988 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07001989 u8 bus, devfn;
Alex Williamson579305f2014-07-03 09:51:43 -06001990 struct domain_context_mapping_data data;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001991
David Woodhousee1f167f2014-03-09 15:24:46 -07001992 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse64ae8922014-03-09 12:52:30 -07001993 if (!iommu)
1994 return -ENODEV;
1995
Alex Williamson579305f2014-07-03 09:51:43 -06001996 if (!dev_is_pci(dev))
1997 return domain_context_mapping_one(domain, iommu, bus, devfn,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001998 translation);
Alex Williamson579305f2014-07-03 09:51:43 -06001999
2000 data.domain = domain;
2001 data.iommu = iommu;
2002 data.translation = translation;
2003
2004 return pci_for_each_dma_alias(to_pci_dev(dev),
2005 &domain_context_mapping_cb, &data);
2006}
2007
2008static int domain_context_mapped_cb(struct pci_dev *pdev,
2009 u16 alias, void *opaque)
2010{
2011 struct intel_iommu *iommu = opaque;
2012
2013 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002014}
2015
David Woodhousee1f167f2014-03-09 15:24:46 -07002016static int domain_context_mapped(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002017{
Weidong Han5331fe62008-12-08 23:00:00 +08002018 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002019 u8 bus, devfn;
Weidong Han5331fe62008-12-08 23:00:00 +08002020
David Woodhousee1f167f2014-03-09 15:24:46 -07002021 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08002022 if (!iommu)
2023 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002024
Alex Williamson579305f2014-07-03 09:51:43 -06002025 if (!dev_is_pci(dev))
2026 return device_context_mapped(iommu, bus, devfn);
David Woodhousee1f167f2014-03-09 15:24:46 -07002027
Alex Williamson579305f2014-07-03 09:51:43 -06002028 return !pci_for_each_dma_alias(to_pci_dev(dev),
2029 domain_context_mapped_cb, iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002030}
2031
Fenghua Yuf5329592009-08-04 15:09:37 -07002032/* Returns a number of VTD pages, but aligned to MM page size */
2033static inline unsigned long aligned_nrpages(unsigned long host_addr,
2034 size_t size)
2035{
2036 host_addr &= ~PAGE_MASK;
2037 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
2038}
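/*
 * Worked example (illustrative, 4KiB pages): host_addr = 0x1234 and
 * size = 0x2000. The in-page offset kept is 0x234, and
 * PAGE_ALIGN(0x234 + 0x2000) = 0x3000, so the buffer spans
 * 0x3000 >> VTD_PAGE_SHIFT = 3 VT-d pages even though its length is only
 * two pages.
 */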
2039
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002040/* Return largest possible superpage level for a given mapping */
2041static inline int hardware_largepage_caps(struct dmar_domain *domain,
2042 unsigned long iov_pfn,
2043 unsigned long phy_pfn,
2044 unsigned long pages)
2045{
2046 int support, level = 1;
2047 unsigned long pfnmerge;
2048
2049 support = domain->iommu_superpage;
2050
2051 /* To use a large page, the virtual *and* physical addresses
2052 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2053 of them will mean we have to use smaller pages. So just
2054 merge them and check both at once. */
2055 pfnmerge = iov_pfn | phy_pfn;
2056
2057 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2058 pages >>= VTD_STRIDE_SHIFT;
2059 if (!pages)
2060 break;
2061 pfnmerge >>= VTD_STRIDE_SHIFT;
2062 level++;
2063 support--;
2064 }
2065 return level;
2066}
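/*
 * Worked example (illustrative, assuming a 9-bit stride): iov_pfn = 0x200,
 * phy_pfn = 0x400, pages = 0x400, domain->iommu_superpage = 1. pfnmerge =
 * 0x600 has its low 9 bits clear (both addresses are 2MiB-aligned), so one
 * iteration runs: pages >>= 9 leaves 2, level becomes 2 and support drops
 * to 0, ending the loop. Level 2 means a 2MiB superpage can be used.
 */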
2067
David Woodhouse9051aa02009-06-29 12:30:54 +01002068static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2069 struct scatterlist *sg, unsigned long phys_pfn,
2070 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01002071{
2072 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01002073 phys_addr_t uninitialized_var(pteval);
Jiang Liucc4f14a2014-11-26 09:42:10 +08002074 unsigned long sg_res = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002075 unsigned int largepage_lvl = 0;
2076 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01002077
Jiang Liu162d1b12014-07-11 14:19:35 +08002078 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
David Woodhousee1605492009-06-29 11:17:38 +01002079
2080 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2081 return -EINVAL;
2082
2083 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2084
Jiang Liucc4f14a2014-11-26 09:42:10 +08002085 if (!sg) {
2086 sg_res = nr_pages;
David Woodhouse9051aa02009-06-29 12:30:54 +01002087 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2088 }
2089
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002090 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01002091 uint64_t tmp;
2092
David Woodhousee1605492009-06-29 11:17:38 +01002093 if (!sg_res) {
Fenghua Yuf5329592009-08-04 15:09:37 -07002094 sg_res = aligned_nrpages(sg->offset, sg->length);
David Woodhousee1605492009-06-29 11:17:38 +01002095 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2096 sg->dma_length = sg->length;
2097 pteval = page_to_phys(sg_page(sg)) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002098 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01002099 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002100
David Woodhousee1605492009-06-29 11:17:38 +01002101 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002102 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2103
David Woodhouse5cf0a762014-03-19 16:07:49 +00002104 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01002105 if (!pte)
2106 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002107			/* It is a large page */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002108 if (largepage_lvl > 1) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002109 pteval |= DMA_PTE_LARGE_PAGE;
Jiang Liud41a4ad2014-07-11 14:19:34 +08002110 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2111 /*
2112 * Ensure that old small page tables are
2113 * removed to make room for superpage,
2114 * if they exist.
2115 */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002116 dma_pte_free_pagetable(domain, iov_pfn,
Jiang Liud41a4ad2014-07-11 14:19:34 +08002117 iov_pfn + lvl_pages - 1);
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002118 } else {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002119 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00002120 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002121
David Woodhousee1605492009-06-29 11:17:38 +01002122 }
2123		/* We don't need a lock here; nobody else
2124		 * touches the iova range
2125 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01002126 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01002127 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01002128 static int dumps = 5;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002129 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2130 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01002131 if (dumps) {
2132 dumps--;
2133 debug_dma_dump_mappings(NULL);
2134 }
2135 WARN_ON(1);
2136 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002137
2138 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2139
2140 BUG_ON(nr_pages < lvl_pages);
2141 BUG_ON(sg_res < lvl_pages);
2142
2143 nr_pages -= lvl_pages;
2144 iov_pfn += lvl_pages;
2145 phys_pfn += lvl_pages;
2146 pteval += lvl_pages * VTD_PAGE_SIZE;
2147 sg_res -= lvl_pages;
2148
2149 /* If the next PTE would be the first in a new page, then we
2150 need to flush the cache on the entries we've just written.
2151 And then we'll need to recalculate 'pte', so clear it and
2152 let it get set again in the if (!pte) block above.
2153
2154 If we're done (!nr_pages) we need to flush the cache too.
2155
2156 Also if we've been setting superpages, we may need to
2157 recalculate 'pte' and switch back to smaller pages for the
2158 end of the mapping, if the trailing size is not enough to
2159 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01002160 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002161 if (!nr_pages || first_pte_in_page(pte) ||
2162 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01002163 domain_flush_cache(domain, first_pte,
2164 (void *)pte - (void *)first_pte);
2165 pte = NULL;
2166 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002167
2168 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01002169 sg = sg_next(sg);
2170 }
2171 return 0;
2172}
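/*
 * Worked example (illustrative): mapping nr_pages = 0x400 at 2MiB-aligned
 * iov_pfn/phys_pfn with one level of superpage support.
 * hardware_largepage_caps() returns 2, lvl_pages = 512, so each iteration
 * writes one 2MiB PTE (pteval | DMA_PTE_LARGE_PAGE) and advances iov_pfn,
 * phys_pfn and pteval by 512 pages; the request completes with two PTEs
 * instead of 1024.
 */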
2173
David Woodhouse9051aa02009-06-29 12:30:54 +01002174static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2175 struct scatterlist *sg, unsigned long nr_pages,
2176 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002177{
David Woodhouse9051aa02009-06-29 12:30:54 +01002178 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2179}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002180
David Woodhouse9051aa02009-06-29 12:30:54 +01002181static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2182 unsigned long phys_pfn, unsigned long nr_pages,
2183 int prot)
2184{
2185 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002186}
2187
Weidong Hanc7151a82008-12-08 22:51:37 +08002188static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002189{
Weidong Hanc7151a82008-12-08 22:51:37 +08002190 if (!iommu)
2191 return;
Weidong Han8c11e792008-12-08 15:29:22 +08002192
2193 clear_context_table(iommu, bus, devfn);
2194 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002195 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002196 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002197}
2198
David Woodhouse109b9b02012-05-25 17:43:02 +01002199static inline void unlink_domain_info(struct device_domain_info *info)
2200{
2201 assert_spin_locked(&device_domain_lock);
2202 list_del(&info->link);
2203 list_del(&info->global);
2204 if (info->dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002205 info->dev->archdata.iommu = NULL;
David Woodhouse109b9b02012-05-25 17:43:02 +01002206}
2207
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002208static void domain_remove_dev_info(struct dmar_domain *domain)
2209{
Yijing Wang3a74ca02014-05-20 20:37:47 +08002210 struct device_domain_info *info, *tmp;
Jiang Liufb170fb2014-07-11 14:19:28 +08002211 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002212
2213 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wang3a74ca02014-05-20 20:37:47 +08002214 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
David Woodhouse109b9b02012-05-25 17:43:02 +01002215 unlink_domain_info(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002216 spin_unlock_irqrestore(&device_domain_lock, flags);
2217
Yu Zhao93a23a72009-05-18 13:51:37 +08002218 iommu_disable_dev_iotlb(info);
David Woodhouse7c7faa12014-03-09 13:33:06 -07002219 iommu_detach_dev(info->iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002220
Jiang Liuab8dfe22014-07-11 14:19:27 +08002221 if (domain_type_is_vm(domain)) {
David Woodhouse7c7faa12014-03-09 13:33:06 -07002222 iommu_detach_dependent_devices(info->iommu, info->dev);
Jiang Liufb170fb2014-07-11 14:19:28 +08002223 domain_detach_iommu(domain, info->iommu);
Jiang Liu92d03cc2014-02-19 14:07:28 +08002224 }
2225
2226 free_devinfo_mem(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002227 spin_lock_irqsave(&device_domain_lock, flags);
2228 }
2229 spin_unlock_irqrestore(&device_domain_lock, flags);
2230}
2231
2232/*
2233 * find_domain
David Woodhouse1525a292014-03-06 16:19:30 +00002234 * Note: the info is stored in struct device->archdata.iommu
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002235 */
David Woodhouse1525a292014-03-06 16:19:30 +00002236static struct dmar_domain *find_domain(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002237{
2238 struct device_domain_info *info;
2239
2240 /* No lock here, assumes no domain exit in normal case */
David Woodhouse1525a292014-03-06 16:19:30 +00002241 info = dev->archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002242 if (info)
2243 return info->domain;
2244 return NULL;
2245}
2246
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002247static inline struct device_domain_info *
Jiang Liu745f2582014-02-19 14:07:26 +08002248dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2249{
2250 struct device_domain_info *info;
2251
2252 list_for_each_entry(info, &device_domain_list, global)
David Woodhouse41e80dca2014-03-09 13:55:54 -07002253 if (info->iommu->segment == segment && info->bus == bus &&
Jiang Liu745f2582014-02-19 14:07:26 +08002254 info->devfn == devfn)
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002255 return info;
Jiang Liu745f2582014-02-19 14:07:26 +08002256
2257 return NULL;
2258}
2259
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002260static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
David Woodhouse41e80dca2014-03-09 13:55:54 -07002261 int bus, int devfn,
David Woodhouseb718cd32014-03-09 13:11:33 -07002262 struct device *dev,
2263 struct dmar_domain *domain)
Jiang Liu745f2582014-02-19 14:07:26 +08002264{
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002265 struct dmar_domain *found = NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002266 struct device_domain_info *info;
2267 unsigned long flags;
2268
2269 info = alloc_devinfo_mem();
2270 if (!info)
David Woodhouseb718cd32014-03-09 13:11:33 -07002271 return NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002272
Jiang Liu745f2582014-02-19 14:07:26 +08002273 info->bus = bus;
2274 info->devfn = devfn;
2275 info->dev = dev;
2276 info->domain = domain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002277 info->iommu = iommu;
Jiang Liu745f2582014-02-19 14:07:26 +08002278
2279 spin_lock_irqsave(&device_domain_lock, flags);
2280 if (dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002281 found = find_domain(dev);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002282 else {
2283 struct device_domain_info *info2;
David Woodhouse41e80dca2014-03-09 13:55:54 -07002284 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002285 if (info2)
2286 found = info2->domain;
2287 }
Jiang Liu745f2582014-02-19 14:07:26 +08002288 if (found) {
2289 spin_unlock_irqrestore(&device_domain_lock, flags);
2290 free_devinfo_mem(info);
David Woodhouseb718cd32014-03-09 13:11:33 -07002291 /* Caller must free the original domain */
2292 return found;
Jiang Liu745f2582014-02-19 14:07:26 +08002293 }
2294
David Woodhouseb718cd32014-03-09 13:11:33 -07002295 list_add(&info->link, &domain->devices);
2296 list_add(&info->global, &device_domain_list);
2297 if (dev)
2298 dev->archdata.iommu = info;
2299 spin_unlock_irqrestore(&device_domain_lock, flags);
2300
2301 return domain;
Jiang Liu745f2582014-02-19 14:07:26 +08002302}
2303
Alex Williamson579305f2014-07-03 09:51:43 -06002304static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2305{
2306 *(u16 *)opaque = alias;
2307 return 0;
2308}
2309
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002310/* domain is initialized */
David Woodhouse146922e2014-03-09 15:44:17 -07002311static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002312{
Alex Williamson579305f2014-07-03 09:51:43 -06002313 struct dmar_domain *domain, *tmp;
2314 struct intel_iommu *iommu;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002315 struct device_domain_info *info;
Alex Williamson579305f2014-07-03 09:51:43 -06002316 u16 dma_alias;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002317 unsigned long flags;
Yijing Wangaa4d0662014-05-26 20:14:06 +08002318 u8 bus, devfn;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002319
David Woodhouse146922e2014-03-09 15:44:17 -07002320 domain = find_domain(dev);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002321 if (domain)
2322 return domain;
2323
David Woodhouse146922e2014-03-09 15:44:17 -07002324 iommu = device_to_iommu(dev, &bus, &devfn);
2325 if (!iommu)
Alex Williamson579305f2014-07-03 09:51:43 -06002326 return NULL;
2327
2328 if (dev_is_pci(dev)) {
2329 struct pci_dev *pdev = to_pci_dev(dev);
2330
2331 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2332
2333 spin_lock_irqsave(&device_domain_lock, flags);
2334 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2335 PCI_BUS_NUM(dma_alias),
2336 dma_alias & 0xff);
2337 if (info) {
2338 iommu = info->iommu;
2339 domain = info->domain;
2340 }
2341 spin_unlock_irqrestore(&device_domain_lock, flags);
2342
2343		/* The DMA alias already has a domain; use it */
2344 if (info)
2345 goto found_domain;
2346 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002347
David Woodhouse146922e2014-03-09 15:44:17 -07002348 /* Allocate and initialize new domain for the device */
Jiang Liuab8dfe22014-07-11 14:19:27 +08002349 domain = alloc_domain(0);
Jiang Liu745f2582014-02-19 14:07:26 +08002350 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002351 return NULL;
Jiang Liu44bde612014-07-11 14:19:29 +08002352 domain->id = iommu_attach_domain(domain, iommu);
2353 if (domain->id < 0) {
Alex Williamson2fe9723d2011-03-04 14:52:30 -07002354 free_domain_mem(domain);
Alex Williamson579305f2014-07-03 09:51:43 -06002355 return NULL;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002356 }
Jiang Liufb170fb2014-07-11 14:19:28 +08002357 domain_attach_iommu(domain, iommu);
Alex Williamson579305f2014-07-03 09:51:43 -06002358 if (domain_init(domain, gaw)) {
2359 domain_exit(domain);
2360 return NULL;
2361 }
2362
2363 /* register PCI DMA alias device */
2364 if (dev_is_pci(dev)) {
2365 tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2366 dma_alias & 0xff, NULL, domain);
2367
2368 if (!tmp || tmp != domain) {
2369 domain_exit(domain);
2370 domain = tmp;
2371 }
2372
David Woodhouseb718cd32014-03-09 13:11:33 -07002373 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002374 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002375 }
2376
2377found_domain:
Alex Williamson579305f2014-07-03 09:51:43 -06002378 tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2379
2380 if (!tmp || tmp != domain) {
2381 domain_exit(domain);
2382 domain = tmp;
2383 }
David Woodhouseb718cd32014-03-09 13:11:33 -07002384
2385 return domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002386}
2387
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002388static int iommu_identity_mapping;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002389#define IDENTMAP_ALL 1
2390#define IDENTMAP_GFX 2
2391#define IDENTMAP_AZALIA 4
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002392
David Woodhouseb2132032009-06-26 18:50:28 +01002393static int iommu_domain_identity_map(struct dmar_domain *domain,
2394 unsigned long long start,
2395 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002396{
David Woodhousec5395d52009-06-28 16:35:56 +01002397 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2398 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002399
David Woodhousec5395d52009-06-28 16:35:56 +01002400 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2401 dma_to_mm_pfn(last_vpfn))) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002402 pr_err("Reserving iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002403 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002404 }
2405
David Woodhousec5395d52009-06-28 16:35:56 +01002406 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2407 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002408 /*
2409	 * The RMRR range might overlap with a physical memory range;
2410	 * clear it first
2411 */
David Woodhousec5395d52009-06-28 16:35:56 +01002412 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002413
David Woodhousec5395d52009-06-28 16:35:56 +01002414 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2415 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002416 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002417}
2418
David Woodhouse0b9d9752014-03-09 15:48:15 -07002419static int iommu_prepare_identity_map(struct device *dev,
David Woodhouseb2132032009-06-26 18:50:28 +01002420 unsigned long long start,
2421 unsigned long long end)
2422{
2423 struct dmar_domain *domain;
2424 int ret;
2425
David Woodhouse0b9d9752014-03-09 15:48:15 -07002426 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01002427 if (!domain)
2428 return -ENOMEM;
2429
David Woodhouse19943b02009-08-04 16:19:20 +01002430 /* For _hardware_ passthrough, don't bother. But for software
2431 passthrough, we do it anyway -- it may indicate a memory
2432	   range which is reserved in E820 and so didn't get set
2433	   up to start with in si_domain */
2434 if (domain == si_domain && hw_pass_through) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002435 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2436 dev_name(dev), start, end);
David Woodhouse19943b02009-08-04 16:19:20 +01002437 return 0;
2438 }
2439
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002440 pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2441 dev_name(dev), start, end);
2442
David Woodhouse5595b522009-12-02 09:21:55 +00002443 if (end < start) {
2444 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2445 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2446 dmi_get_system_info(DMI_BIOS_VENDOR),
2447 dmi_get_system_info(DMI_BIOS_VERSION),
2448 dmi_get_system_info(DMI_PRODUCT_VERSION));
2449 ret = -EIO;
2450 goto error;
2451 }
2452
David Woodhouse2ff729f2009-08-26 14:25:41 +01002453 if (end >> agaw_to_width(domain->agaw)) {
2454 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2455 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2456 agaw_to_width(domain->agaw),
2457 dmi_get_system_info(DMI_BIOS_VENDOR),
2458 dmi_get_system_info(DMI_BIOS_VERSION),
2459 dmi_get_system_info(DMI_PRODUCT_VERSION));
2460 ret = -EIO;
2461 goto error;
2462 }
David Woodhouse19943b02009-08-04 16:19:20 +01002463
David Woodhouseb2132032009-06-26 18:50:28 +01002464 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002465 if (ret)
2466 goto error;
2467
2468 /* context entry init */
David Woodhouse0b9d9752014-03-09 15:48:15 -07002469 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01002470 if (ret)
2471 goto error;
2472
2473 return 0;
2474
2475 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002476 domain_exit(domain);
2477 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002478}
2479
2480static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
David Woodhouse0b9d9752014-03-09 15:48:15 -07002481 struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002482{
David Woodhouse0b9d9752014-03-09 15:48:15 -07002483 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002484 return 0;
David Woodhouse0b9d9752014-03-09 15:48:15 -07002485 return iommu_prepare_identity_map(dev, rmrr->base_address,
2486 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002487}
2488
Suresh Siddhad3f13812011-08-23 17:05:25 -07002489#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002490static inline void iommu_prepare_isa(void)
2491{
2492 struct pci_dev *pdev;
2493 int ret;
2494
2495 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2496 if (!pdev)
2497 return;
2498
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002499 pr_info("Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse0b9d9752014-03-09 15:48:15 -07002500 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002501
2502 if (ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002503 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002504
Yijing Wang9b27e822014-05-20 20:37:52 +08002505 pci_dev_put(pdev);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002506}
2507#else
2508static inline void iommu_prepare_isa(void)
2509{
2510 return;
2511}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002512#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002513
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002514static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002515
Matt Kraai071e1372009-08-23 22:30:22 -07002516static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002517{
2518 struct dmar_drhd_unit *drhd;
2519 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002520 int nid, ret = 0;
Jiang Liu44bde612014-07-11 14:19:29 +08002521 bool first = true;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002522
Jiang Liuab8dfe22014-07-11 14:19:27 +08002523 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002524 if (!si_domain)
2525 return -EFAULT;
2526
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002527 for_each_active_iommu(iommu, drhd) {
2528 ret = iommu_attach_domain(si_domain, iommu);
Jiang Liufb170fb2014-07-11 14:19:28 +08002529 if (ret < 0) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002530 domain_exit(si_domain);
2531 return -EFAULT;
Jiang Liu44bde612014-07-11 14:19:29 +08002532 } else if (first) {
2533 si_domain->id = ret;
2534 first = false;
2535 } else if (si_domain->id != ret) {
2536 domain_exit(si_domain);
2537 return -EFAULT;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002538 }
Jiang Liufb170fb2014-07-11 14:19:28 +08002539 domain_attach_iommu(si_domain, iommu);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002540 }
2541
2542 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2543 domain_exit(si_domain);
2544 return -EFAULT;
2545 }
2546
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002547 pr_debug("Identity mapping domain is domain %d\n",
Jiang Liu9544c002014-01-06 14:18:13 +08002548 si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002549
David Woodhouse19943b02009-08-04 16:19:20 +01002550 if (hw)
2551 return 0;
2552
David Woodhousec7ab48d2009-06-26 19:10:36 +01002553 for_each_online_node(nid) {
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002554 unsigned long start_pfn, end_pfn;
2555 int i;
2556
2557 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2558 ret = iommu_domain_identity_map(si_domain,
2559 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2560 if (ret)
2561 return ret;
2562 }
David Woodhousec7ab48d2009-06-26 19:10:36 +01002563 }
2564
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002565 return 0;
2566}
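/*
 * Sketch (assumption, not driver code): if memory were hot-added after
 * boot, the same primitive used above would have to cover the new
 * range so identity-mapped devices can still reach it; the memory
 * hotplug notifier elsewhere in this file does essentially this.
 */
static int example_si_map_new_range(unsigned long start_pfn,
				    unsigned long end_pfn)
{
	if (!si_domain)
		return 0;	/* identity mapping not in use */
	return iommu_domain_identity_map(si_domain, PFN_PHYS(start_pfn),
					 PFN_PHYS(end_pfn));
}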
2567
David Woodhouse9b226622014-03-09 14:03:28 -07002568static int identity_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002569{
2570 struct device_domain_info *info;
2571
2572 if (likely(!iommu_identity_mapping))
2573 return 0;
2574
David Woodhouse9b226622014-03-09 14:03:28 -07002575 info = dev->archdata.iommu;
Mike Traviscb452a42011-05-28 13:15:03 -05002576 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2577 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002578
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002579 return 0;
2580}
2581
2582static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5913c9b2014-03-09 16:27:31 -07002583 struct device *dev, int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002584{
David Woodhouse0ac72662014-03-09 13:19:22 -07002585 struct dmar_domain *ndomain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002586 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002587 u8 bus, devfn;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002588 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002589
David Woodhouse5913c9b2014-03-09 16:27:31 -07002590 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002591 if (!iommu)
2592 return -ENODEV;
2593
David Woodhouse5913c9b2014-03-09 16:27:31 -07002594 ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
David Woodhouse0ac72662014-03-09 13:19:22 -07002595 if (ndomain != domain)
2596 return -EBUSY;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002597
David Woodhouse5913c9b2014-03-09 16:27:31 -07002598 ret = domain_context_mapping(domain, dev, translation);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002599 if (ret) {
David Woodhouse5913c9b2014-03-09 16:27:31 -07002600 domain_remove_one_dev_info(domain, dev);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002601 return ret;
2602 }
2603
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002604 return 0;
2605}
2606
David Woodhouse0b9d9752014-03-09 15:48:15 -07002607static bool device_has_rmrr(struct device *dev)
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002608{
2609 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002610 struct device *tmp;
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002611 int i;
2612
Jiang Liu0e242612014-02-19 14:07:34 +08002613 rcu_read_lock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002614 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002615 /*
2616 * Return TRUE if this RMRR contains the device that
2617 * is passed in.
2618 */
2619 for_each_active_dev_scope(rmrr->devices,
2620 rmrr->devices_cnt, i, tmp)
David Woodhouse0b9d9752014-03-09 15:48:15 -07002621 if (tmp == dev) {
Jiang Liu0e242612014-02-19 14:07:34 +08002622 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002623 return true;
Jiang Liub683b232014-02-19 14:07:32 +08002624 }
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002625 }
Jiang Liu0e242612014-02-19 14:07:34 +08002626 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002627 return false;
2628}
2629
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002630/*
 2631 * There are a couple of cases where we need to restrict the functionality of
2632 * devices associated with RMRRs. The first is when evaluating a device for
2633 * identity mapping because problems exist when devices are moved in and out
2634 * of domains and their respective RMRR information is lost. This means that
2635 * a device with associated RMRRs will never be in a "passthrough" domain.
2636 * The second is use of the device through the IOMMU API. This interface
2637 * expects to have full control of the IOVA space for the device. We cannot
2638 * satisfy both the requirement that RMRR access is maintained and have an
2639 * unencumbered IOVA space. We also have no ability to quiesce the device's
2640 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2641 * We therefore prevent devices associated with an RMRR from participating in
2642 * the IOMMU API, which eliminates them from device assignment.
2643 *
2644 * In both cases we assume that PCI USB devices with RMRRs have them largely
2645 * for historical reasons and that the RMRR space is not actively used post
2646 * boot. This exclusion may change if vendors begin to abuse it.
David Woodhouse18436af2015-03-25 15:05:47 +00002647 *
2648 * The same exception is made for graphics devices, with the requirement that
2649 * any use of the RMRR regions will be torn down before assigning the device
2650 * to a guest.
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002651 */
2652static bool device_is_rmrr_locked(struct device *dev)
2653{
2654 if (!device_has_rmrr(dev))
2655 return false;
2656
2657 if (dev_is_pci(dev)) {
2658 struct pci_dev *pdev = to_pci_dev(dev);
2659
David Woodhouse18436af2015-03-25 15:05:47 +00002660 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002661 return false;
2662 }
2663
2664 return true;
2665}
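/*
 * Sketch of a consumer, under the assumption of an attach path that
 * must honour the restriction described above: refuse to hand an
 * RMRR-locked device to the IOMMU API.
 */
static int example_attach_check(struct device *dev)
{
	if (device_is_rmrr_locked(dev)) {
		dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement\n");
		return -EPERM;
	}
	return 0;
}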
2666
David Woodhouse3bdb2592014-03-09 16:03:08 -07002667static int iommu_should_identity_map(struct device *dev, int startup)
David Woodhouse6941af22009-07-04 18:24:27 +01002668{
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002669
David Woodhouse3bdb2592014-03-09 16:03:08 -07002670 if (dev_is_pci(dev)) {
2671 struct pci_dev *pdev = to_pci_dev(dev);
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002672
Alex Williamsonc875d2c2014-07-03 09:57:02 -06002673 if (device_is_rmrr_locked(dev))
David Woodhouse3bdb2592014-03-09 16:03:08 -07002674 return 0;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002675
David Woodhouse3bdb2592014-03-09 16:03:08 -07002676 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2677 return 1;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002678
David Woodhouse3bdb2592014-03-09 16:03:08 -07002679 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2680 return 1;
2681
2682 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2683 return 0;
2684
2685 /*
2686 * We want to start off with all devices in the 1:1 domain, and
2687 * take them out later if we find they can't access all of memory.
2688 *
2689 * However, we can't do this for PCI devices behind bridges,
2690 * because all PCI devices behind the same bridge will end up
2691 * with the same source-id on their transactions.
2692 *
2693 * Practically speaking, we can't change things around for these
2694 * devices at run-time, because we can't be sure there'll be no
2695 * DMA transactions in flight for any of their siblings.
2696 *
2697 * So PCI devices (unless they're on the root bus) as well as
2698 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2699 * the 1:1 domain, just in _case_ one of their siblings turns out
2700 * not to be able to map all of memory.
2701 */
2702 if (!pci_is_pcie(pdev)) {
2703 if (!pci_is_root_bus(pdev->bus))
2704 return 0;
2705 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2706 return 0;
2707 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2708 return 0;
2709 } else {
2710 if (device_has_rmrr(dev))
2711 return 0;
2712 }
David Woodhouse6941af22009-07-04 18:24:27 +01002713
David Woodhouse3dfc8132009-07-04 19:11:08 +01002714 /*
David Woodhouse3dfc8132009-07-04 19:11:08 +01002715 * At boot time, we don't yet know if devices will be 64-bit capable.
David Woodhouse3bdb2592014-03-09 16:03:08 -07002716	 * Assume that they will -- if they turn out not to be, then we can
David Woodhouse3dfc8132009-07-04 19:11:08 +01002717 * take them out of the 1:1 domain later.
2718 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002719 if (!startup) {
2720 /*
2721 * If the device's dma_mask is less than the system's memory
2722 * size then this is not a candidate for identity mapping.
2723 */
David Woodhouse3bdb2592014-03-09 16:03:08 -07002724 u64 dma_mask = *dev->dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002725
David Woodhouse3bdb2592014-03-09 16:03:08 -07002726 if (dev->coherent_dma_mask &&
2727 dev->coherent_dma_mask < dma_mask)
2728 dma_mask = dev->coherent_dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002729
David Woodhouse3bdb2592014-03-09 16:03:08 -07002730 return dma_mask >= dma_get_required_mask(dev);
Chris Wright8fcc5372011-05-28 13:15:02 -05002731 }
David Woodhouse6941af22009-07-04 18:24:27 +01002732
2733 return 1;
2734}
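/*
 * Worked example (illustrative only) of the run-time test above: a
 * device stuck with a 32-bit dma_mask on a host whose highest populated
 * address needs ~33 bits.  dma_get_required_mask() then exceeds the
 * device mask, the comparison fails, and the device is taken out of
 * the 1:1 domain on first use.
 */
static bool example_can_stay_identity_mapped(struct device *dev)
{
	u64 dma_mask = *dev->dma_mask;	/* e.g. DMA_BIT_MASK(32) */

	return dma_mask >= dma_get_required_mask(dev);
}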
2735
David Woodhousecf04eee2014-03-21 16:49:04 +00002736static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2737{
2738 int ret;
2739
2740 if (!iommu_should_identity_map(dev, 1))
2741 return 0;
2742
2743 ret = domain_add_dev_info(si_domain, dev,
2744 hw ? CONTEXT_TT_PASS_THROUGH :
2745 CONTEXT_TT_MULTI_LEVEL);
2746 if (!ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002747 pr_info("%s identity mapping for device %s\n",
2748 hw ? "Hardware" : "Software", dev_name(dev));
David Woodhousecf04eee2014-03-21 16:49:04 +00002749 else if (ret == -ENODEV)
2750 /* device not associated with an iommu */
2751 ret = 0;
2752
2753 return ret;
2754}
2755
2756
Matt Kraai071e1372009-08-23 22:30:22 -07002757static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002758{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002759 struct pci_dev *pdev = NULL;
David Woodhousecf04eee2014-03-21 16:49:04 +00002760 struct dmar_drhd_unit *drhd;
2761 struct intel_iommu *iommu;
2762 struct device *dev;
2763 int i;
2764 int ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002765
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002766 for_each_pci_dev(pdev) {
David Woodhousecf04eee2014-03-21 16:49:04 +00002767 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2768 if (ret)
2769 return ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002770 }
2771
David Woodhousecf04eee2014-03-21 16:49:04 +00002772 for_each_active_iommu(iommu, drhd)
2773 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2774 struct acpi_device_physical_node *pn;
2775 struct acpi_device *adev;
2776
2777 if (dev->bus != &acpi_bus_type)
2778 continue;
Joerg Roedel86080cc2015-06-12 12:27:16 +02002779
David Woodhousecf04eee2014-03-21 16:49:04 +00002780		adev = to_acpi_device(dev);
2781 mutex_lock(&adev->physical_node_lock);
2782 list_for_each_entry(pn, &adev->physical_node_list, node) {
2783 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2784 if (ret)
2785 break;
2786 }
2787 mutex_unlock(&adev->physical_node_lock);
2788 if (ret)
2789 return ret;
2790 }
2791
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002792 return 0;
2793}
2794
Jiang Liuffebeb42014-11-09 22:48:02 +08002795static void intel_iommu_init_qi(struct intel_iommu *iommu)
2796{
2797 /*
 2798	 * Start from a sane IOMMU hardware state.
 2799	 * If queued invalidation was already initialized by us
 2800	 * (for example, while enabling interrupt remapping) then
 2801	 * things are already rolling from a sane state.
2802 */
2803 if (!iommu->qi) {
2804 /*
2805 * Clear any previous faults.
2806 */
2807 dmar_fault(-1, iommu);
2808 /*
2809 * Disable queued invalidation if supported and already enabled
2810 * before OS handover.
2811 */
2812 dmar_disable_qi(iommu);
2813 }
2814
2815 if (dmar_enable_qi(iommu)) {
2816 /*
2817 * Queued Invalidate not enabled, use Register Based Invalidate
2818 */
2819 iommu->flush.flush_context = __iommu_flush_context;
2820 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002821 pr_info("%s: Using Register based invalidation\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08002822 iommu->name);
2823 } else {
2824 iommu->flush.flush_context = qi_flush_context;
2825 iommu->flush.flush_iotlb = qi_flush_iotlb;
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02002826 pr_info("%s: Using Queued invalidation\n", iommu->name);
Jiang Liuffebeb42014-11-09 22:48:02 +08002827 }
2828}
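/*
 * Sketch: whichever path intel_iommu_init_qi() picked, callers flush
 * through the same function pointers.  A global context-cache plus
 * IOTLB invalidation (as issued from init_dmars() below) looks like:
 */
static void example_global_flush(struct intel_iommu *iommu)
{
	iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}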
2829
Joerg Roedel091d42e2015-06-12 11:56:10 +02002830static int copy_context_table(struct intel_iommu *iommu,
2831 struct root_entry *old_re,
2832 struct context_entry **tbl,
2833 int bus, bool ext)
2834{
2835 struct context_entry *old_ce = NULL, *new_ce = NULL, ce;
Joerg Roedeldbcd8612015-06-12 12:02:09 +02002836 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
Joerg Roedel091d42e2015-06-12 11:56:10 +02002837 phys_addr_t old_ce_phys;
2838
2839 tbl_idx = ext ? bus * 2 : bus;
2840
2841 for (devfn = 0; devfn < 256; devfn++) {
2842 /* First calculate the correct index */
2843 idx = (ext ? devfn * 2 : devfn) % 256;
2844
2845 if (idx == 0) {
2846 /* First save what we may have and clean up */
2847 if (new_ce) {
2848 tbl[tbl_idx] = new_ce;
2849 __iommu_flush_cache(iommu, new_ce,
2850 VTD_PAGE_SIZE);
2851 pos = 1;
2852 }
2853
2854 if (old_ce)
2855 iounmap(old_ce);
2856
2857 ret = 0;
2858 if (devfn < 0x80)
2859 old_ce_phys = root_entry_lctp(old_re);
2860 else
2861 old_ce_phys = root_entry_uctp(old_re);
2862
2863 if (!old_ce_phys) {
2864 if (ext && devfn == 0) {
2865 /* No LCTP, try UCTP */
2866 devfn = 0x7f;
2867 continue;
2868 } else {
2869 goto out;
2870 }
2871 }
2872
2873 ret = -ENOMEM;
2874 old_ce = ioremap_cache(old_ce_phys, PAGE_SIZE);
2875 if (!old_ce)
2876 goto out;
2877
2878 new_ce = alloc_pgtable_page(iommu->node);
2879 if (!new_ce)
2880 goto out_unmap;
2881
2882 ret = 0;
2883 }
2884
2885 /* Now copy the context entry */
2886 ce = old_ce[idx];
2887
Joerg Roedelcf484d02015-06-12 12:21:46 +02002888 if (!__context_present(&ce))
Joerg Roedel091d42e2015-06-12 11:56:10 +02002889 continue;
2890
Joerg Roedeldbcd8612015-06-12 12:02:09 +02002891 did = context_domain_id(&ce);
2892 if (did >= 0 && did < cap_ndoms(iommu->cap))
2893 set_bit(did, iommu->domain_ids);
2894
Joerg Roedelcf484d02015-06-12 12:21:46 +02002895 /*
2896 * We need a marker for copied context entries. This
2897 * marker needs to work for the old format as well as
2898 * for extended context entries.
2899 *
2900 * Bit 67 of the context entry is used. In the old
2901 * format this bit is available to software, in the
2902 * extended format it is the PGE bit, but PGE is ignored
2903 * by HW if PASIDs are disabled (and thus still
2904 * available).
2905 *
2906 * So disable PASIDs first and then mark the entry
2907 * copied. This means that we don't copy PASID
2908 * translations from the old kernel, but this is fine as
2909 * faults there are not fatal.
2910 */
2911 context_clear_pasid_enable(&ce);
2912 context_set_copied(&ce);
2913
Joerg Roedel091d42e2015-06-12 11:56:10 +02002914 new_ce[idx] = ce;
2915 }
2916
2917 tbl[tbl_idx + pos] = new_ce;
2918
2919 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
2920
2921out_unmap:
2922 iounmap(old_ce);
2923
2924out:
2925 return ret;
2926}
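/*
 * Worked example of the extended-mode index math above, for bus 3 and
 * devfn 0x85: tbl_idx = 3 * 2 = 6; devfn 0x85 is in the upper half
 * (>= 0x80), so its entries land in tbl[tbl_idx + 1] at
 * idx = (0x85 * 2) % 256 = 0x0a.  The helper below is illustrative
 * only.
 */
static inline int example_ext_ctx_index(int devfn)
{
	return (devfn * 2) % 256;	/* same folding as copy_context_table() */
}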
2927
2928static int copy_translation_tables(struct intel_iommu *iommu)
2929{
2930 struct context_entry **ctxt_tbls;
2931 struct root_entry *old_rt;
2932 phys_addr_t old_rt_phys;
2933 int ctxt_table_entries;
2934 unsigned long flags;
2935 u64 rtaddr_reg;
2936 int bus, ret;
2937 bool ext;
2938
2939 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
2940 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
2941
2942 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
2943 if (!old_rt_phys)
2944 return -EINVAL;
2945
2946 old_rt = ioremap_cache(old_rt_phys, PAGE_SIZE);
2947 if (!old_rt)
2948 return -ENOMEM;
2949
2950 /* This is too big for the stack - allocate it from slab */
2951 ctxt_table_entries = ext ? 512 : 256;
2952 ret = -ENOMEM;
2953 ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
2954 if (!ctxt_tbls)
2955 goto out_unmap;
2956
2957 for (bus = 0; bus < 256; bus++) {
2958 ret = copy_context_table(iommu, &old_rt[bus],
2959 ctxt_tbls, bus, ext);
2960 if (ret) {
2961 pr_err("%s: Failed to copy context table for bus %d\n",
2962 iommu->name, bus);
2963 continue;
2964 }
2965 }
2966
2967 spin_lock_irqsave(&iommu->lock, flags);
2968
2969 /* Context tables are copied, now write them to the root_entry table */
2970 for (bus = 0; bus < 256; bus++) {
2971 int idx = ext ? bus * 2 : bus;
2972 u64 val;
2973
2974 if (ctxt_tbls[idx]) {
2975 val = virt_to_phys(ctxt_tbls[idx]) | 1;
2976 iommu->root_entry[bus].lo = val;
2977 }
2978
2979 if (!ext || !ctxt_tbls[idx + 1])
2980 continue;
2981
2982 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
2983 iommu->root_entry[bus].hi = val;
2984 }
2985
2986 spin_unlock_irqrestore(&iommu->lock, flags);
2987
2988 kfree(ctxt_tbls);
2989
2990 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
2991
2992 ret = 0;
2993
2994out_unmap:
2995 iounmap(old_rt);
2996
2997 return ret;
2998}
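/*
 * Sketch of the root-entry packing written above: bit 0 is the present
 * bit and the remainder is the page-aligned physical address of the
 * context table.  Illustrative helper, not used by the driver.
 */
static inline u64 example_pack_root_entry(struct context_entry *ctxt)
{
	return virt_to_phys(ctxt) | 1;	/* address | present */
}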
2999
Joseph Cihulab7792602011-05-03 00:08:37 -07003000static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003001{
3002 struct dmar_drhd_unit *drhd;
3003 struct dmar_rmrr_unit *rmrr;
Joerg Roedela87f4912015-06-12 12:32:54 +02003004 bool copied_tables = false;
David Woodhouse832bd852014-03-07 15:08:36 +00003005 struct device *dev;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003006 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07003007 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003008
3009 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003010 * for each drhd
3011 * allocate root
3012 * initialize and program root entry to not present
3013 * endfor
3014 */
3015 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08003016 /*
 3017		 * lock not needed as this is only incremented in the single-
 3018		 * threaded kernel __init code path; all other accesses are
 3019		 * read-only
3020 */
Jiang Liu78d8e702014-11-09 22:47:57 +08003021 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
Mike Travis1b198bb2012-03-05 15:05:16 -08003022 g_num_of_iommus++;
3023 continue;
3024 }
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003025 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08003026 }
3027
Jiang Liuffebeb42014-11-09 22:48:02 +08003028 /* Preallocate enough resources for IOMMU hot-addition */
3029 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3030 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3031
Weidong Hand9630fe2008-12-08 11:06:32 +08003032 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3033 GFP_KERNEL);
3034 if (!g_iommus) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003035 pr_err("Allocating global iommu array failed\n");
Weidong Hand9630fe2008-12-08 11:06:32 +08003036 ret = -ENOMEM;
3037 goto error;
3038 }
3039
mark gross80b20dd2008-04-18 13:53:58 -07003040 deferred_flush = kzalloc(g_num_of_iommus *
3041 sizeof(struct deferred_flush_tables), GFP_KERNEL);
3042 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08003043 ret = -ENOMEM;
Jiang Liu989d51f2014-02-19 14:07:21 +08003044 goto free_g_iommus;
mark gross5e0d2a62008-03-04 15:22:08 -08003045 }
3046
Jiang Liu7c919772014-01-06 14:18:18 +08003047 for_each_active_iommu(iommu, drhd) {
Weidong Hand9630fe2008-12-08 11:06:32 +08003048 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003049
Joerg Roedelb63d80d2015-06-12 09:14:34 +02003050 intel_iommu_init_qi(iommu);
3051
Suresh Siddhae61d98d2008-07-10 11:16:35 -07003052 ret = iommu_init_domains(iommu);
3053 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003054 goto free_iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07003055
Joerg Roedel4158c2e2015-06-12 10:14:02 +02003056 init_translation_status(iommu);
3057
Joerg Roedel091d42e2015-06-12 11:56:10 +02003058 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3059 iommu_disable_translation(iommu);
3060 clear_translation_pre_enabled(iommu);
3061 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3062 iommu->name);
3063 }
Joerg Roedel4158c2e2015-06-12 10:14:02 +02003064
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003065 /*
3066 * TBD:
3067 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003068		 * among all IOMMUs. Need to split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003069 */
3070 ret = iommu_alloc_root_entry(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08003071 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003072 goto free_iommu;
Joerg Roedel5f0a7f72015-06-12 09:18:53 +02003073
Joerg Roedel091d42e2015-06-12 11:56:10 +02003074 if (translation_pre_enabled(iommu)) {
3075 pr_info("Translation already enabled - trying to copy translation structures\n");
3076
3077 ret = copy_translation_tables(iommu);
3078 if (ret) {
3079 /*
3080 * We found the IOMMU with translation
3081 * enabled - but failed to copy over the
3082 * old root-entry table. Try to proceed
3083 * by disabling translation now and
3084 * allocating a clean root-entry table.
3085 * This might cause DMAR faults, but
3086 * probably the dump will still succeed.
3087 */
3088 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3089 iommu->name);
3090 iommu_disable_translation(iommu);
3091 clear_translation_pre_enabled(iommu);
3092 } else {
3093 pr_info("Copied translation tables from previous kernel for %s\n",
3094 iommu->name);
Joerg Roedela87f4912015-06-12 12:32:54 +02003095 copied_tables = true;
Joerg Roedel091d42e2015-06-12 11:56:10 +02003096 }
3097 }
3098
Joerg Roedel5f0a7f72015-06-12 09:18:53 +02003099 iommu_flush_write_buffer(iommu);
3100 iommu_set_root_entry(iommu);
3101 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3102 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3103
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003104 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01003105 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003106 }
3107
David Woodhouse19943b02009-08-04 16:19:20 +01003108 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07003109 iommu_identity_mapping |= IDENTMAP_ALL;
3110
Suresh Siddhad3f13812011-08-23 17:05:25 -07003111#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07003112 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01003113#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07003114
Joerg Roedel86080cc2015-06-12 12:27:16 +02003115 if (iommu_identity_mapping) {
3116 ret = si_domain_init(hw_pass_through);
3117 if (ret)
3118 goto free_iommu;
3119 }
3120
David Woodhousee0fc7e02009-09-30 09:12:17 -07003121 check_tylersburg_isoch();
3122
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003123 /*
Joerg Roedela87f4912015-06-12 12:32:54 +02003124 * If we copied translations from a previous kernel in the kdump
 3125	 * case, we cannot assign the devices to domains now, as that
3126 * would eliminate the old mappings. So skip this part and defer
3127 * the assignment to device driver initialization time.
3128 */
3129 if (copied_tables)
3130 goto domains_done;
3131
3132 /*
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003133	 * If pass-through is not set or not enabled, set up context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003134	 * identity mappings for rmrr, gfx, and isa, and fall back to static
 3135	 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003136 */
David Woodhouse19943b02009-08-04 16:19:20 +01003137 if (iommu_identity_mapping) {
3138 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3139 if (ret) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003140 pr_crit("Failed to setup IOMMU pass-through\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08003141 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003142 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003143 }
David Woodhouse19943b02009-08-04 16:19:20 +01003144 /*
3145 * For each rmrr
3146 * for each dev attached to rmrr
3147 * do
3148 * locate drhd for dev, alloc domain for dev
3149 * allocate free domain
3150 * allocate page table entries for rmrr
3151 * if context not allocated for bus
3152 * allocate and init context
3153 * set present in root table for this bus
3154 * init context with domain, translation etc
3155 * endfor
3156 * endfor
3157 */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003158 pr_info("Setting RMRR:\n");
David Woodhouse19943b02009-08-04 16:19:20 +01003159 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08003160		/* some BIOSes list non-existent devices in the DMAR table. */
3161 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
David Woodhouse832bd852014-03-07 15:08:36 +00003162 i, dev) {
David Woodhouse0b9d9752014-03-09 15:48:15 -07003163 ret = iommu_prepare_rmrr_dev(rmrr, dev);
David Woodhouse19943b02009-08-04 16:19:20 +01003164 if (ret)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003165 pr_err("Mapping reserved region failed\n");
David Woodhouse19943b02009-08-04 16:19:20 +01003166 }
3167 }
3168
3169 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07003170
Joerg Roedela87f4912015-06-12 12:32:54 +02003171domains_done:
3172
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003173 /*
3174 * for each drhd
3175 * enable fault log
3176 * global invalidate context cache
3177 * global invalidate iotlb
3178 * enable translation
3179 */
Jiang Liu7c919772014-01-06 14:18:18 +08003180 for_each_iommu(iommu, drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07003181 if (drhd->ignored) {
3182 /*
3183 * we always have to disable PMRs or DMA may fail on
3184 * this device
3185 */
3186 if (force_on)
Jiang Liu7c919772014-01-06 14:18:18 +08003187 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003188 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07003189 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003190
3191 iommu_flush_write_buffer(iommu);
3192
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07003193 ret = dmar_set_interrupt(iommu);
3194 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08003195 goto free_iommu;
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07003196
Jiang Liu2a41cce2014-07-11 14:19:33 +08003197 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07003198 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003199 }
3200
3201 return 0;
Jiang Liu989d51f2014-02-19 14:07:21 +08003202
3203free_iommu:
Jiang Liuffebeb42014-11-09 22:48:02 +08003204 for_each_active_iommu(iommu, drhd) {
3205 disable_dmar_iommu(iommu);
Jiang Liua868e6b2014-01-06 14:18:20 +08003206 free_dmar_iommu(iommu);
Jiang Liuffebeb42014-11-09 22:48:02 +08003207 }
Jiang Liu9bdc5312014-01-06 14:18:27 +08003208 kfree(deferred_flush);
Jiang Liu989d51f2014-02-19 14:07:21 +08003209free_g_iommus:
Weidong Hand9630fe2008-12-08 11:06:32 +08003210 kfree(g_iommus);
Jiang Liu989d51f2014-02-19 14:07:21 +08003211error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003212 return ret;
3213}
3214
David Woodhouse5a5e02a2009-07-04 09:35:44 +01003215/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01003216static struct iova *intel_alloc_iova(struct device *dev,
3217 struct dmar_domain *domain,
3218 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003219{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003220 struct iova *iova = NULL;
3221
David Woodhouse875764d2009-06-28 21:20:51 +01003222 /* Restrict dma_mask to the width that the iommu can handle */
3223 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
3224
3225 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003226 /*
3227 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07003228 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08003229 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003230 */
David Woodhouse875764d2009-06-28 21:20:51 +01003231 iova = alloc_iova(&domain->iovad, nrpages,
3232 IOVA_PFN(DMA_BIT_MASK(32)), 1);
3233 if (iova)
3234 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003235 }
David Woodhouse875764d2009-06-28 21:20:51 +01003236 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
3237 if (unlikely(!iova)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003238		pr_err("Allocating %ld-page iova for %s failed\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003239 nrpages, dev_name(dev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003240 return NULL;
3241 }
3242
3243 return iova;
3244}
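/*
 * Sketch of a typical caller (cf. __intel_map_single() below): convert
 * the byte count to page units before asking for IOVA space.
 */
static struct iova *example_iova_for(struct device *dev,
				     struct dmar_domain *domain, size_t size)
{
	unsigned long nrpages = aligned_nrpages(0, size);

	return intel_alloc_iova(dev, domain, dma_to_mm_pfn(nrpages),
				*dev->dma_mask);
}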
3245
David Woodhoused4b709f2014-03-09 16:07:40 -07003246static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003247{
3248 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003249 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003250
David Woodhoused4b709f2014-03-09 16:07:40 -07003251 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003252 if (!domain) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003253 pr_err("Allocating domain for %s failed\n",
David Woodhoused4b709f2014-03-09 16:07:40 -07003254 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00003255 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003256 }
3257
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003258 /* make sure context mapping is ok */
David Woodhoused4b709f2014-03-09 16:07:40 -07003259 if (unlikely(!domain_context_mapped(dev))) {
3260 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003261 if (ret) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003262 pr_err("Domain context map for %s failed\n",
David Woodhoused4b709f2014-03-09 16:07:40 -07003263 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00003264 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003265 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003266 }
3267
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003268 return domain;
3269}
3270
David Woodhoused4b709f2014-03-09 16:07:40 -07003271static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
David Woodhouse147202a2009-07-07 19:43:20 +01003272{
3273 struct device_domain_info *info;
3274
3275 /* No lock here, assumes no domain exit in normal case */
David Woodhoused4b709f2014-03-09 16:07:40 -07003276 info = dev->archdata.iommu;
David Woodhouse147202a2009-07-07 19:43:20 +01003277 if (likely(info))
3278 return info->domain;
3279
3280 return __get_valid_domain_for_dev(dev);
3281}
3282
David Woodhouseecb509e2014-03-09 16:29:55 -07003283/* Check if the dev needs to go through non-identity map and unmap process. */
David Woodhouse73676832009-07-04 14:08:36 +01003284static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003285{
3286 int found;
3287
David Woodhouse3d891942014-03-06 15:59:26 +00003288 if (iommu_dummy(dev))
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003289 return 1;
3290
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003291 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003292 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003293
David Woodhouse9b226622014-03-09 14:03:28 -07003294 found = identity_mapping(dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003295 if (found) {
David Woodhouseecb509e2014-03-09 16:29:55 -07003296 if (iommu_should_identity_map(dev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003297 return 1;
3298 else {
3299 /*
 3300			 * 32-bit DMA device is removed from si_domain and falls
 3301			 * back to non-identity mapping.
3302 */
David Woodhousebf9c9ed2014-03-09 16:19:13 -07003303 domain_remove_one_dev_info(si_domain, dev);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003304 pr_info("32bit %s uses non-identity mapping\n",
3305 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003306 return 0;
3307 }
3308 } else {
3309 /*
 3310		 * In case a 64-bit DMA device is detached from a VM, the device
 3311		 * is put into si_domain for identity mapping.
3312 */
David Woodhouseecb509e2014-03-09 16:29:55 -07003313 if (iommu_should_identity_map(dev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003314 int ret;
David Woodhouse5913c9b2014-03-09 16:27:31 -07003315 ret = domain_add_dev_info(si_domain, dev,
David Woodhouse5fe60f42009-08-09 10:53:41 +01003316 hw_pass_through ?
3317 CONTEXT_TT_PASS_THROUGH :
3318 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003319 if (!ret) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003320 pr_info("64bit %s uses identity mapping\n",
3321 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003322 return 1;
3323 }
3324 }
3325 }
3326
David Woodhouse1e4c64c2009-07-04 10:40:38 +01003327 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003328}
3329
David Woodhouse5040a912014-03-09 16:14:00 -07003330static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003331 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003332{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003333 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003334 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003335 struct iova *iova;
3336 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003337 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08003338 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07003339 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003340
3341 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003342
David Woodhouse5040a912014-03-09 16:14:00 -07003343 if (iommu_no_mapping(dev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003344 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003345
David Woodhouse5040a912014-03-09 16:14:00 -07003346 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003347 if (!domain)
3348 return 0;
3349
Weidong Han8c11e792008-12-08 15:29:22 +08003350 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01003351 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003352
David Woodhouse5040a912014-03-09 16:14:00 -07003353 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003354 if (!iova)
3355 goto error;
3356
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003357 /*
 3358	 * Check if DMAR supports zero-length reads on write-only
 3359	 * mappings.
3360 */
3361 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003362 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003363 prot |= DMA_PTE_READ;
3364 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3365 prot |= DMA_PTE_WRITE;
3366 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003367	 * paddr to (paddr + size) might be a partial page; we should map the whole
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003368	 * page. Note: if two parts of one page are separately mapped, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003369	 * might have two guest_addrs mapping to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003370	 * is not a big problem
3371 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01003372 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07003373 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003374 if (ret)
3375 goto error;
3376
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003377 /* it's a non-present to present mapping. Only flush if caching mode */
3378 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003379 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003380 else
Weidong Han8c11e792008-12-08 15:29:22 +08003381 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003382
David Woodhouse03d6a242009-06-28 15:33:46 +01003383 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3384 start_paddr += paddr & ~PAGE_MASK;
3385 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003386
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003387error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003388 if (iova)
3389 __free_iova(&domain->iovad, iova);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003390 pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
David Woodhouse5040a912014-03-09 16:14:00 -07003391 dev_name(dev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003392 return 0;
3393}
3394
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003395static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3396 unsigned long offset, size_t size,
3397 enum dma_data_direction dir,
3398 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003399{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003400 return __intel_map_single(dev, page_to_phys(page) + offset, size,
David Woodhouse46333e32014-03-10 20:01:21 -07003401 dir, *dev->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003402}
3403
mark gross5e0d2a62008-03-04 15:22:08 -08003404static void flush_unmaps(void)
3405{
mark gross80b20dd2008-04-18 13:53:58 -07003406 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08003407
mark gross5e0d2a62008-03-04 15:22:08 -08003408 timer_on = 0;
3409
3410 /* just flush them all */
3411 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08003412 struct intel_iommu *iommu = g_iommus[i];
3413 if (!iommu)
3414 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003415
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003416 if (!deferred_flush[i].next)
3417 continue;
3418
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003419 /* In caching mode, global flushes turn emulation expensive */
3420 if (!cap_caching_mode(iommu->cap))
3421 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08003422 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003423 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08003424 unsigned long mask;
3425 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003426 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08003427
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003428 /* On real hardware multiple invalidations are expensive */
3429 if (cap_caching_mode(iommu->cap))
3430 iommu_flush_iotlb_psi(iommu, domain->id,
Jiang Liua156ef92014-07-11 14:19:36 +08003431 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00003432 !deferred_flush[i].freelist[j], 0);
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003433 else {
Jiang Liua156ef92014-07-11 14:19:36 +08003434 mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003435 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3436 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3437 }
Yu Zhao93a23a72009-05-18 13:51:37 +08003438 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003439 if (deferred_flush[i].freelist[j])
3440 dma_free_pagelist(deferred_flush[i].freelist[j]);
mark gross80b20dd2008-04-18 13:53:58 -07003441 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003442 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003443 }
3444
mark gross5e0d2a62008-03-04 15:22:08 -08003445 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003446}
3447
3448static void flush_unmaps_timeout(unsigned long data)
3449{
mark gross80b20dd2008-04-18 13:53:58 -07003450 unsigned long flags;
3451
3452 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003453 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07003454 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003455}
3456
David Woodhouseea8ea462014-03-05 17:09:32 +00003457static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
mark gross5e0d2a62008-03-04 15:22:08 -08003458{
3459 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07003460 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08003461 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08003462
3463 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07003464 if (list_size == HIGH_WATER_MARK)
3465 flush_unmaps();
3466
Weidong Han8c11e792008-12-08 15:29:22 +08003467 iommu = domain_get_iommu(dom);
3468 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003469
mark gross80b20dd2008-04-18 13:53:58 -07003470 next = deferred_flush[iommu_id].next;
3471 deferred_flush[iommu_id].domain[next] = dom;
3472 deferred_flush[iommu_id].iova[next] = iova;
David Woodhouseea8ea462014-03-05 17:09:32 +00003473 deferred_flush[iommu_id].freelist[next] = freelist;
mark gross80b20dd2008-04-18 13:53:58 -07003474 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08003475
3476 if (!timer_on) {
3477 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3478 timer_on = 1;
3479 }
3480 list_size++;
3481 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3482}
3483
Jiang Liud41a4ad2014-07-11 14:19:34 +08003484static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003485{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003486 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003487 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003488 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003489 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003490 struct page *freelist;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003491
David Woodhouse73676832009-07-04 14:08:36 +01003492 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003493 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003494
David Woodhouse1525a292014-03-06 16:19:30 +00003495 domain = find_domain(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003496 BUG_ON(!domain);
3497
Weidong Han8c11e792008-12-08 15:29:22 +08003498 iommu = domain_get_iommu(domain);
3499
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003500 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01003501 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3502 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003503 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003504
David Woodhoused794dc92009-06-28 00:27:49 +01003505 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3506 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003507
David Woodhoused794dc92009-06-28 00:27:49 +01003508 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003509 dev_name(dev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003510
David Woodhouseea8ea462014-03-05 17:09:32 +00003511 freelist = domain_unmap(domain, start_pfn, last_pfn);
David Woodhoused794dc92009-06-28 00:27:49 +01003512
mark gross5e0d2a62008-03-04 15:22:08 -08003513 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01003514 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
David Woodhouseea8ea462014-03-05 17:09:32 +00003515 last_pfn - start_pfn + 1, !freelist, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08003516 /* free iova */
3517 __free_iova(&domain->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003518 dma_free_pagelist(freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003519 } else {
David Woodhouseea8ea462014-03-05 17:09:32 +00003520 add_unmap(domain, iova, freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003521 /*
 3522		 * queue up the release of the unmap to save the ~1/6th of the
 3523		 * CPU time used up by the iotlb flush operation...
3524 */
mark gross5e0d2a62008-03-04 15:22:08 -08003525 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003526}
3527
Jiang Liud41a4ad2014-07-11 14:19:34 +08003528static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3529 size_t size, enum dma_data_direction dir,
3530 struct dma_attrs *attrs)
3531{
3532 intel_unmap(dev, dev_addr);
3533}
3534
David Woodhouse5040a912014-03-09 16:14:00 -07003535static void *intel_alloc_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003536 dma_addr_t *dma_handle, gfp_t flags,
3537 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003538{
Akinobu Mita36746432014-06-04 16:06:51 -07003539 struct page *page = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003540 int order;
3541
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003542 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003543 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003544
David Woodhouse5040a912014-03-09 16:14:00 -07003545 if (!iommu_no_mapping(dev))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003546 flags &= ~(GFP_DMA | GFP_DMA32);
David Woodhouse5040a912014-03-09 16:14:00 -07003547 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3548 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003549 flags |= GFP_DMA;
3550 else
3551 flags |= GFP_DMA32;
3552 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003553
Akinobu Mita36746432014-06-04 16:06:51 -07003554 if (flags & __GFP_WAIT) {
3555 unsigned int count = size >> PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003556
Akinobu Mita36746432014-06-04 16:06:51 -07003557 page = dma_alloc_from_contiguous(dev, count, order);
3558 if (page && iommu_no_mapping(dev) &&
3559 page_to_phys(page) + size > dev->coherent_dma_mask) {
3560 dma_release_from_contiguous(dev, page, count);
3561 page = NULL;
3562 }
3563 }
3564
3565 if (!page)
3566 page = alloc_pages(flags, order);
3567 if (!page)
3568 return NULL;
3569 memset(page_address(page), 0, size);
3570
3571 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003572 DMA_BIDIRECTIONAL,
David Woodhouse5040a912014-03-09 16:14:00 -07003573 dev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003574 if (*dma_handle)
Akinobu Mita36746432014-06-04 16:06:51 -07003575 return page_address(page);
3576 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3577 __free_pages(page, order);
3578
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003579 return NULL;
3580}
3581
David Woodhouse5040a912014-03-09 16:14:00 -07003582static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003583 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003584{
3585 int order;
Akinobu Mita36746432014-06-04 16:06:51 -07003586 struct page *page = virt_to_page(vaddr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003587
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003588 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003589 order = get_order(size);
3590
Jiang Liud41a4ad2014-07-11 14:19:34 +08003591 intel_unmap(dev, dma_handle);
Akinobu Mita36746432014-06-04 16:06:51 -07003592 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3593 __free_pages(page, order);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003594}
3595
David Woodhouse5040a912014-03-09 16:14:00 -07003596static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003597 int nelems, enum dma_data_direction dir,
3598 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003599{
Jiang Liud41a4ad2014-07-11 14:19:34 +08003600 intel_unmap(dev, sglist[0].dma_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003601}
3602
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003603static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003604 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003605{
3606 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003607 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003608
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003609 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003610 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003611 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003612 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003613 }
3614 return nelems;
3615}
3616
David Woodhouse5040a912014-03-09 16:14:00 -07003617static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003618 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003619{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003620 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003621 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003622 size_t size = 0;
3623 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003624 struct iova *iova = NULL;
3625 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003626 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003627 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003628 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003629
3630 BUG_ON(dir == DMA_NONE);
David Woodhouse5040a912014-03-09 16:14:00 -07003631 if (iommu_no_mapping(dev))
3632 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003633
David Woodhouse5040a912014-03-09 16:14:00 -07003634 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003635 if (!domain)
3636 return 0;
3637
Weidong Han8c11e792008-12-08 15:29:22 +08003638 iommu = domain_get_iommu(domain);
3639
David Woodhouseb536d242009-06-28 14:49:31 +01003640 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003641 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003642
David Woodhouse5040a912014-03-09 16:14:00 -07003643 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3644 *dev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003645 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003646 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003647 return 0;
3648 }
3649
3650 /*
 3651	 * Check if DMAR supports zero-length reads on write-only
 3652	 * mappings.
3653 */
3654 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003655 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003656 prot |= DMA_PTE_READ;
3657 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3658 prot |= DMA_PTE_WRITE;
3659
David Woodhouseb536d242009-06-28 14:49:31 +01003660 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003661
Fenghua Yuf5329592009-08-04 15:09:37 -07003662 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003663 if (unlikely(ret)) {
David Woodhousee1605492009-06-29 11:17:38 +01003664 dma_pte_free_pagetable(domain, start_vpfn,
3665 start_vpfn + size - 1);
David Woodhousee1605492009-06-29 11:17:38 +01003666 __free_iova(&domain->iovad, iova);
3667 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003668 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003669
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003670 /* it's a non-present to present mapping. Only flush if caching mode */
3671 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003672 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003673 else
Weidong Han8c11e792008-12-08 15:29:22 +08003674 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003675
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003676 return nelems;
3677}
3678
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003679static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3680{
3681 return !dma_addr;
3682}
3683
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003684struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003685 .alloc = intel_alloc_coherent,
3686 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003687 .map_sg = intel_map_sg,
3688 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003689 .map_page = intel_map_page,
3690 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003691 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003692};
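/*
 * Sketch: how these ops take effect.  On x86 the driver points the
 * global dma_ops at this table late in intel_iommu_init(); shown here
 * as an assumption for illustration.
 */
static void example_install_dma_ops(void)
{
	dma_ops = &intel_dma_ops;	/* as intel_iommu_init() does */
}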
3693
3694static inline int iommu_domain_cache_init(void)
3695{
3696 int ret = 0;
3697
3698 iommu_domain_cache = kmem_cache_create("iommu_domain",
3699 sizeof(struct dmar_domain),
3700 0,
3701 SLAB_HWCACHE_ALIGN,
3702
3703 NULL);
3704 if (!iommu_domain_cache) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003705 pr_err("Couldn't create iommu_domain cache\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003706 ret = -ENOMEM;
3707 }
3708
3709 return ret;
3710}
3711
3712static inline int iommu_devinfo_cache_init(void)
3713{
3714 int ret = 0;
3715
3716 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3717 sizeof(struct device_domain_info),
3718 0,
3719 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003720 NULL);
3721 if (!iommu_devinfo_cache) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02003722 pr_err("Couldn't create devinfo cache\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003723 ret = -ENOMEM;
3724 }
3725
3726 return ret;
3727}
3728
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003729static int __init iommu_init_mempool(void)
3730{
3731 int ret;
3732 ret = iommu_iova_cache_init();
3733 if (ret)
3734 return ret;
3735
3736 ret = iommu_domain_cache_init();
3737 if (ret)
3738 goto domain_error;
3739
3740 ret = iommu_devinfo_cache_init();
3741 if (!ret)
3742 return ret;
3743
3744 kmem_cache_destroy(iommu_domain_cache);
3745domain_error:
Robin Murphy85b45452015-01-12 17:51:14 +00003746 iommu_iova_cache_destroy();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003747
3748 return -ENOMEM;
3749}
3750
3751static void __init iommu_exit_mempool(void)
3752{
3753 kmem_cache_destroy(iommu_devinfo_cache);
3754 kmem_cache_destroy(iommu_domain_cache);
Robin Murphy85b45452015-01-12 17:51:14 +00003755 iommu_iova_cache_destroy();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003756}
3757
Dan Williams556ab452010-07-23 15:47:56 -07003758static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3759{
3760 struct dmar_drhd_unit *drhd;
3761 u32 vtbar;
3762 int rc;
3763
3764 /* We know that this device on this chipset has its own IOMMU.
3765 * If we find it under a different IOMMU, then the BIOS is lying
3766 * to us. Hope that the IOMMU for this device is actually
3767 * disabled, and it needs no translation...
3768 */
3769 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3770 if (rc) {
3771 /* "can't" happen */
3772 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3773 return;
3774 }
3775 vtbar &= 0xffff0000;
3776
3777	/* we know that this iommu should be at offset 0xa000 from vtbar */
3778 drhd = dmar_find_matched_drhd_unit(pdev);
3779 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3780 TAINT_FIRMWARE_WORKAROUND,
3781 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3782 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3783}
3784DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3785
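/*
 * Illustrative sketch, not part of this file: the fixup pattern used
 * above.  The callback runs when a device matching the vendor/device
 * pair is enabled; the device ID below is hypothetical.
 */
static void example_enable_quirk(struct pci_dev *pdev)
{
	dev_info(&pdev->dev, "example enable-time quirk ran\n");
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, 0xbeef, example_enable_quirk);
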
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003786static void __init init_no_remapping_devices(void)
3787{
3788 struct dmar_drhd_unit *drhd;
David Woodhouse832bd852014-03-07 15:08:36 +00003789 struct device *dev;
Jiang Liub683b232014-02-19 14:07:32 +08003790 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003791
3792 for_each_drhd_unit(drhd) {
3793 if (!drhd->include_all) {
Jiang Liub683b232014-02-19 14:07:32 +08003794 for_each_active_dev_scope(drhd->devices,
3795 drhd->devices_cnt, i, dev)
3796 break;
David Woodhouse832bd852014-03-07 15:08:36 +00003797 /* ignore DMAR unit if no devices exist */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003798 if (i == drhd->devices_cnt)
3799 drhd->ignored = 1;
3800 }
3801 }
3802
Jiang Liu7c919772014-01-06 14:18:18 +08003803 for_each_active_drhd_unit(drhd) {
Jiang Liu7c919772014-01-06 14:18:18 +08003804 if (drhd->include_all)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003805 continue;
3806
Jiang Liub683b232014-02-19 14:07:32 +08003807 for_each_active_dev_scope(drhd->devices,
3808 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003809 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003810 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003811 if (i < drhd->devices_cnt)
3812 continue;
3813
David Woodhousec0771df2011-10-14 20:59:46 +01003814 /* This IOMMU has *only* gfx devices. Either bypass it or
3815 set the gfx_mapped flag, as appropriate */
3816 if (dmar_map_gfx) {
3817 intel_iommu_gfx_mapped = 1;
3818 } else {
3819 drhd->ignored = 1;
Jiang Liub683b232014-02-19 14:07:32 +08003820 for_each_active_dev_scope(drhd->devices,
3821 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003822 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003823 }
3824 }
3825}
3826
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003827#ifdef CONFIG_SUSPEND
3828static int init_iommu_hw(void)
3829{
3830 struct dmar_drhd_unit *drhd;
3831 struct intel_iommu *iommu = NULL;
3832
3833 for_each_active_iommu(iommu, drhd)
3834 if (iommu->qi)
3835 dmar_reenable_qi(iommu);
3836
Joseph Cihulab7792602011-05-03 00:08:37 -07003837 for_each_iommu(iommu, drhd) {
3838 if (drhd->ignored) {
3839 /*
3840 * we always have to disable PMRs or DMA may fail on
3841 * this device
3842 */
3843 if (force_on)
3844 iommu_disable_protect_mem_regions(iommu);
3845 continue;
3846 }
3847
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003848 iommu_flush_write_buffer(iommu);
3849
3850 iommu_set_root_entry(iommu);
3851
3852 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003853 DMA_CCMD_GLOBAL_INVL);
Jiang Liu2a41cce2014-07-11 14:19:33 +08003854 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3855 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07003856 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003857 }
3858
3859 return 0;
3860}
3861
3862static void iommu_flush_all(void)
3863{
3864 struct dmar_drhd_unit *drhd;
3865 struct intel_iommu *iommu;
3866
3867 for_each_active_iommu(iommu, drhd) {
3868 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003869 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003870 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003871 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003872 }
3873}
3874
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003875static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003876{
3877 struct dmar_drhd_unit *drhd;
3878 struct intel_iommu *iommu = NULL;
3879 unsigned long flag;
3880
3881 for_each_active_iommu(iommu, drhd) {
3882 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3883 GFP_ATOMIC);
3884 if (!iommu->iommu_state)
3885 goto nomem;
3886 }
3887
3888 iommu_flush_all();
3889
3890 for_each_active_iommu(iommu, drhd) {
3891 iommu_disable_translation(iommu);
3892
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003893 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003894
3895 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3896 readl(iommu->reg + DMAR_FECTL_REG);
3897 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3898 readl(iommu->reg + DMAR_FEDATA_REG);
3899 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3900 readl(iommu->reg + DMAR_FEADDR_REG);
3901 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3902 readl(iommu->reg + DMAR_FEUADDR_REG);
3903
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003904 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003905 }
3906 return 0;
3907
3908nomem:
3909 for_each_active_iommu(iommu, drhd)
3910 kfree(iommu->iommu_state);
3911
3912 return -ENOMEM;
3913}
3914
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003915static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003916{
3917 struct dmar_drhd_unit *drhd;
3918 struct intel_iommu *iommu = NULL;
3919 unsigned long flag;
3920
3921 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003922 if (force_on)
3923			panic("tboot: IOMMU setup failed, DMAR cannot resume!\n");
3924		else
3925			WARN(1, "IOMMU setup failed, DMAR cannot resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003926 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003927 }
3928
3929 for_each_active_iommu(iommu, drhd) {
3930
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003931 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003932
3933 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3934 iommu->reg + DMAR_FECTL_REG);
3935 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3936 iommu->reg + DMAR_FEDATA_REG);
3937 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3938 iommu->reg + DMAR_FEADDR_REG);
3939 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3940 iommu->reg + DMAR_FEUADDR_REG);
3941
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003942 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003943 }
3944
3945 for_each_active_iommu(iommu, drhd)
3946 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003947}
3948
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003949static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003950 .resume = iommu_resume,
3951 .suspend = iommu_suspend,
3952};
3953
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003954static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003955{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003956 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003957}
3958
3959#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003960static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003961#endif /* CONFIG_SUSPEND */
3962
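/*
 * Illustrative sketch, not part of this file: the syscore pattern used
 * above.  Syscore suspend/resume hooks run late, on one CPU with
 * interrupts disabled, which is why iommu_suspend() sticks to
 * GFP_ATOMIC allocations and raw spinlocks.
 */
static int example_suspend(void)
{
	return 0;		/* save hardware state; must not sleep */
}

static void example_resume(void)
{
	/* restore the state saved by example_suspend() */
}

static struct syscore_ops example_syscore_ops = {
	.suspend = example_suspend,
	.resume  = example_resume,
};
/* registered once at boot: register_syscore_ops(&example_syscore_ops); */
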
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003963
Jiang Liuc2a0b532014-11-09 22:47:56 +08003964int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003965{
3966 struct acpi_dmar_reserved_memory *rmrr;
3967 struct dmar_rmrr_unit *rmrru;
3968
3969 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3970 if (!rmrru)
3971 return -ENOMEM;
3972
3973 rmrru->hdr = header;
3974 rmrr = (struct acpi_dmar_reserved_memory *)header;
3975 rmrru->base_address = rmrr->base_address;
3976 rmrru->end_address = rmrr->end_address;
Jiang Liu2e455282014-02-19 14:07:36 +08003977 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3978 ((void *)rmrr) + rmrr->header.length,
3979 &rmrru->devices_cnt);
3980 if (rmrru->devices_cnt && rmrru->devices == NULL) {
3981 kfree(rmrru);
3982 return -ENOMEM;
3983 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003984
Jiang Liu2e455282014-02-19 14:07:36 +08003985 list_add(&rmrru->list, &dmar_rmrr_units);
3986
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003987 return 0;
3988}
3989
Jiang Liu6b197242014-11-09 22:47:58 +08003990static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
3991{
3992 struct dmar_atsr_unit *atsru;
3993 struct acpi_dmar_atsr *tmp;
3994
3995 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3996 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
3997 if (atsr->segment != tmp->segment)
3998 continue;
3999 if (atsr->header.length != tmp->header.length)
4000 continue;
4001 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4002 return atsru;
4003 }
4004
4005 return NULL;
4006}
4007
4008int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004009{
4010 struct acpi_dmar_atsr *atsr;
4011 struct dmar_atsr_unit *atsru;
4012
Jiang Liu6b197242014-11-09 22:47:58 +08004013 if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
4014 return 0;
4015
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004016 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
Jiang Liu6b197242014-11-09 22:47:58 +08004017 atsru = dmar_find_atsr(atsr);
4018 if (atsru)
4019 return 0;
4020
4021 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004022 if (!atsru)
4023 return -ENOMEM;
4024
Jiang Liu6b197242014-11-09 22:47:58 +08004025 /*
4026 * If memory is allocated from slab by ACPI _DSM method, we need to
4027 * copy the memory content because the memory buffer will be freed
4028 * on return.
4029 */
4030 atsru->hdr = (void *)(atsru + 1);
4031 memcpy(atsru->hdr, hdr, hdr->length);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004032 atsru->include_all = atsr->flags & 0x1;
Jiang Liu2e455282014-02-19 14:07:36 +08004033 if (!atsru->include_all) {
4034 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4035 (void *)atsr + atsr->header.length,
4036 &atsru->devices_cnt);
4037 if (atsru->devices_cnt && atsru->devices == NULL) {
4038 kfree(atsru);
4039 return -ENOMEM;
4040 }
4041 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004042
Jiang Liu0e242612014-02-19 14:07:34 +08004043 list_add_rcu(&atsru->list, &dmar_atsr_units);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004044
4045 return 0;
4046}
4047
Jiang Liu9bdc5312014-01-06 14:18:27 +08004048static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4049{
4050 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4051 kfree(atsru);
4052}
4053
Jiang Liu6b197242014-11-09 22:47:58 +08004054int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4055{
4056 struct acpi_dmar_atsr *atsr;
4057 struct dmar_atsr_unit *atsru;
4058
4059 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4060 atsru = dmar_find_atsr(atsr);
4061 if (atsru) {
4062 list_del_rcu(&atsru->list);
4063 synchronize_rcu();
4064 intel_iommu_free_atsr(atsru);
4065 }
4066
4067 return 0;
4068}
4069
4070int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4071{
4072 int i;
4073 struct device *dev;
4074 struct acpi_dmar_atsr *atsr;
4075 struct dmar_atsr_unit *atsru;
4076
4077 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4078 atsru = dmar_find_atsr(atsr);
4079 if (!atsru)
4080 return 0;
4081
4082 if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
4083 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4084 i, dev)
4085 return -EBUSY;
4086
4087 return 0;
4088}
4089
Jiang Liuffebeb42014-11-09 22:48:02 +08004090static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4091{
4092 int sp, ret = 0;
4093 struct intel_iommu *iommu = dmaru->iommu;
4094
4095 if (g_iommus[iommu->seq_id])
4096 return 0;
4097
4098 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004099 pr_warn("%s: Doesn't support hardware pass through.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004100 iommu->name);
4101 return -ENXIO;
4102 }
4103 if (!ecap_sc_support(iommu->ecap) &&
4104 domain_update_iommu_snooping(iommu)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004105 pr_warn("%s: Doesn't support snooping.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004106 iommu->name);
4107 return -ENXIO;
4108 }
4109 sp = domain_update_iommu_superpage(iommu) - 1;
4110 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004111 pr_warn("%s: Doesn't support large page.\n",
Jiang Liuffebeb42014-11-09 22:48:02 +08004112 iommu->name);
4113 return -ENXIO;
4114 }
4115
4116 /*
4117 * Disable translation if already enabled prior to OS handover.
4118 */
4119 if (iommu->gcmd & DMA_GCMD_TE)
4120 iommu_disable_translation(iommu);
4121
4122 g_iommus[iommu->seq_id] = iommu;
4123 ret = iommu_init_domains(iommu);
4124 if (ret == 0)
4125 ret = iommu_alloc_root_entry(iommu);
4126 if (ret)
4127 goto out;
4128
4129 if (dmaru->ignored) {
4130 /*
4131 * we always have to disable PMRs or DMA may fail on this device
4132 */
4133 if (force_on)
4134 iommu_disable_protect_mem_regions(iommu);
4135 return 0;
4136 }
4137
4138 intel_iommu_init_qi(iommu);
4139 iommu_flush_write_buffer(iommu);
4140 ret = dmar_set_interrupt(iommu);
4141 if (ret)
4142 goto disable_iommu;
4143
4144 iommu_set_root_entry(iommu);
4145 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4146 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4147 iommu_enable_translation(iommu);
4148
4149 if (si_domain) {
4150 ret = iommu_attach_domain(si_domain, iommu);
4151 if (ret < 0 || si_domain->id != ret)
4152 goto disable_iommu;
4153 domain_attach_iommu(si_domain, iommu);
4154 }
4155
4156 iommu_disable_protect_mem_regions(iommu);
4157 return 0;
4158
4159disable_iommu:
4160 disable_dmar_iommu(iommu);
4161out:
4162 free_dmar_iommu(iommu);
4163 return ret;
4164}
4165
Jiang Liu6b197242014-11-09 22:47:58 +08004166int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4167{
Jiang Liuffebeb42014-11-09 22:48:02 +08004168 int ret = 0;
4169 struct intel_iommu *iommu = dmaru->iommu;
4170
4171 if (!intel_iommu_enabled)
4172 return 0;
4173 if (iommu == NULL)
4174 return -EINVAL;
4175
4176 if (insert) {
4177 ret = intel_iommu_add(dmaru);
4178 } else {
4179 disable_dmar_iommu(iommu);
4180 free_dmar_iommu(iommu);
4181 }
4182
4183 return ret;
Jiang Liu6b197242014-11-09 22:47:58 +08004184}
4185
Jiang Liu9bdc5312014-01-06 14:18:27 +08004186static void intel_iommu_free_dmars(void)
4187{
4188 struct dmar_rmrr_unit *rmrru, *rmrr_n;
4189 struct dmar_atsr_unit *atsru, *atsr_n;
4190
4191 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4192 list_del(&rmrru->list);
4193 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
4194 kfree(rmrru);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004195 }
4196
Jiang Liu9bdc5312014-01-06 14:18:27 +08004197 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4198 list_del(&atsru->list);
4199 intel_iommu_free_atsr(atsru);
4200 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004201}
4202
4203int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4204{
Jiang Liub683b232014-02-19 14:07:32 +08004205 int i, ret = 1;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004206 struct pci_bus *bus;
David Woodhouse832bd852014-03-07 15:08:36 +00004207 struct pci_dev *bridge = NULL;
4208 struct device *tmp;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004209 struct acpi_dmar_atsr *atsr;
4210 struct dmar_atsr_unit *atsru;
4211
4212 dev = pci_physfn(dev);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004213 for (bus = dev->bus; bus; bus = bus->parent) {
Jiang Liub5f82dd2014-02-19 14:07:31 +08004214 bridge = bus->self;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004215 if (!bridge || !pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08004216 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004217 return 0;
Jiang Liub5f82dd2014-02-19 14:07:31 +08004218 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004219 break;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004220 }
Jiang Liub5f82dd2014-02-19 14:07:31 +08004221 if (!bridge)
4222 return 0;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004223
Jiang Liu0e242612014-02-19 14:07:34 +08004224 rcu_read_lock();
Jiang Liub5f82dd2014-02-19 14:07:31 +08004225 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4226 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4227 if (atsr->segment != pci_domain_nr(dev->bus))
4228 continue;
4229
Jiang Liub683b232014-02-19 14:07:32 +08004230 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
David Woodhouse832bd852014-03-07 15:08:36 +00004231 if (tmp == &bridge->dev)
Jiang Liub683b232014-02-19 14:07:32 +08004232 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08004233
4234 if (atsru->include_all)
Jiang Liub683b232014-02-19 14:07:32 +08004235 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08004236 }
Jiang Liub683b232014-02-19 14:07:32 +08004237 ret = 0;
4238out:
Jiang Liu0e242612014-02-19 14:07:34 +08004239 rcu_read_unlock();
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004240
Jiang Liub683b232014-02-19 14:07:32 +08004241 return ret;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004242}
4243
Jiang Liu59ce0512014-02-19 14:07:35 +08004244int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4245{
4246 int ret = 0;
4247 struct dmar_rmrr_unit *rmrru;
4248 struct dmar_atsr_unit *atsru;
4249 struct acpi_dmar_atsr *atsr;
4250 struct acpi_dmar_reserved_memory *rmrr;
4251
4252 if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
4253 return 0;
4254
4255 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4256 rmrr = container_of(rmrru->hdr,
4257 struct acpi_dmar_reserved_memory, header);
4258 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4259 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4260 ((void *)rmrr) + rmrr->header.length,
4261 rmrr->segment, rmrru->devices,
4262 rmrru->devices_cnt);
Jiang Liu27e24952014-06-20 15:08:06 +08004263			if (ret < 0)
Jiang Liu59ce0512014-02-19 14:07:35 +08004264 return ret;
4265 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
Jiang Liu27e24952014-06-20 15:08:06 +08004266 dmar_remove_dev_scope(info, rmrr->segment,
4267 rmrru->devices, rmrru->devices_cnt);
Jiang Liu59ce0512014-02-19 14:07:35 +08004268 }
4269 }
4270
4271 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4272 if (atsru->include_all)
4273 continue;
4274
4275 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4276 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4277 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4278 (void *)atsr + atsr->header.length,
4279 atsr->segment, atsru->devices,
4280 atsru->devices_cnt);
4281 if (ret > 0)
4282 break;
4283			else if (ret < 0)
4284 return ret;
4285 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
4286 if (dmar_remove_dev_scope(info, atsr->segment,
4287 atsru->devices, atsru->devices_cnt))
4288 break;
4289 }
4290 }
4291
4292 return 0;
4293}
4294
Fenghua Yu99dcade2009-11-11 07:23:06 -08004295/*
4296 * Here we only respond to a device being unbound from its driver.
4297 *
4298 * A newly added device is not attached to its DMAR domain here yet; that
4299 * happens when the device is first mapped to an IOVA.
4300 */
4301static int device_notifier(struct notifier_block *nb,
4302 unsigned long action, void *data)
4303{
4304 struct device *dev = data;
Fenghua Yu99dcade2009-11-11 07:23:06 -08004305 struct dmar_domain *domain;
4306
David Woodhouse3d891942014-03-06 15:59:26 +00004307 if (iommu_dummy(dev))
David Woodhouse44cd6132009-12-02 10:18:30 +00004308 return 0;
4309
Joerg Roedel1196c2f2014-09-30 13:02:03 +02004310 if (action != BUS_NOTIFY_REMOVED_DEVICE)
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004311 return 0;
4312
David Woodhouse1525a292014-03-06 16:19:30 +00004313 domain = find_domain(dev);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004314 if (!domain)
4315 return 0;
4316
Jiang Liu3a5670e2014-02-19 14:07:33 +08004317 down_read(&dmar_global_lock);
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004318 domain_remove_one_dev_info(domain, dev);
Jiang Liuab8dfe22014-07-11 14:19:27 +08004319 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
Jiang Liu7e7dfab2014-02-19 14:07:23 +08004320 domain_exit(domain);
Jiang Liu3a5670e2014-02-19 14:07:33 +08004321 up_read(&dmar_global_lock);
Alex Williamsona97590e2011-03-04 14:52:16 -07004322
Fenghua Yu99dcade2009-11-11 07:23:06 -08004323 return 0;
4324}
4325
4326static struct notifier_block device_nb = {
4327 .notifier_call = device_notifier,
4328};
4329
Jiang Liu75f05562014-02-19 14:07:37 +08004330static int intel_iommu_memory_notifier(struct notifier_block *nb,
4331 unsigned long val, void *v)
4332{
4333 struct memory_notify *mhp = v;
4334 unsigned long long start, end;
4335 unsigned long start_vpfn, last_vpfn;
4336
4337 switch (val) {
4338 case MEM_GOING_ONLINE:
4339 start = mhp->start_pfn << PAGE_SHIFT;
4340 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4341 if (iommu_domain_identity_map(si_domain, start, end)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004342 pr_warn("Failed to build identity map for [%llx-%llx]\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004343 start, end);
4344 return NOTIFY_BAD;
4345 }
4346 break;
4347
4348 case MEM_OFFLINE:
4349 case MEM_CANCEL_ONLINE:
4350 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4351 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4352 while (start_vpfn <= last_vpfn) {
4353 struct iova *iova;
4354 struct dmar_drhd_unit *drhd;
4355 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00004356 struct page *freelist;
Jiang Liu75f05562014-02-19 14:07:37 +08004357
4358 iova = find_iova(&si_domain->iovad, start_vpfn);
4359 if (iova == NULL) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004360 pr_debug("Failed get IOVA for PFN %lx\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004361 start_vpfn);
4362 break;
4363 }
4364
4365 iova = split_and_remove_iova(&si_domain->iovad, iova,
4366 start_vpfn, last_vpfn);
4367 if (iova == NULL) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004368 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
Jiang Liu75f05562014-02-19 14:07:37 +08004369 start_vpfn, last_vpfn);
4370 return NOTIFY_BAD;
4371 }
4372
David Woodhouseea8ea462014-03-05 17:09:32 +00004373 freelist = domain_unmap(si_domain, iova->pfn_lo,
4374 iova->pfn_hi);
4375
Jiang Liu75f05562014-02-19 14:07:37 +08004376 rcu_read_lock();
4377 for_each_active_iommu(iommu, drhd)
4378 iommu_flush_iotlb_psi(iommu, si_domain->id,
Jiang Liua156ef92014-07-11 14:19:36 +08004379 iova->pfn_lo, iova_size(iova),
David Woodhouseea8ea462014-03-05 17:09:32 +00004380 !freelist, 0);
Jiang Liu75f05562014-02-19 14:07:37 +08004381 rcu_read_unlock();
David Woodhouseea8ea462014-03-05 17:09:32 +00004382 dma_free_pagelist(freelist);
Jiang Liu75f05562014-02-19 14:07:37 +08004383
4384 start_vpfn = iova->pfn_hi + 1;
4385 free_iova_mem(iova);
4386 }
4387 break;
4388 }
4389
4390 return NOTIFY_OK;
4391}
4392
4393static struct notifier_block intel_iommu_memory_nb = {
4394 .notifier_call = intel_iommu_memory_notifier,
4395 .priority = 0
4396};
4397
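/*
 * Illustrative sketch, not part of this file: the notifier contract the
 * callback above follows.  Returning NOTIFY_BAD from MEM_GOING_ONLINE
 * vetoes the hot-add, which is how a failed identity-map extension is
 * reported to the memory-hotplug core.  Hypothetical example.
 */
static int example_memory_cb(struct notifier_block *nb,
			     unsigned long val, void *v)
{
	struct memory_notify *mhp = v;

	if (val == MEM_GOING_ONLINE && mhp->nr_pages == 0)
		return NOTIFY_BAD;	/* veto the online operation */

	return NOTIFY_OK;
}

static struct notifier_block example_memory_nb = {
	.notifier_call = example_memory_cb,	/* register_memory_notifier() */
};
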
Alex Williamsona5459cf2014-06-12 16:12:31 -06004398
4399static ssize_t intel_iommu_show_version(struct device *dev,
4400 struct device_attribute *attr,
4401 char *buf)
4402{
4403 struct intel_iommu *iommu = dev_get_drvdata(dev);
4404 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4405 return sprintf(buf, "%d:%d\n",
4406 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4407}
4408static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4409
4410static ssize_t intel_iommu_show_address(struct device *dev,
4411 struct device_attribute *attr,
4412 char *buf)
4413{
4414 struct intel_iommu *iommu = dev_get_drvdata(dev);
4415 return sprintf(buf, "%llx\n", iommu->reg_phys);
4416}
4417static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4418
4419static ssize_t intel_iommu_show_cap(struct device *dev,
4420 struct device_attribute *attr,
4421 char *buf)
4422{
4423 struct intel_iommu *iommu = dev_get_drvdata(dev);
4424 return sprintf(buf, "%llx\n", iommu->cap);
4425}
4426static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4427
4428static ssize_t intel_iommu_show_ecap(struct device *dev,
4429 struct device_attribute *attr,
4430 char *buf)
4431{
4432 struct intel_iommu *iommu = dev_get_drvdata(dev);
4433 return sprintf(buf, "%llx\n", iommu->ecap);
4434}
4435static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4436
4437static struct attribute *intel_iommu_attrs[] = {
4438 &dev_attr_version.attr,
4439 &dev_attr_address.attr,
4440 &dev_attr_cap.attr,
4441 &dev_attr_ecap.attr,
4442 NULL,
4443};
4444
4445static struct attribute_group intel_iommu_group = {
4446 .name = "intel-iommu",
4447 .attrs = intel_iommu_attrs,
4448};
4449
4450const struct attribute_group *intel_iommu_groups[] = {
4451 &intel_iommu_group,
4452 NULL,
4453};
4454
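/*
 * The attribute group above surfaces each DMAR unit in sysfs.  A
 * hypothetical userspace reader is sketched below; the exact path is an
 * assumption (it depends on the name iommu_device_create() assigns,
 * e.g. "dmar0"), but the "intel-iommu" group and file names come from
 * the definitions above.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long cap;
	FILE *f = fopen("/sys/class/iommu/dmar0/intel-iommu/cap", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%llx", &cap) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("DMAR cap = %#llx\n", cap);
	return 0;
}
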
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004455int __init intel_iommu_init(void)
4456{
Jiang Liu9bdc5312014-01-06 14:18:27 +08004457 int ret = -ENODEV;
Takao Indoh3a93c842013-04-23 17:35:03 +09004458 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08004459 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004460
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004461 /* VT-d is required for a TXT/tboot launch, so enforce that */
4462 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004463
Jiang Liu3a5670e2014-02-19 14:07:33 +08004464 if (iommu_init_mempool()) {
4465 if (force_on)
4466 panic("tboot: Failed to initialize iommu memory\n");
4467 return -ENOMEM;
4468 }
4469
4470 down_write(&dmar_global_lock);
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004471 if (dmar_table_init()) {
4472 if (force_on)
4473 panic("tboot: Failed to initialize DMAR table\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004474 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004475 }
4476
Takao Indoh3a93c842013-04-23 17:35:03 +09004477 /*
4478 * Disable translation if already enabled prior to OS handover.
4479 */
Jiang Liu7c919772014-01-06 14:18:18 +08004480 for_each_active_iommu(iommu, drhd)
Takao Indoh3a93c842013-04-23 17:35:03 +09004481 if (iommu->gcmd & DMA_GCMD_TE)
4482 iommu_disable_translation(iommu);
Takao Indoh3a93c842013-04-23 17:35:03 +09004483
Suresh Siddhac2c72862011-08-23 17:05:19 -07004484 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004485 if (force_on)
4486 panic("tboot: Failed to initialize DMAR device scope\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004487 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004488 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07004489
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004490 if (no_iommu || dmar_disabled)
Jiang Liu9bdc5312014-01-06 14:18:27 +08004491 goto out_free_dmar;
Suresh Siddha2ae21012008-07-10 11:16:43 -07004492
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004493 if (list_empty(&dmar_rmrr_units))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004494 pr_info("No RMRR found\n");
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004495
4496 if (list_empty(&dmar_atsr_units))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004497 pr_info("No ATSR found\n");
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004498
Joseph Cihula51a63e62011-03-21 11:04:24 -07004499 if (dmar_init_reserved_ranges()) {
4500 if (force_on)
4501 panic("tboot: Failed to reserve iommu ranges\n");
Jiang Liu3a5670e2014-02-19 14:07:33 +08004502 goto out_free_reserved_range;
Joseph Cihula51a63e62011-03-21 11:04:24 -07004503 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004504
4505 init_no_remapping_devices();
4506
Joseph Cihulab7792602011-05-03 00:08:37 -07004507 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004508 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004509 if (force_on)
4510 panic("tboot: Failed to initialize DMARs\n");
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004511 pr_err("Initialization failed\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004512 goto out_free_reserved_range;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004513 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08004514 up_write(&dmar_global_lock);
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004515 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004516
mark gross5e0d2a62008-03-04 15:22:08 -08004517 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004518#ifdef CONFIG_SWIOTLB
4519 swiotlb = 0;
4520#endif
David Woodhouse19943b02009-08-04 16:19:20 +01004521 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07004522
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004523 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004524
Alex Williamsona5459cf2014-06-12 16:12:31 -06004525 for_each_active_iommu(iommu, drhd)
4526 iommu->iommu_dev = iommu_device_create(NULL, iommu,
4527 intel_iommu_groups,
4528 iommu->name);
4529
Joerg Roedel4236d97d2011-09-06 17:56:07 +02004530 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004531 bus_register_notifier(&pci_bus_type, &device_nb);
Jiang Liu75f05562014-02-19 14:07:37 +08004532 if (si_domain && !hw_pass_through)
4533 register_memory_notifier(&intel_iommu_memory_nb);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004534
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02004535 intel_iommu_enabled = 1;
4536
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004537 return 0;
Jiang Liu9bdc5312014-01-06 14:18:27 +08004538
4539out_free_reserved_range:
4540 put_iova_domain(&reserved_iova_list);
Jiang Liu9bdc5312014-01-06 14:18:27 +08004541out_free_dmar:
4542 intel_iommu_free_dmars();
Jiang Liu3a5670e2014-02-19 14:07:33 +08004543 up_write(&dmar_global_lock);
4544 iommu_exit_mempool();
Jiang Liu9bdc5312014-01-06 14:18:27 +08004545 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004546}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07004547
Alex Williamson579305f2014-07-03 09:51:43 -06004548static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4549{
4550 struct intel_iommu *iommu = opaque;
4551
4552 iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4553 return 0;
4554}
4555
4556/*
4557 * NB - intel-iommu lacks any sort of reference counting for the users of
4558 * dependent devices. If multiple endpoints have intersecting dependent
4559 * devices, unbinding the driver from any one of them will possibly leave
4560 * the others unable to operate.
4561 */
Han, Weidong3199aa62009-02-26 17:31:12 +08004562static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004563 struct device *dev)
Han, Weidong3199aa62009-02-26 17:31:12 +08004564{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004565 if (!iommu || !dev || !dev_is_pci(dev))
Han, Weidong3199aa62009-02-26 17:31:12 +08004566 return;
4567
Alex Williamson579305f2014-07-03 09:51:43 -06004568 pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
Han, Weidong3199aa62009-02-26 17:31:12 +08004569}
4570
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004571static void domain_remove_one_dev_info(struct dmar_domain *domain,
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004572 struct device *dev)
Weidong Hanc7151a82008-12-08 22:51:37 +08004573{
Yijing Wangbca2b912013-10-31 17:26:04 +08004574 struct device_domain_info *info, *tmp;
Weidong Hanc7151a82008-12-08 22:51:37 +08004575 struct intel_iommu *iommu;
4576 unsigned long flags;
Quentin Lambert2f119c72015-02-06 10:59:53 +01004577 bool found = false;
David Woodhouse156baca2014-03-09 14:00:57 -07004578 u8 bus, devfn;
Weidong Hanc7151a82008-12-08 22:51:37 +08004579
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004580 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08004581 if (!iommu)
4582 return;
4583
4584 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wangbca2b912013-10-31 17:26:04 +08004585 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004586 if (info->iommu == iommu && info->bus == bus &&
4587 info->devfn == devfn) {
David Woodhouse109b9b02012-05-25 17:43:02 +01004588 unlink_domain_info(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004589 spin_unlock_irqrestore(&device_domain_lock, flags);
4590
Yu Zhao93a23a72009-05-18 13:51:37 +08004591 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004592 iommu_detach_dev(iommu, info->bus, info->devfn);
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004593 iommu_detach_dependent_devices(iommu, dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08004594 free_devinfo_mem(info);
4595
4596 spin_lock_irqsave(&device_domain_lock, flags);
4597
4598 if (found)
4599 break;
4600 else
4601 continue;
4602 }
4603
4604		/* if there are no other devices under the same iommu
4605		 * owned by this domain, clear this iommu in iommu_bmp and
4606		 * update the iommu count and coherency
4607 */
David Woodhouse8bbc4412014-03-09 13:52:37 -07004608 if (info->iommu == iommu)
Quentin Lambert2f119c72015-02-06 10:59:53 +01004609 found = true;
Weidong Hanc7151a82008-12-08 22:51:37 +08004610 }
4611
Roland Dreier3e7abe22011-07-20 06:22:21 -07004612 spin_unlock_irqrestore(&device_domain_lock, flags);
4613
Weidong Hanc7151a82008-12-08 22:51:37 +08004614	if (!found) {
Jiang Liufb170fb2014-07-11 14:19:28 +08004615 domain_detach_iommu(domain, iommu);
4616 if (!domain_type_is_vm_or_si(domain))
4617 iommu_detach_domain(domain, iommu);
Weidong Hanc7151a82008-12-08 22:51:37 +08004618 }
Weidong Hanc7151a82008-12-08 22:51:37 +08004619}
4620
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004621static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08004622{
4623 int adjust_width;
4624
Robin Murphy0fb5fe82015-01-12 17:51:16 +00004625 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
4626 DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004627 domain_reserve_special_ranges(domain);
4628
4629 /* calculate AGAW */
4630 domain->gaw = guest_width;
4631 adjust_width = guestwidth_to_adjustwidth(guest_width);
4632 domain->agaw = width_to_agaw(adjust_width);
4633
Weidong Han5e98c4b2008-12-08 23:03:27 +08004634 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08004635 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01004636 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004637 domain->max_addr = 0;
Weidong Han5e98c4b2008-12-08 23:03:27 +08004638
4639 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07004640 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004641 if (!domain->pgd)
4642 return -ENOMEM;
4643 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4644 return 0;
4645}
4646
Joerg Roedel00a77de2015-03-26 13:43:08 +01004647static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
Kay, Allen M38717942008-09-09 18:37:29 +03004648{
Joerg Roedel5d450802008-12-03 14:52:32 +01004649 struct dmar_domain *dmar_domain;
Joerg Roedel00a77de2015-03-26 13:43:08 +01004650 struct iommu_domain *domain;
4651
4652 if (type != IOMMU_DOMAIN_UNMANAGED)
4653 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004654
Jiang Liuab8dfe22014-07-11 14:19:27 +08004655 dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
Joerg Roedel5d450802008-12-03 14:52:32 +01004656 if (!dmar_domain) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004657 pr_err("Can't allocate dmar_domain\n");
Joerg Roedel00a77de2015-03-26 13:43:08 +01004658 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004659 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004660 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004661 pr_err("Domain initialization failed\n");
Jiang Liu92d03cc2014-02-19 14:07:28 +08004662 domain_exit(dmar_domain);
Joerg Roedel00a77de2015-03-26 13:43:08 +01004663 return NULL;
Kay, Allen M38717942008-09-09 18:37:29 +03004664 }
Allen Kay8140a952011-10-14 12:32:17 -07004665 domain_update_iommu_cap(dmar_domain);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004666
Joerg Roedel00a77de2015-03-26 13:43:08 +01004667 domain = &dmar_domain->domain;
Joerg Roedel8a0e7152012-01-26 19:40:54 +01004668 domain->geometry.aperture_start = 0;
4669 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4670 domain->geometry.force_aperture = true;
4671
Joerg Roedel00a77de2015-03-26 13:43:08 +01004672 return domain;
Kay, Allen M38717942008-09-09 18:37:29 +03004673}
Kay, Allen M38717942008-09-09 18:37:29 +03004674
Joerg Roedel00a77de2015-03-26 13:43:08 +01004675static void intel_iommu_domain_free(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004676{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004677 domain_exit(to_dmar_domain(domain));
Kay, Allen M38717942008-09-09 18:37:29 +03004678}
Kay, Allen M38717942008-09-09 18:37:29 +03004679
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004680static int intel_iommu_attach_device(struct iommu_domain *domain,
4681 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004682{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004683 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004684 struct intel_iommu *iommu;
4685 int addr_width;
David Woodhouse156baca2014-03-09 14:00:57 -07004686 u8 bus, devfn;
Kay, Allen M38717942008-09-09 18:37:29 +03004687
Alex Williamsonc875d2c2014-07-03 09:57:02 -06004688 if (device_is_rmrr_locked(dev)) {
4689 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
4690 return -EPERM;
4691 }
4692
David Woodhouse7207d8f2014-03-09 16:31:06 -07004693 /* normally dev is not mapped */
4694 if (unlikely(domain_context_mapped(dev))) {
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004695 struct dmar_domain *old_domain;
4696
David Woodhouse1525a292014-03-06 16:19:30 +00004697 old_domain = find_domain(dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004698 if (old_domain) {
Jiang Liuab8dfe22014-07-11 14:19:27 +08004699 if (domain_type_is_vm_or_si(dmar_domain))
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004700 domain_remove_one_dev_info(old_domain, dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004701 else
4702 domain_remove_dev_info(old_domain);
Joerg Roedel62c22162014-12-09 12:56:45 +01004703
4704 if (!domain_type_is_vm_or_si(old_domain) &&
4705 list_empty(&old_domain->devices))
4706 domain_exit(old_domain);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004707 }
4708 }
4709
David Woodhouse156baca2014-03-09 14:00:57 -07004710 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004711 if (!iommu)
4712 return -ENODEV;
4713
4714 /* check if this iommu agaw is sufficient for max mapped address */
4715 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01004716 if (addr_width > cap_mgaw(iommu->cap))
4717 addr_width = cap_mgaw(iommu->cap);
4718
4719 if (dmar_domain->max_addr > (1LL << addr_width)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004720		pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01004722 __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004723 return -EFAULT;
4724 }
Tom Lyona99c47a2010-05-17 08:20:45 +01004725 dmar_domain->gaw = addr_width;
4726
4727 /*
4728 * Knock out extra levels of page tables if necessary
4729 */
4730 while (iommu->agaw < dmar_domain->agaw) {
4731 struct dma_pte *pte;
4732
4733 pte = dmar_domain->pgd;
4734 if (dma_pte_present(pte)) {
Sheng Yang25cbff12010-06-12 19:21:42 +08004735 dmar_domain->pgd = (struct dma_pte *)
4736 phys_to_virt(dma_pte_addr(pte));
Jan Kiszka7a661012010-11-02 08:05:51 +01004737 free_pgtable_page(pte);
Tom Lyona99c47a2010-05-17 08:20:45 +01004738 }
4739 dmar_domain->agaw--;
4740 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004741
David Woodhouse5913c9b2014-03-09 16:27:31 -07004742 return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004743}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004744
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004745static void intel_iommu_detach_device(struct iommu_domain *domain,
4746 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004747{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004748 domain_remove_one_dev_info(to_dmar_domain(domain), dev);
Kay, Allen M38717942008-09-09 18:37:29 +03004749}
Kay, Allen M38717942008-09-09 18:37:29 +03004750
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004751static int intel_iommu_map(struct iommu_domain *domain,
4752 unsigned long iova, phys_addr_t hpa,
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004753 size_t size, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03004754{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004755 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004756 u64 max_addr;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004757 int prot = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004758 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004759
Joerg Roedeldde57a22008-12-03 15:04:09 +01004760 if (iommu_prot & IOMMU_READ)
4761 prot |= DMA_PTE_READ;
4762 if (iommu_prot & IOMMU_WRITE)
4763 prot |= DMA_PTE_WRITE;
Sheng Yang9cf066972009-03-18 15:33:07 +08004764 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4765 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004766
David Woodhouse163cc522009-06-28 00:51:17 +01004767 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004768 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004769 u64 end;
4770
4771 /* check if minimum agaw is sufficient for mapped address */
Tom Lyon8954da12010-05-17 08:19:52 +01004772 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004773 if (end < max_addr) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004774			pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
Tom Lyon8954da12010-05-17 08:19:52 +01004776 __func__, dmar_domain->gaw, max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004777 return -EFAULT;
4778 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01004779 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004780 }
David Woodhousead051222009-06-28 14:22:28 +01004781	/* Round up the size to the next multiple of PAGE_SIZE if it,
4782	   together with the low bits of hpa, would take us onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01004783 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01004784 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4785 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004786 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03004787}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004788
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004789static size_t intel_iommu_unmap(struct iommu_domain *domain,
David Woodhouseea8ea462014-03-05 17:09:32 +00004790 unsigned long iova, size_t size)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004791{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004792 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
David Woodhouseea8ea462014-03-05 17:09:32 +00004793 struct page *freelist = NULL;
4794 struct intel_iommu *iommu;
4795 unsigned long start_pfn, last_pfn;
4796 unsigned int npages;
4797 int iommu_id, num, ndomains, level = 0;
Sheng Yang4b99d352009-07-08 11:52:52 +01004798
David Woodhouse5cf0a762014-03-19 16:07:49 +00004799 /* Cope with horrid API which requires us to unmap more than the
4800 size argument if it happens to be a large-page mapping. */
4801 if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
4802 BUG();
4803
4804 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
4805 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
4806
David Woodhouseea8ea462014-03-05 17:09:32 +00004807 start_pfn = iova >> VTD_PAGE_SHIFT;
4808 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
4809
4810 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
4811
4812 npages = last_pfn - start_pfn + 1;
4813
4814 for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
4815 iommu = g_iommus[iommu_id];
4816
4817 /*
4818 * find bit position of dmar_domain
4819 */
4820 ndomains = cap_ndoms(iommu->cap);
4821 for_each_set_bit(num, iommu->domain_ids, ndomains) {
4822 if (iommu->domains[num] == dmar_domain)
4823 iommu_flush_iotlb_psi(iommu, num, start_pfn,
4824 npages, !freelist, 0);
4825 }
4826
4827 }
4828
4829 dma_free_pagelist(freelist);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004830
David Woodhouse163cc522009-06-28 00:51:17 +01004831 if (dmar_domain->max_addr == iova + size)
4832 dmar_domain->max_addr = iova;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004833
David Woodhouse5cf0a762014-03-19 16:07:49 +00004834 return size;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004835}
Kay, Allen M38717942008-09-09 18:37:29 +03004836
Joerg Roedeld14d6572008-12-03 15:06:57 +01004837static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
Varun Sethibb5547ac2013-03-29 01:23:58 +05304838 dma_addr_t iova)
Kay, Allen M38717942008-09-09 18:37:29 +03004839{
Joerg Roedel00a77de2015-03-26 13:43:08 +01004840 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
Kay, Allen M38717942008-09-09 18:37:29 +03004841 struct dma_pte *pte;
David Woodhouse5cf0a762014-03-19 16:07:49 +00004842 int level = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004843 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004844
David Woodhouse5cf0a762014-03-19 16:07:49 +00004845 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
Kay, Allen M38717942008-09-09 18:37:29 +03004846 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004847 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03004848
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004849 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03004850}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004851
Joerg Roedel5d587b82014-09-05 10:50:45 +02004852static bool intel_iommu_capable(enum iommu_cap cap)
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004853{
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004854 if (cap == IOMMU_CAP_CACHE_COHERENCY)
Joerg Roedel5d587b82014-09-05 10:50:45 +02004855 return domain_update_iommu_snooping(NULL) == 1;
Tom Lyon323f99c2010-07-02 16:56:14 -04004856 if (cap == IOMMU_CAP_INTR_REMAP)
Joerg Roedel5d587b82014-09-05 10:50:45 +02004857 return irq_remapping_enabled == 1;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004858
Joerg Roedel5d587b82014-09-05 10:50:45 +02004859 return false;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004860}
4861
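/*
 * Illustrative sketch, not part of this file: how a consumer such as
 * VFIO probes the capability reported above before relying on snoop
 * control.
 */
static bool example_snoop_control_available(void)
{
	return iommu_capable(&pci_bus_type, IOMMU_CAP_CACHE_COHERENCY);
}
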
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004862static int intel_iommu_add_device(struct device *dev)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004863{
Alex Williamsona5459cf2014-06-12 16:12:31 -06004864 struct intel_iommu *iommu;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004865 struct iommu_group *group;
David Woodhouse156baca2014-03-09 14:00:57 -07004866 u8 bus, devfn;
Alex Williamson70ae6f02011-10-21 15:56:11 -04004867
Alex Williamsona5459cf2014-06-12 16:12:31 -06004868 iommu = device_to_iommu(dev, &bus, &devfn);
4869 if (!iommu)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004870 return -ENODEV;
4871
Alex Williamsona5459cf2014-06-12 16:12:31 -06004872 iommu_device_link(iommu->iommu_dev, dev);
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004873
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004874 group = iommu_group_get_for_dev(dev);
Alex Williamson783f1572012-05-30 14:19:43 -06004875
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004876 if (IS_ERR(group))
4877 return PTR_ERR(group);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004878
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004879 iommu_group_put(group);
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004880 return 0;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004881}
4882
4883static void intel_iommu_remove_device(struct device *dev)
4884{
Alex Williamsona5459cf2014-06-12 16:12:31 -06004885 struct intel_iommu *iommu;
4886 u8 bus, devfn;
4887
4888 iommu = device_to_iommu(dev, &bus, &devfn);
4889 if (!iommu)
4890 return;
4891
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004892 iommu_group_remove_device(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004893
4894 iommu_device_unlink(iommu->iommu_dev, dev);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004895}
4896
Thierry Redingb22f6432014-06-27 09:03:12 +02004897static const struct iommu_ops intel_iommu_ops = {
Joerg Roedel5d587b82014-09-05 10:50:45 +02004898 .capable = intel_iommu_capable,
Joerg Roedel00a77de2015-03-26 13:43:08 +01004899 .domain_alloc = intel_iommu_domain_alloc,
4900 .domain_free = intel_iommu_domain_free,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004901 .attach_dev = intel_iommu_attach_device,
4902 .detach_dev = intel_iommu_detach_device,
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004903 .map = intel_iommu_map,
4904 .unmap = intel_iommu_unmap,
Olav Haugan315786e2014-10-25 09:55:16 -07004905 .map_sg = default_iommu_map_sg,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004906 .iova_to_phys = intel_iommu_iova_to_phys,
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004907 .add_device = intel_iommu_add_device,
4908 .remove_device = intel_iommu_remove_device,
Ohad Ben-Cohen6d1c56a2011-11-10 11:32:30 +02004909 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004910};
David Woodhouse9af88142009-02-13 23:18:03 +00004911
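/*
 * Illustrative sketch, not part of this file: an end-to-end consumer of
 * the ops table above, roughly what a VFIO-style user does.  One page
 * is mapped at IOVA 0; the page is assumed to be ordinary DMA-able RAM
 * and error reporting is minimal.
 */
static int example_use_domain(struct device *dev, struct page *page)
{
	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
	int ret;

	if (!dom)
		return -ENOMEM;

	ret = iommu_attach_device(dom, dev);	/* -> intel_iommu_attach_device() */
	if (ret)
		goto out_free;

	ret = iommu_map(dom, 0, page_to_phys(page), PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE);
	if (!ret)
		iommu_unmap(dom, 0, PAGE_SIZE);	/* -> intel_iommu_unmap() */

	iommu_detach_device(dom, dev);
out_free:
	iommu_domain_free(dom);
	return ret;
}
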
Daniel Vetter94526182013-01-20 23:50:13 +01004912static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4913{
4914 /* G4x/GM45 integrated gfx dmar support is totally busted. */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004915 pr_info("Disabling IOMMU for graphics on this chipset\n");
Daniel Vetter94526182013-01-20 23:50:13 +01004916 dmar_map_gfx = 0;
4917}
4918
4919DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4920DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4921DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4922DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4923DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4924DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4925DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4926
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004927static void quirk_iommu_rwbf(struct pci_dev *dev)
David Woodhouse9af88142009-02-13 23:18:03 +00004928{
4929 /*
4930 * Mobile 4 Series Chipset neglects to set RWBF capability,
Daniel Vetter210561f2013-01-21 19:48:59 +01004931 * but needs it. Same seems to hold for the desktop versions.
David Woodhouse9af88142009-02-13 23:18:03 +00004932 */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004933 pr_info("Forcing write-buffer flush capability\n");
David Woodhouse9af88142009-02-13 23:18:03 +00004934 rwbf_quirk = 1;
4935}
4936
4937DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
Daniel Vetter210561f2013-01-21 19:48:59 +01004938DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4939DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4940DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4941DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4942DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4943DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
David Woodhousee0fc7e02009-09-30 09:12:17 -07004944
Adam Jacksoneecfd572010-08-25 21:17:34 +01004945#define GGC 0x52
4946#define GGC_MEMORY_SIZE_MASK (0xf << 8)
4947#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4948#define GGC_MEMORY_SIZE_1M (0x1 << 8)
4949#define GGC_MEMORY_SIZE_2M (0x3 << 8)
4950#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4951#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4952#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4953#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
4954
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004955static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
David Woodhouse9eecabc2010-09-21 22:28:23 +01004956{
4957 unsigned short ggc;
4958
Adam Jacksoneecfd572010-08-25 21:17:34 +01004959 if (pci_read_config_word(dev, GGC, &ggc))
David Woodhouse9eecabc2010-09-21 22:28:23 +01004960 return;
4961
Adam Jacksoneecfd572010-08-25 21:17:34 +01004962 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004963 pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
David Woodhouse9eecabc2010-09-21 22:28:23 +01004964 dmar_map_gfx = 0;
David Woodhouse6fbcfb32011-09-25 19:11:14 -07004965 } else if (dmar_map_gfx) {
4966 /* we have to ensure the gfx device is idle before we flush */
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02004967 pr_info("Disabling batched IOTLB flush on Ironlake\n");
David Woodhouse6fbcfb32011-09-25 19:11:14 -07004968 intel_iommu_strict = 1;
4969 }
David Woodhouse9eecabc2010-09-21 22:28:23 +01004970}
4971DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4972DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4973DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4974DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4975
David Woodhousee0fc7e02009-09-30 09:12:17 -07004976/* On Tylersburg chipsets, some BIOSes have been known to enable the
4977 ISOCH DMAR unit for the Azalia sound device, but not give it any
4978 TLB entries, which causes it to deadlock. Check for that. We do
4979 this in a function called from init_dmars(), instead of in a PCI
4980 quirk, because we don't want to print the obnoxious "BIOS broken"
4981 message if VT-d is actually disabled.
4982*/
4983static void __init check_tylersburg_isoch(void)
4984{
4985 struct pci_dev *pdev;
4986 uint32_t vtisochctrl;
4987
4988 /* If there's no Azalia in the system anyway, forget it. */
4989 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4990 if (!pdev)
4991 return;
4992 pci_dev_put(pdev);
4993
4994 /* System Management Registers. Might be hidden, in which case
4995 we can't do the sanity check. But that's OK, because the
4996 known-broken BIOSes _don't_ actually hide it, so far. */
4997 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4998 if (!pdev)
4999 return;
5000
5001 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
5002 pci_dev_put(pdev);
5003 return;
5004 }
5005
5006 pci_dev_put(pdev);
5007
5008 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
5009 if (vtisochctrl & 1)
5010 return;
5011
5012 /* Drop all bits other than the number of TLB entries */
5013 vtisochctrl &= 0x1c;
5014
5015 /* If we have the recommended number of TLB entries (16), fine. */
5016 if (vtisochctrl == 0x10)
5017 return;
5018
5019 /* Zero TLB entries? You get to ride the short bus to school. */
5020 if (!vtisochctrl) {
5021 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
5022 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
5023 dmi_get_system_info(DMI_BIOS_VENDOR),
5024 dmi_get_system_info(DMI_BIOS_VERSION),
5025 dmi_get_system_info(DMI_PRODUCT_VERSION));
5026 iommu_identity_mapping |= IDENTMAP_AZALIA;
5027 return;
5028 }
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02005029
5030 pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
David Woodhousee0fc7e02009-09-30 09:12:17 -07005031 vtisochctrl);
5032}