/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 *          Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)     "DMAR: " fmt

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/crash_dump.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
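
/*
 * Worked example (illustrative, not part of the original source): for
 * the default 48-bit guest address width and VTD_PAGE_SHIFT == 12,
 *
 *	__DOMAIN_MAX_PFN(48) = (1ULL << (48 - 12)) - 1 = 0xFFFFFFFFF
 *
 * which fits in an unsigned long on 64-bit builds, so DOMAIN_MAX_PFN(48)
 * is the same value there; on 32-bit it is clamped to ULONG_MAX.
 */
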
/* IO virtual address start page frame number */
#define IOVA_START_PFN		(1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
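
/*
 * Worked example (illustrative, not part of the original source): bit n
 * of the bitmap advertises support for page size 2^n, so ~0xFFFUL sets
 * every bit from 12 upwards and advertises 4KiB, 8KiB, 16KiB, ... - all
 * power-of-two sizes of at least 4KiB, exactly the "any order of 4KiB"
 * behaviour described above.
 */
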
static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}
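
/*
 * Worked example (illustrative, not part of the original source): with
 * agaw 2, agaw_to_level(2) == 4 and agaw_to_width(2) == min(30 + 2*9, 64)
 * == 48, i.e. a four-level table covering a 48-bit address space. Each
 * level consumes LEVEL_STRIDE == 9 bits of the pfn; for pfn 0x12345678
 * at level 2, level_to_offset_bits(2) == 9 and
 *
 *	pfn_level_offset(0x12345678, 2) == (0x12345678 >> 9) & 0x1ff == 0x2b
 *
 * selects entry 0x2b of that level's 512-entry table.
 */
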
static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
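
/*
 * Worked example (illustrative): with 4KiB kernel pages, PAGE_SHIFT ==
 * VTD_PAGE_SHIFT == 12, so these conversions shift by zero and mm and
 * DMA pfns coincide. On a kernel built with 64KiB pages the shift is 4
 * and one mm pfn covers 16 DMA pfns, which is why VT-d pages must never
 * be larger than MM pages.
 */
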
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d cannot be successfully enabled
 * (used when the kernel is launched with TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	lo;
	u64	hi;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

/*
 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
 * if marked present.
 */
static phys_addr_t root_entry_lctp(struct root_entry *re)
{
	if (!(re->lo & 1))
		return 0;

	return re->lo & VTD_PAGE_MASK;
}

/*
 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
 * if marked present.
 */
static phys_addr_t root_entry_uctp(struct root_entry *re)
{
	if (!(re->hi & 1))
		return 0;

	return re->hi & VTD_PAGE_MASK;
}
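
/*
 * Usage sketch (illustrative only, not a code path in this file): in
 * extended-context mode one root entry covers a whole bus, split in two:
 *
 *	struct root_entry *re = &iommu->root_entry[bus];
 *	phys_addr_t lctp = root_entry_lctp(re);	(for devfns 0x00-0x7f)
 *	phys_addr_t uctp = root_entry_uctp(re);	(for devfns 0x80-0xff)
 *
 * A return value of 0 means that half of the root entry is not present.
 */
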
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline void context_clear_pasid_enable(struct context_entry *context)
{
	context->lo &= ~(1ULL << 11);
}

static inline bool context_pasid_enabled(struct context_entry *context)
{
	return !!(context->lo & (1ULL << 11));
}

static inline void context_set_copied(struct context_entry *context)
{
	context->hi |= (1ull << 3);
}

static inline bool context_copied(struct context_entry *context)
{
	return !!(context->hi & (1ULL << 3));
}

static inline bool __context_present(struct context_entry *context)
{
	return (context->lo & 1);
}

static inline bool context_present(struct context_entry *context)
{
	return context_pasid_enabled(context) ?
	     __context_present(context) :
	     __context_present(context) && !context_copied(context);
}

static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline int context_domain_id(struct context_entry *c)
{
	return (c->hi >> 8) & 0xffff;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
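
/*
 * Usage sketch (illustrative, assuming the CONTEXT_TT_* values from
 * <linux/intel-iommu.h>): a caller filling a fresh context entry would
 * compose the helpers above roughly like this, setting the present bit
 * last so the hardware never sees a half-built entry:
 *
 *	context_clear_entry(ce);
 *	context_set_domain_id(ce, did);
 *	context_set_address_width(ce, agaw);
 *	context_set_address_root(ce, virt_to_phys(pgd));
 *	context_set_translation_type(ce, CONTEXT_TT_MULTI_LEVEL);
 *	context_set_fault_enable(ce);
 *	context_set_present(ce);
 */
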
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
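
/*
 * Worked example (illustrative): a leaf PTE mapping host page frame
 * 0x1234 read/write holds 0x1234000 | DMA_PTE_READ | DMA_PTE_WRITE.
 * dma_pte_present() sees one of the low two bits set, and
 * dma_pte_addr() recovers 0x1234000 by masking with VTD_PAGE_MASK.
 */
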
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* A domain representing a virtual machine may own devices behind
 * more than one iommu, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */

	struct iommu_domain domain;	/* generic domain data structure for
					   iommu core */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct device *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	u64	base_address;		/* reserved base address */
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
	struct page *freelist[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev);
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int intel_iommu_ecs = 1;

/* We only actually use ECS when PASID support (on the new bit 40)
 * is also advertised. Some early implementations (the ones with
 * PASID support on bit 28) have issues even when we *only* use
 * extended root/context tables. */
#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
			    ecap_pasid(iommu->ecap))

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static const struct iommu_ops intel_iommu_ops;

static bool translation_pre_enabled(struct intel_iommu *iommu)
{
	return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
}

static void clear_translation_pre_enabled(struct intel_iommu *iommu)
{
	iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct intel_iommu *iommu)
{
	u32 gsts;

	gsts = readl(iommu->reg + DMAR_GSTS_REG);
	if (gsts & DMA_GSTS_TES)
		iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
}

/* Convert a generic 'struct iommu_domain' to the private 'struct dmar_domain' */
static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct dmar_domain, domain);
}

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			pr_info("IOMMU enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			pr_info("IOMMU disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			pr_info("Disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			pr_info("Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			pr_info("Disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			pr_info("Disable supported super page\n");
			intel_iommu_superpage = 0;
		} else if (!strncmp(str, "ecs_off", 7)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable extended context table support\n");
			intel_iommu_ecs = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

static inline int domain_type_is_vm(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}

static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
	return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
				DOMAIN_FLAG_STATIC_IDENTITY);
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
				       unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}
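
/*
 * Worked example (illustrative): for the common agaw of 2 the domain
 * address width is 48 bits, so addr_width == 48 - 12 == 36; any pfn at
 * or above 1UL << 36 is rejected, while pfn 0xFFFFFFFFF (the last 4KiB
 * page below 2^48) is accepted.
 */
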
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
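
/*
 * Worked example (illustrative, assuming the SAGAW bit encoding used by
 * cap_sagaw(): bit 1 == 3-level/39-bit, bit 2 == 4-level/48-bit): with
 * DEFAULT_DOMAIN_ADDRESS_WIDTH == 48, width_to_agaw(48) == 2, so the
 * search starts at agaw 2. Hardware reporting sagaw == 0x4 matches
 * immediately; hardware reporting only 0x2 falls back to agaw 1, i.e.
 * a 39-bit, 3-level table.
 */
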
/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain_type_is_vm_or_si(domain));
	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool found = false;
	int i;

	domain->iommu_coherency = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		found = true;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	if (found)
		return;

	/* No hardware attached; use lowest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_coherent(iommu->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	rcu_read_unlock();
}

static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ret = 1;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			if (!ecap_sc_support(iommu->ecap)) {
				ret = 0;
				break;
			}
		}
	}
	rcu_read_unlock();

	return ret;
}

static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		return 0;
	}

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (iommu != skip) {
			mask &= cap_super_page_val(iommu->cap);
			if (!mask)
				break;
		}
	}
	rcu_read_unlock();

	return fls(mask);
}
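
/*
 * Worked example (illustrative): one iommu with cap_super_page_val() ==
 * 0x3 (2MiB and 1GiB) and another with 0x1 (2MiB only) intersect to
 * mask == 0x1, and fls(0x1) == 1, so the domain is limited to 2MiB
 * superpages: the smallest common denominator.
 */
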
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain->iommu_snooping = domain_update_iommu_snooping(NULL);
	domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}

static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
						       u8 bus, u8 devfn, int alloc)
{
	struct root_entry *root = &iommu->root_entry[bus];
	struct context_entry *context;
	u64 *entry;

	/* Default to the lower table; the ECS check below may switch to
	 * the upper one, so this assignment must come first. */
	entry = &root->lo;
	if (ecs_enabled(iommu)) {
		if (devfn >= 0x80) {
			devfn -= 0x80;
			entry = &root->hi;
		}
		devfn *= 2;
	}
	if (*entry & 1)
		context = phys_to_virt(*entry & VTD_PAGE_MASK);
	else {
		unsigned long phy_addr;
		if (!alloc)
			return NULL;

		context = alloc_pgtable_page(iommu->node);
		if (!context)
			return NULL;

		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		*entry = phy_addr | 1;
		__iommu_flush_cache(iommu, entry, sizeof(*entry));
	}
	return &context[devfn];
}

static int iommu_dummy(struct device *dev)
{
	return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	struct pci_dev *ptmp, *pdev = NULL;
	u16 segment = 0;
	int i;

	if (iommu_dummy(dev))
		return NULL;

	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		segment = pci_domain_nr(pdev->bus);
	} else if (has_acpi_companion(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				*bus = drhd->devices[i].bus;
				*devfn = drhd->devices[i].devfn;
				goto out;
			}

			if (!pdev || !dev_is_pci(tmp))
				continue;

			ptmp = to_pci_dev(tmp);
			if (ptmp->subordinate &&
			    ptmp->subordinate->number <= pdev->bus->number &&
			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
		got_pdev:
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;
			goto out;
		}
	}
	iommu = NULL;
 out:
	rcu_read_unlock();

	return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (context)
		ret = context_present(context);
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (context) {
		context_clear_entry(context);
		__iommu_flush_cache(iommu, context, sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		context = iommu_context_addr(iommu, i, 0, 0);
		if (context)
			free_pgtable_page(context);

		if (!ecs_enabled(iommu))
			continue;

		context = iommu_context_addr(iommu, i, 0x80, 0);
		if (context)
			free_pgtable_page(context);

	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (!domain_pfn_supported(domain, pfn))
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}
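
/*
 * Usage sketch (illustrative; pfn_to_4k_pte() is a hypothetical wrapper,
 * not part of this driver): a *target_level of 1 demands a 4KiB leaf and
 * allocates intermediate tables on the way down, while passing 0 stops
 * at whatever leaf already exists (superpage or empty slot) and writes
 * the level actually reached back through the pointer.
 */
static inline struct dma_pte *pfn_to_4k_pte(struct dmar_domain *domain,
					    unsigned long pfn)
{
	int level = 1;	/* request a 4KiB leaf */

	return pfn_to_dma_pte(domain, pfn, &level);
}
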

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (dma_pte_superpage(pte)) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}
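
/*
 * Worked example (illustrative): clearing pfns 0x0-0x3ff in a domain
 * mapped with 2MiB superpages finds level-2 PTEs; large_page becomes 2
 * and start_pfn advances by lvl_to_nr_pages(2) == 512 pfns per cleared
 * entry, so only two PTEs are touched instead of 1024 leaf entries.
 */
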
static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level - 1);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	pte = page_address(pg);
	do {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
		pte++;
	} while (!first_pte_in_page(pte));

	return freelist;
}

static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}

/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
struct page *domain_unmap(struct dmar_domain *domain,
			  unsigned long start_pfn,
			  unsigned long last_pfn)
{
	struct page *freelist = NULL;

	BUG_ON(!domain_pfn_supported(domain, start_pfn));
	BUG_ON(!domain_pfn_supported(domain, last_pfn));
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
				       domain->pgd, 0, start_pfn, last_pfn, NULL);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		struct page *pgd_page = virt_to_page(domain->pgd);
		pgd_page->freelist = freelist;
		freelist = pgd_page;

		domain->pgd = NULL;
	}

	return freelist;
}

void dma_free_pagelist(struct page *freelist)
{
	struct page *pg;

	while ((pg = freelist)) {
		freelist = pg->freelist;
		free_pgtable_page(page_address(pg));
	}
}
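
/*
 * Usage sketch (illustrative): callers pair these two helpers around an
 * IOTLB flush, because the hardware may keep walking the old tables
 * until the flush completes:
 *
 *	struct page *freelist = domain_unmap(domain, start_pfn, last_pfn);
 *	... flush the IOTLB for this range ...
 *	dma_free_pagelist(freelist);
 */
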
/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root) {
		pr_err("Allocating root entry for %s failed\n",
			iommu->name);
		return -ENOMEM;
	}

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	u64 addr;
	u32 sts;
	unsigned long flag;

	addr = virt_to_phys(iommu->root_entry);
	if (ecs_enabled(iommu))
		addr |= DMA_RTADDR_RTT;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines whether we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001297/* return value determine if we need a write buffer flush */
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001298static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1299 u64 addr, unsigned int size_order, u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001300{
1301 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1302 u64 val = 0, val_iva = 0;
1303 unsigned long flag;
1304
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001305 switch (type) {
1306 case DMA_TLB_GLOBAL_FLUSH:
1307 /* global flush doesn't need set IVA_REG */
1308 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1309 break;
1310 case DMA_TLB_DSI_FLUSH:
1311 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1312 break;
1313 case DMA_TLB_PSI_FLUSH:
1314 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
David Woodhouseea8ea462014-03-05 17:09:32 +00001315 /* IH bit is passed in as part of address */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001316 val_iva = size_order | addr;
1317 break;
1318 default:
1319 BUG();
1320 }
1321 /* Note: set drain read/write */
1322#if 0
1323 /*
1324 * This is probably to be super secure.. Looks like we can
1325 * ignore it without any impact.
1326 */
1327 if (cap_read_drain(iommu->cap))
1328 val |= DMA_TLB_READ_DRAIN;
1329#endif
1330 if (cap_write_drain(iommu->cap))
1331 val |= DMA_TLB_WRITE_DRAIN;
1332
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001333 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001334 /* Note: Only uses first TLB reg currently */
1335 if (val_iva)
1336 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1337 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1338
1339 /* Make sure hardware complete it */
1340 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1341 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1342
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001343 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001344
1345 /* check IOTLB invalidation granularity */
1346 if (DMA_TLB_IAIG(val) == 0)
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001347 pr_err("Flush IOTLB failed\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001348 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
Joerg Roedel9f10e5b2015-06-12 09:57:06 +02001349 pr_debug("TLB flush request %Lx, actual %Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001350 (unsigned long long)DMA_TLB_IIRG(type),
1351 (unsigned long long)DMA_TLB_IAIG(val));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001352}
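
/*
 * Worked example (illustrative values): a page-selective flush of 16
 * pages at IOVA 0x1fe0000 for domain 5 programs
 * IVA_REG = 0x1fe0000 | 4 (address mask AM = 4, i.e. 2^4 pages) and
 * IOTLB_REG = DMA_TLB_PSI_FLUSH | DMA_TLB_IVT | DMA_TLB_DID(5), then
 * polls the IVT bit until hardware clears it. IAIG reports back the
 * granularity the hardware actually used, which is why it is checked
 * against the requested granularity above.
 */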

static struct device_domain_info *
iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
                        u8 bus, u8 devfn)
{
        bool found = false;
        unsigned long flags;
        struct device_domain_info *info;
        struct pci_dev *pdev;

        if (!ecap_dev_iotlb_support(iommu->ecap))
                return NULL;

        if (!iommu->qi)
                return NULL;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry(info, &domain->devices, link)
                if (info->iommu == iommu && info->bus == bus &&
                    info->devfn == devfn) {
                        found = true;
                        break;
                }
        spin_unlock_irqrestore(&device_domain_lock, flags);

        if (!found || !info->dev || !dev_is_pci(info->dev))
                return NULL;

        pdev = to_pci_dev(info->dev);

        if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
                return NULL;

        if (!dmar_find_matched_atsr_unit(pdev))
                return NULL;

        return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
        if (!info || !dev_is_pci(info->dev))
                return;

        pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
        if (!info->dev || !dev_is_pci(info->dev) ||
            !pci_ats_enabled(to_pci_dev(info->dev)))
                return;

        pci_disable_ats(to_pci_dev(info->dev));
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
                                  u64 addr, unsigned mask)
{
        u16 sid, qdep;
        unsigned long flags;
        struct device_domain_info *info;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry(info, &domain->devices, link) {
                struct pci_dev *pdev;

                if (!info->dev || !dev_is_pci(info->dev))
                        continue;

                pdev = to_pci_dev(info->dev);
                if (!pci_ats_enabled(pdev))
                        continue;

                sid = info->bus << 8 | info->devfn;
                qdep = pci_ats_queue_depth(pdev);
                qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
        }
        spin_unlock_irqrestore(&device_domain_lock, flags);
}
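
/*
 * Illustrative example: for a device at 02:1f.3, devfn = (0x1f << 3) | 3
 * = 0xfb, so the source-id sent with the device-IOTLB invalidation is
 * sid = (0x02 << 8) | 0xfb = 0x02fb; qdep is that device's ATS
 * invalidate queue depth as reported in its ATS capability.
 */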

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
                                  unsigned long pfn, unsigned int pages,
                                  int ih, int map)
{
        unsigned int mask = ilog2(__roundup_pow_of_two(pages));
        uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

        BUG_ON(pages == 0);

        if (ih)
                ih = 1 << 6;
        /*
         * Fall back to a domain-selective flush if there is no PSI
         * support or the size is too big.
         * PSI requires the page count to be a power of two and the base
         * address to be naturally aligned to the size.
         */
        if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
                iommu->flush.flush_iotlb(iommu, did, 0, 0,
                                         DMA_TLB_DSI_FLUSH);
        else
                iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
                                         DMA_TLB_PSI_FLUSH);

        /*
         * In caching mode, changes of pages from non-present to present
         * require a flush. However, the device IOTLB doesn't need to be
         * flushed in this case.
         */
        if (!cap_caching_mode(iommu->cap) || !map)
                iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
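
/*
 * Worked example (illustrative): pages = 9 gives
 * mask = ilog2(__roundup_pow_of_two(9)) = ilog2(16) = 4, so the request
 * is rounded up to a 16-page invalidation; if the hardware's maximum
 * address mask (cap_max_amask_val) is smaller than 4, the whole domain
 * is flushed instead.
 */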

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
        u32 pmen;
        unsigned long flags;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);
        pmen = readl(iommu->reg + DMAR_PMEN_REG);
        pmen &= ~DMA_PMEN_EPM;
        writel(pmen, iommu->reg + DMAR_PMEN_REG);

        /* wait for the protected region status bit to clear */
        IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
                      readl, !(pmen & DMA_PMEN_PRS), pmen);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_enable_translation(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flags;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);
        iommu->gcmd |= DMA_GCMD_TE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_TES), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_disable_translation(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flag;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        iommu->gcmd &= ~DMA_GCMD_TE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (!(sts & DMA_GSTS_TES)), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int iommu_init_domains(struct intel_iommu *iommu)
{
        unsigned long ndomains;
        unsigned long nlongs;

        ndomains = cap_ndoms(iommu->cap);
        pr_debug("%s: Number of Domains supported <%ld>\n",
                 iommu->name, ndomains);
        nlongs = BITS_TO_LONGS(ndomains);

        spin_lock_init(&iommu->lock);

        /* TBD: there might be 64K domains;
         * consider a different allocation scheme for future chips
         */
        iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
        if (!iommu->domain_ids) {
                pr_err("%s: Allocating domain id array failed\n",
                       iommu->name);
                return -ENOMEM;
        }
        iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
                                 GFP_KERNEL);
        if (!iommu->domains) {
                pr_err("%s: Allocating domain array failed\n",
                       iommu->name);
                kfree(iommu->domain_ids);
                iommu->domain_ids = NULL;
                return -ENOMEM;
        }

        /*
         * If Caching mode is set, then invalid translations are tagged
         * with domain id 0. Hence we need to pre-allocate it.
         */
        if (cap_caching_mode(iommu->cap))
                set_bit(0, iommu->domain_ids);
        return 0;
}
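
/*
 * Illustrative sizing: with cap_ndoms() == 256, BITS_TO_LONGS(256) == 4
 * on a 64-bit kernel, so the id bitmap is 32 bytes and the domain
 * pointer array is 256 * sizeof(void *) == 2KiB per IOMMU.
 */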

static void disable_dmar_iommu(struct intel_iommu *iommu)
{
        struct dmar_domain *domain;
        int i;

        if ((iommu->domains) && (iommu->domain_ids)) {
                for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
                        /*
                         * Domain id 0 is reserved for invalid translation
                         * if hardware supports caching mode.
                         */
                        if (cap_caching_mode(iommu->cap) && i == 0)
                                continue;

                        domain = iommu->domains[i];
                        clear_bit(i, iommu->domain_ids);
                        if (domain_detach_iommu(domain, iommu) == 0 &&
                            !domain_type_is_vm(domain))
                                domain_exit(domain);
                }
        }

        if (iommu->gcmd & DMA_GCMD_TE)
                iommu_disable_translation(iommu);
}

static void free_dmar_iommu(struct intel_iommu *iommu)
{
        if ((iommu->domains) && (iommu->domain_ids)) {
                kfree(iommu->domains);
                kfree(iommu->domain_ids);
                iommu->domains = NULL;
                iommu->domain_ids = NULL;
        }

        g_iommus[iommu->seq_id] = NULL;

        /* free context mapping */
        free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(int flags)
{
        /* domain id for a virtual machine; it won't be set in a context entry */
        static atomic_t vm_domid = ATOMIC_INIT(0);
        struct dmar_domain *domain;

        domain = alloc_domain_mem();
        if (!domain)
                return NULL;

        memset(domain, 0, sizeof(*domain));
        domain->nid = -1;
        domain->flags = flags;
        spin_lock_init(&domain->iommu_lock);
        INIT_LIST_HEAD(&domain->devices);
        if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
                domain->id = atomic_inc_return(&vm_domid);

        return domain;
}

static int __iommu_attach_domain(struct dmar_domain *domain,
                                 struct intel_iommu *iommu)
{
        int num;
        unsigned long ndomains;

        ndomains = cap_ndoms(iommu->cap);
        num = find_first_zero_bit(iommu->domain_ids, ndomains);
        if (num < ndomains) {
                set_bit(num, iommu->domain_ids);
                iommu->domains[num] = domain;
        } else {
                num = -ENOSPC;
        }

        return num;
}

static int iommu_attach_domain(struct dmar_domain *domain,
                               struct intel_iommu *iommu)
{
        int num;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        num = __iommu_attach_domain(domain, iommu);
        spin_unlock_irqrestore(&iommu->lock, flags);
        if (num < 0)
                pr_err("%s: No free domain ids\n", iommu->name);

        return num;
}

static int iommu_attach_vm_domain(struct dmar_domain *domain,
                                  struct intel_iommu *iommu)
{
        int num;
        unsigned long ndomains;

        ndomains = cap_ndoms(iommu->cap);
        for_each_set_bit(num, iommu->domain_ids, ndomains)
                if (iommu->domains[num] == domain)
                        return num;

        return __iommu_attach_domain(domain, iommu);
}

static void iommu_detach_domain(struct dmar_domain *domain,
                                struct intel_iommu *iommu)
{
        unsigned long flags;
        int num, ndomains;

        spin_lock_irqsave(&iommu->lock, flags);
        if (domain_type_is_vm_or_si(domain)) {
                ndomains = cap_ndoms(iommu->cap);
                for_each_set_bit(num, iommu->domain_ids, ndomains) {
                        if (iommu->domains[num] == domain) {
                                clear_bit(num, iommu->domain_ids);
                                iommu->domains[num] = NULL;
                                break;
                        }
                }
        } else {
                clear_bit(domain->id, iommu->domain_ids);
                iommu->domains[domain->id] = NULL;
        }
        spin_unlock_irqrestore(&iommu->lock, flags);
}

static void domain_attach_iommu(struct dmar_domain *domain,
                                struct intel_iommu *iommu)
{
        unsigned long flags;

        spin_lock_irqsave(&domain->iommu_lock, flags);
        if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
                domain->iommu_count++;
                if (domain->iommu_count == 1)
                        domain->nid = iommu->node;
                domain_update_iommu_cap(domain);
        }
        spin_unlock_irqrestore(&domain->iommu_lock, flags);
}

static int domain_detach_iommu(struct dmar_domain *domain,
                               struct intel_iommu *iommu)
{
        unsigned long flags;
        int count = INT_MAX;

        spin_lock_irqsave(&domain->iommu_lock, flags);
        if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
                count = --domain->iommu_count;
                domain_update_iommu_cap(domain);
        }
        spin_unlock_irqrestore(&domain->iommu_lock, flags);

        return count;
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
        struct pci_dev *pdev = NULL;
        struct iova *iova;
        int i;

        init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
                         DMA_32BIT_PFN);

        lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
                          &reserved_rbtree_key);

        /* IOAPIC ranges shouldn't be accessed by DMA */
        iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
                            IOVA_PFN(IOAPIC_RANGE_END));
        if (!iova) {
                pr_err("Reserve IOAPIC range failed\n");
                return -ENODEV;
        }

        /* Reserve all PCI MMIO to avoid peer-to-peer access */
        for_each_pci_dev(pdev) {
                struct resource *r;

                for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                        r = &pdev->resource[i];
                        if (!r->flags || !(r->flags & IORESOURCE_MEM))
                                continue;
                        iova = reserve_iova(&reserved_iova_list,
                                            IOVA_PFN(r->start),
                                            IOVA_PFN(r->end));
                        if (!iova) {
                                pr_err("Reserve iova failed\n");
                                return -ENODEV;
                        }
                }
        }
        return 0;
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
        copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
        int agaw;
        int r = (gaw - 12) % 9;

        if (r == 0)
                agaw = gaw;
        else
                agaw = gaw + 9 - r;
        if (agaw > 64)
                agaw = 64;
        return agaw;
}
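
/*
 * Worked example (illustrative): each page-table level resolves 9 bits
 * on top of the 12-bit page offset, so supported widths are 30, 39, 48,
 * 57, and so on. A guest width of 40 gives r = (40 - 12) % 9 = 1, hence
 * agaw = 40 + 9 - 1 = 48; a width of 48 already has r == 0 and is kept.
 */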

static int domain_init(struct dmar_domain *domain, int guest_width)
{
        struct intel_iommu *iommu;
        int adjust_width, agaw;
        unsigned long sagaw;

        init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
                         DMA_32BIT_PFN);
        domain_reserve_special_ranges(domain);

        /* calculate AGAW */
        iommu = domain_get_iommu(domain);
        if (guest_width > cap_mgaw(iommu->cap))
                guest_width = cap_mgaw(iommu->cap);
        domain->gaw = guest_width;
        adjust_width = guestwidth_to_adjustwidth(guest_width);
        agaw = width_to_agaw(adjust_width);
        sagaw = cap_sagaw(iommu->cap);
        if (!test_bit(agaw, &sagaw)) {
                /* hardware doesn't support it, choose a bigger one */
                pr_debug("Hardware doesn't support agaw %d\n", agaw);
                agaw = find_next_bit(&sagaw, 5, agaw);
                if (agaw >= 5)
                        return -ENODEV;
        }
        domain->agaw = agaw;

        if (ecap_coherent(iommu->ecap))
                domain->iommu_coherency = 1;
        else
                domain->iommu_coherency = 0;

        if (ecap_sc_support(iommu->ecap))
                domain->iommu_snooping = 1;
        else
                domain->iommu_snooping = 0;

        if (intel_iommu_superpage)
                domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
        else
                domain->iommu_superpage = 0;

        domain->nid = iommu->node;

        /* always allocate the top pgd */
        domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
        if (!domain->pgd)
                return -ENOMEM;
        __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
        return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        struct page *freelist = NULL;

        /* Domain 0 is reserved, so don't process it */
        if (!domain)
                return;

        /* Flush any lazy unmaps that may reference this domain */
        if (!intel_iommu_strict)
                flush_unmaps_timeout(0);

        /* remove associated devices */
        domain_remove_dev_info(domain);

        /* destroy iovas */
        put_iova_domain(&domain->iovad);

        freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

        /* clear attached or cached domains */
        rcu_read_lock();
        for_each_active_iommu(iommu, drhd)
                if (domain_type_is_vm(domain) ||
                    test_bit(iommu->seq_id, domain->iommu_bmp))
                        iommu_detach_domain(domain, iommu);
        rcu_read_unlock();

        dma_free_pagelist(freelist);

        free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain,
                                      struct intel_iommu *iommu,
                                      u8 bus, u8 devfn, int translation)
{
        struct context_entry *context;
        unsigned long flags;
        struct dma_pte *pgd;
        int id;
        int agaw;
        struct device_domain_info *info = NULL;

        pr_debug("Set context mapping for %02x:%02x.%d\n",
                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

        BUG_ON(!domain->pgd);
        BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
               translation != CONTEXT_TT_MULTI_LEVEL);

        spin_lock_irqsave(&iommu->lock, flags);
        context = iommu_context_addr(iommu, bus, devfn, 1);
        spin_unlock_irqrestore(&iommu->lock, flags);
        if (!context)
                return -ENOMEM;
        spin_lock_irqsave(&iommu->lock, flags);
        if (context_present(context)) {
                spin_unlock_irqrestore(&iommu->lock, flags);
                return 0;
        }

        context_clear_entry(context);

        id = domain->id;
        pgd = domain->pgd;

        if (domain_type_is_vm_or_si(domain)) {
                if (domain_type_is_vm(domain)) {
                        id = iommu_attach_vm_domain(domain, iommu);
                        if (id < 0) {
                                spin_unlock_irqrestore(&iommu->lock, flags);
                                pr_err("%s: No free domain ids\n", iommu->name);
                                return -EFAULT;
                        }
                }

                /* Skip the top levels of the page tables for an iommu
                 * whose agaw is smaller than the domain's default.
                 * Unnecessary for PT mode.
                 */
                if (translation != CONTEXT_TT_PASS_THROUGH) {
                        for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
                                pgd = phys_to_virt(dma_pte_addr(pgd));
                                if (!dma_pte_present(pgd)) {
                                        spin_unlock_irqrestore(&iommu->lock,
                                                               flags);
                                        return -ENOMEM;
                                }
                        }
                }
        }

        context_set_domain_id(context, id);

        if (translation != CONTEXT_TT_PASS_THROUGH) {
                info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
                translation = info ? CONTEXT_TT_DEV_IOTLB :
                                     CONTEXT_TT_MULTI_LEVEL;
        }
        /*
         * In pass-through mode, AW must be programmed to indicate the
         * largest AGAW value supported by hardware. And ASR is ignored
         * by hardware.
         */
        if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
                context_set_address_width(context, iommu->msagaw);
        else {
                context_set_address_root(context, virt_to_phys(pgd));
                context_set_address_width(context, iommu->agaw);
        }

        context_set_translation_type(context, translation);
        context_set_fault_enable(context);
        context_set_present(context);
        domain_flush_cache(domain, context, sizeof(*context));

        /*
         * It's a non-present to present mapping. If hardware doesn't
         * cache non-present entries, we only need to flush the
         * write-buffer. If it _does_ cache non-present entries, then it
         * does so in the special domain #0, which we have to flush:
         */
        if (cap_caching_mode(iommu->cap)) {
                iommu->flush.flush_context(iommu, 0,
                                           (((u16)bus) << 8) | devfn,
                                           DMA_CCMD_MASK_NOBIT,
                                           DMA_CCMD_DEVICE_INVL);
                iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
        } else {
                iommu_flush_write_buffer(iommu);
        }
        iommu_enable_dev_iotlb(info);
        spin_unlock_irqrestore(&iommu->lock, flags);

        domain_attach_iommu(domain, iommu);

        return 0;
}

struct domain_context_mapping_data {
        struct dmar_domain *domain;
        struct intel_iommu *iommu;
        int translation;
};

static int domain_context_mapping_cb(struct pci_dev *pdev,
                                     u16 alias, void *opaque)
{
        struct domain_context_mapping_data *data = opaque;

        return domain_context_mapping_one(data->domain, data->iommu,
                                          PCI_BUS_NUM(alias), alias & 0xff,
                                          data->translation);
}

static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev,
                       int translation)
{
        struct intel_iommu *iommu;
        u8 bus, devfn;
        struct domain_context_mapping_data data;

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return -ENODEV;

        if (!dev_is_pci(dev))
                return domain_context_mapping_one(domain, iommu, bus, devfn,
                                                  translation);

        data.domain = domain;
        data.iommu = iommu;
        data.translation = translation;

        return pci_for_each_dma_alias(to_pci_dev(dev),
                                      &domain_context_mapping_cb, &data);
}
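
/*
 * Illustrative note: pci_for_each_dma_alias() invokes the callback once
 * for the device's own requester id and once per alias (for example the
 * bridge's RID for a device behind a PCIe-to-PCI bridge), so every RID
 * the IOMMU may observe for this device resolves to the same domain.
 */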

static int domain_context_mapped_cb(struct pci_dev *pdev,
                                    u16 alias, void *opaque)
{
        struct intel_iommu *iommu = opaque;

        return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
}

static int domain_context_mapped(struct device *dev)
{
        struct intel_iommu *iommu;
        u8 bus, devfn;

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return -ENODEV;

        if (!dev_is_pci(dev))
                return device_context_mapped(iommu, bus, devfn);

        return !pci_for_each_dma_alias(to_pci_dev(dev),
                                       domain_context_mapped_cb, iommu);
}

/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
                                            size_t size)
{
        host_addr &= ~PAGE_MASK;
        return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
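
/*
 * Worked example (illustrative, 4KiB pages): host_addr = 0x1234 and
 * size = 0x2000. The in-page offset 0x234 is kept, 0x234 + 0x2000 is
 * aligned up to 0x3000, so the mapping spans 3 VT-d pages even though
 * the buffer itself is only two pages long.
 */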

/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
                                          unsigned long iov_pfn,
                                          unsigned long phy_pfn,
                                          unsigned long pages)
{
        int support, level = 1;
        unsigned long pfnmerge;

        support = domain->iommu_superpage;

        /* To use a large page, the virtual *and* physical addresses
           must be aligned to 2MiB/1GiB/etc. Lower bits set in either
           of them will mean we have to use smaller pages. So just
           merge them and check both at once. */
        pfnmerge = iov_pfn | phy_pfn;

        while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
                pages >>= VTD_STRIDE_SHIFT;
                if (!pages)
                        break;
                pfnmerge >>= VTD_STRIDE_SHIFT;
                level++;
                support--;
        }
        return level;
}
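
/*
 * Illustrative: iov_pfn = 0x200 and phy_pfn = 0x400 both have their low
 * nine bits clear (2MiB-aligned), and pages = 1024 >> 9 = 2 chunks
 * remain, so level 2 (a 2MiB superpage) is usable provided
 * domain->iommu_superpage >= 1.
 */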

static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                            struct scatterlist *sg, unsigned long phys_pfn,
                            unsigned long nr_pages, int prot)
{
        struct dma_pte *first_pte = NULL, *pte = NULL;
        phys_addr_t uninitialized_var(pteval);
        unsigned long sg_res = 0;
        unsigned int largepage_lvl = 0;
        unsigned long lvl_pages = 0;

        BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));

        if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
                return -EINVAL;

        prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

        if (!sg) {
                sg_res = nr_pages;
                pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
        }

        while (nr_pages > 0) {
                uint64_t tmp;

                if (!sg_res) {
                        sg_res = aligned_nrpages(sg->offset, sg->length);
                        sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
                        sg->dma_length = sg->length;
                        pteval = page_to_phys(sg_page(sg)) | prot;
                        phys_pfn = pteval >> VTD_PAGE_SHIFT;
                }

                if (!pte) {
                        largepage_lvl = hardware_largepage_caps(domain, iov_pfn,
                                                                phys_pfn, sg_res);

                        first_pte = pte = pfn_to_dma_pte(domain, iov_pfn,
                                                         &largepage_lvl);
                        if (!pte)
                                return -ENOMEM;
                        /* It is a large page */
                        if (largepage_lvl > 1) {
                                pteval |= DMA_PTE_LARGE_PAGE;
                                lvl_pages = lvl_to_nr_pages(largepage_lvl);
                                /*
                                 * Ensure that old small page tables are
                                 * removed to make room for the superpage,
                                 * if they exist.
                                 */
                                dma_pte_free_pagetable(domain, iov_pfn,
                                                       iov_pfn + lvl_pages - 1);
                        } else {
                                pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
                        }
                }
                /* We don't need a lock here; nobody else
                 * touches this iova range
                 */
                tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
                if (tmp) {
                        static int dumps = 5;
                        pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
                                iov_pfn, tmp, (unsigned long long)pteval);
                        if (dumps) {
                                dumps--;
                                debug_dma_dump_mappings(NULL);
                        }
                        WARN_ON(1);
                }

                lvl_pages = lvl_to_nr_pages(largepage_lvl);

                BUG_ON(nr_pages < lvl_pages);
                BUG_ON(sg_res < lvl_pages);

                nr_pages -= lvl_pages;
                iov_pfn += lvl_pages;
                phys_pfn += lvl_pages;
                pteval += lvl_pages * VTD_PAGE_SIZE;
                sg_res -= lvl_pages;

                /* If the next PTE would be the first in a new page, then we
                   need to flush the cache on the entries we've just written.
                   And then we'll need to recalculate 'pte', so clear it and
                   let it get set again in the if (!pte) block above.

                   If we're done (!nr_pages) we need to flush the cache too.

                   Also if we've been setting superpages, we may need to
                   recalculate 'pte' and switch back to smaller pages for the
                   end of the mapping, if the trailing size is not enough to
                   use another superpage (i.e. sg_res < lvl_pages). */
                pte++;
                if (!nr_pages || first_pte_in_page(pte) ||
                    (largepage_lvl > 1 && sg_res < lvl_pages)) {
                        domain_flush_cache(domain, first_pte,
                                           (void *)pte - (void *)first_pte);
                        pte = NULL;
                }

                if (!sg_res && nr_pages)
                        sg = sg_next(sg);
        }
        return 0;
}

static inline int domain_sg_mapping(struct dmar_domain *domain,
                                    unsigned long iov_pfn,
                                    struct scatterlist *sg,
                                    unsigned long nr_pages, int prot)
{
        return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain,
                                     unsigned long iov_pfn,
                                     unsigned long phys_pfn,
                                     unsigned long nr_pages, int prot)
{
        return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}
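
/*
 * Usage sketch (illustrative values): map 512 contiguous 4KiB pages of
 * physical memory at pfn 0x40000 to IOVA pfn 0x200 with read/write
 * permission; since both pfns are 2MiB-aligned this becomes a single
 * large-page PTE rather than 512 small ones:
 *
 *      domain_pfn_mapping(domain, 0x200, 0x40000, 512,
 *                         DMA_PTE_READ | DMA_PTE_WRITE);
 */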

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
        if (!iommu)
                return;

        clear_context_table(iommu, bus, devfn);
        iommu->flush.flush_context(iommu, 0, 0, 0,
                                   DMA_CCMD_GLOBAL_INVL);
        iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static inline void unlink_domain_info(struct device_domain_info *info)
{
        assert_spin_locked(&device_domain_lock);
        list_del(&info->link);
        list_del(&info->global);
        if (info->dev)
                info->dev->archdata.iommu = NULL;
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
        struct device_domain_info *info, *tmp;
        unsigned long flags;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry_safe(info, tmp, &domain->devices, link) {
                unlink_domain_info(info);
                spin_unlock_irqrestore(&device_domain_lock, flags);

                iommu_disable_dev_iotlb(info);
                iommu_detach_dev(info->iommu, info->bus, info->devfn);

                if (domain_type_is_vm(domain)) {
                        iommu_detach_dependent_devices(info->iommu, info->dev);
                        domain_detach_iommu(domain, info->iommu);
                }

                free_devinfo_mem(info);
                spin_lock_irqsave(&device_domain_lock, flags);
        }
        spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: the per-device info is stored in struct device->archdata.iommu
 */
static struct dmar_domain *find_domain(struct device *dev)
{
        struct device_domain_info *info;

        /* No lock here, assumes no domain exit in normal case */
        info = dev->archdata.iommu;
        if (info)
                return info->domain;
        return NULL;
}

static inline struct device_domain_info *
dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
{
        struct device_domain_info *info;

        list_for_each_entry(info, &device_domain_list, global)
                if (info->iommu->segment == segment && info->bus == bus &&
                    info->devfn == devfn)
                        return info;

        return NULL;
}

static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
                                                int bus, int devfn,
                                                struct device *dev,
                                                struct dmar_domain *domain)
{
        struct dmar_domain *found = NULL;
        struct device_domain_info *info;
        unsigned long flags;

        info = alloc_devinfo_mem();
        if (!info)
                return NULL;

        info->bus = bus;
        info->devfn = devfn;
        info->dev = dev;
        info->domain = domain;
        info->iommu = iommu;

        spin_lock_irqsave(&device_domain_lock, flags);
        if (dev)
                found = find_domain(dev);
        else {
                struct device_domain_info *info2;
                info2 = dmar_search_domain_by_dev_info(iommu->segment,
                                                       bus, devfn);
                if (info2)
                        found = info2->domain;
        }
        if (found) {
                spin_unlock_irqrestore(&device_domain_lock, flags);
                free_devinfo_mem(info);
                /* Caller must free the original domain */
                return found;
        }

        list_add(&info->link, &domain->devices);
        list_add(&info->global, &device_domain_list);
        if (dev)
                dev->archdata.iommu = info;
        spin_unlock_irqrestore(&device_domain_lock, flags);

        return domain;
}

static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
{
        *(u16 *)opaque = alias;
        return 0;
}

/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
{
        struct dmar_domain *domain, *tmp;
        struct intel_iommu *iommu;
        struct device_domain_info *info;
        u16 dma_alias;
        unsigned long flags;
        u8 bus, devfn;

        domain = find_domain(dev);
        if (domain)
                return domain;

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return NULL;

        if (dev_is_pci(dev)) {
                struct pci_dev *pdev = to_pci_dev(dev);

                pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);

                spin_lock_irqsave(&device_domain_lock, flags);
                info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
                                                      PCI_BUS_NUM(dma_alias),
                                                      dma_alias & 0xff);
                if (info) {
                        iommu = info->iommu;
                        domain = info->domain;
                }
                spin_unlock_irqrestore(&device_domain_lock, flags);

                /* The DMA alias already has a domain, use it */
                if (info)
                        goto found_domain;
        }

        /* Allocate and initialize a new domain for the device */
        domain = alloc_domain(0);
        if (!domain)
                return NULL;
        domain->id = iommu_attach_domain(domain, iommu);
        if (domain->id < 0) {
                free_domain_mem(domain);
                return NULL;
        }
        domain_attach_iommu(domain, iommu);
        if (domain_init(domain, gaw)) {
                domain_exit(domain);
                return NULL;
        }

        /* register the PCI DMA alias device */
        if (dev_is_pci(dev)) {
                tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
                                           dma_alias & 0xff, NULL, domain);

                if (!tmp || tmp != domain) {
                        domain_exit(domain);
                        domain = tmp;
                }

                if (!domain)
                        return NULL;
        }

found_domain:
        tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);

        if (!tmp || tmp != domain) {
                domain_exit(domain);
                domain = tmp;
        }

        return domain;
}
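
/*
 * Illustrative flow: the domain is registered twice, first under the
 * device's last DMA alias (with dev == NULL), then under the device
 * itself. If a racing caller inserted a domain first,
 * dmar_insert_dev_info() returns the existing one and the newly built
 * domain is released via domain_exit().
 */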

static int iommu_identity_mapping;
#define IDENTMAP_ALL            1
#define IDENTMAP_GFX            2
#define IDENTMAP_AZALIA         4

static int iommu_domain_identity_map(struct dmar_domain *domain,
                                     unsigned long long start,
                                     unsigned long long end)
{
        unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
        unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

        if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
                          dma_to_mm_pfn(last_vpfn))) {
                pr_err("Reserving iova failed\n");
                return -ENOMEM;
        }

        pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
                 start, end, domain->id);
        /*
         * The RMRR range might overlap a physical memory range;
         * clear it first.
         */
        dma_pte_clear_range(domain, first_vpfn, last_vpfn);

        return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
                                  last_vpfn - first_vpfn + 1,
                                  DMA_PTE_READ|DMA_PTE_WRITE);
}
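
/*
 * Worked example (illustrative): identity-mapping the 0-16MiB ISA
 * window (start = 0, end = 0xffffff) reserves vpfns 0-0xfff and
 * installs 4096 PTEs whose IOVA equals the physical pfn.
 */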

static int iommu_prepare_identity_map(struct device *dev,
                                      unsigned long long start,
                                      unsigned long long end)
{
        struct dmar_domain *domain;
        int ret;

        domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
        if (!domain)
                return -ENOMEM;

        /* For _hardware_ passthrough, don't bother. But for software
           passthrough, we do it anyway -- it may indicate a memory
           range which is reserved in E820 and so didn't get set up
           in si_domain to start with */
        if (domain == si_domain && hw_pass_through) {
                pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
                        dev_name(dev), start, end);
                return 0;
        }

        pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
                dev_name(dev), start, end);

        if (end < start) {
                WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
                        "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                     dmi_get_system_info(DMI_BIOS_VENDOR),
                     dmi_get_system_info(DMI_BIOS_VERSION),
                     dmi_get_system_info(DMI_PRODUCT_VERSION));
                ret = -EIO;
                goto error;
        }

        if (end >> agaw_to_width(domain->agaw)) {
                WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
                        "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                     agaw_to_width(domain->agaw),
                     dmi_get_system_info(DMI_BIOS_VENDOR),
                     dmi_get_system_info(DMI_BIOS_VERSION),
                     dmi_get_system_info(DMI_PRODUCT_VERSION));
                ret = -EIO;
                goto error;
        }

        ret = iommu_domain_identity_map(domain, start, end);
        if (ret)
                goto error;

        /* context entry init */
        ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
        if (ret)
                goto error;

        return 0;

 error:
        domain_exit(domain);
        return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
                                         struct device *dev)
{
        if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
                return 0;
        return iommu_prepare_identity_map(dev, rmrr->base_address,
                                          rmrr->end_address);
}

#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
        struct pci_dev *pdev;
        int ret;

        pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
        if (!pdev)
                return;

        pr_info("Prepare 0-16MiB unity mapping for LPC\n");
        ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);

        if (ret)
                pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");

        pci_dev_put(pdev);
}
#else
static inline void iommu_prepare_isa(void)
{
        return;
}
#endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002516
static int md_domain_init(struct dmar_domain *domain, int guest_width);

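/*
 * Set up the static identity (si) domain that backs 1:1 mappings:
 * attach it to every active IOMMU and, unless hardware passthrough is
 * in use, identity-map every usable memory range of every online node.
 */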
static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;
	bool first = true;

	si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
	if (!si_domain)
		return -EFAULT;

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret < 0) {
			domain_exit(si_domain);
			return -EFAULT;
		} else if (first) {
			si_domain->id = ret;
			first = false;
		} else if (si_domain->id != ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
		domain_attach_iommu(si_domain, iommu);
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	pr_debug("Identity mapping domain is domain %d\n",
		 si_domain->id);

	if (hw)
		return 0;

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		int i;

		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			ret = iommu_domain_identity_map(si_domain,
					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
			if (ret)
				return ret;
		}
	}

	return 0;
}

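/* Return non-zero if @dev is currently attached to the static identity domain. */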
static int identity_mapping(struct device *dev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	info = dev->archdata.iommu;
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);

	return 0;
}

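/*
 * Attach @dev to @domain: record the device/domain association and set
 * up the context entry with the requested translation type.
 */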
static int domain_add_dev_info(struct dmar_domain *domain,
			       struct device *dev, int translation)
{
	struct dmar_domain *ndomain;
	struct intel_iommu *iommu;
	u8 bus, devfn;
	int ret;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
	if (ndomain != domain)
		return -EBUSY;

	ret = domain_context_mapping(domain, dev, translation);
	if (ret) {
		domain_remove_one_dev_info(domain, dev);
		return ret;
	}

	return 0;
}

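/* Check whether any RMRR in the DMAR table covers @dev. */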
static bool device_has_rmrr(struct device *dev)
{
	struct dmar_rmrr_unit *rmrr;
	struct device *tmp;
	int i;

	rcu_read_lock();
	for_each_rmrr_units(rmrr) {
		/*
		 * Return TRUE if this RMRR contains the device that
		 * is passed in.
		 */
		for_each_active_dev_scope(rmrr->devices,
					  rmrr->devices_cnt, i, tmp)
			if (tmp == dev) {
				rcu_read_unlock();
				return true;
			}
	}
	rcu_read_unlock();
	return false;
}

/*
 * There are a couple of cases where we need to restrict the functionality of
 * devices associated with RMRRs. The first is when evaluating a device for
 * identity mapping because problems exist when devices are moved in and out
 * of domains and their respective RMRR information is lost. This means that
 * a device with associated RMRRs will never be in a "passthrough" domain.
 * The second is use of the device through the IOMMU API. This interface
 * expects to have full control of the IOVA space for the device. We cannot
 * satisfy both the requirement that RMRR access is maintained and have an
 * unencumbered IOVA space. We also have no ability to quiesce the device's
 * use of the RMRR space or even inform the IOMMU API user of the restriction.
 * We therefore prevent devices associated with an RMRR from participating in
 * the IOMMU API, which eliminates them from device assignment.
 *
 * In both cases we assume that PCI USB devices with RMRRs have them largely
 * for historical reasons and that the RMRR space is not actively used post
 * boot. This exclusion may change if vendors begin to abuse it.
 *
 * The same exception is made for graphics devices, with the requirement that
 * any use of the RMRR regions will be torn down before assigning the device
 * to a guest.
 */
static bool device_is_rmrr_locked(struct device *dev)
{
	if (!device_has_rmrr(dev))
		return false;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
			return false;
	}

	return true;
}

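/*
 * Decide whether @dev should live in the 1:1 (identity) domain.  At
 * startup this errs on the side of identity mapping; later calls also
 * check that the device's DMA masks actually cover all of memory.
 */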
static int iommu_should_identity_map(struct device *dev, int startup)
{
	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (device_is_rmrr_locked(dev))
			return 0;

		if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
			return 1;

		if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
			return 1;

		if (!(iommu_identity_mapping & IDENTMAP_ALL))
			return 0;

		/*
		 * We want to start off with all devices in the 1:1 domain, and
		 * take them out later if we find they can't access all of memory.
		 *
		 * However, we can't do this for PCI devices behind bridges,
		 * because all PCI devices behind the same bridge will end up
		 * with the same source-id on their transactions.
		 *
		 * Practically speaking, we can't change things around for these
		 * devices at run-time, because we can't be sure there'll be no
		 * DMA transactions in flight for any of their siblings.
		 *
		 * So PCI devices (unless they're on the root bus) as well as
		 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
		 * the 1:1 domain, just in _case_ one of their siblings turns out
		 * not to be able to map all of memory.
		 */
		if (!pci_is_pcie(pdev)) {
			if (!pci_is_root_bus(pdev->bus))
				return 0;
			if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
				return 0;
		} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;
	} else {
		if (device_has_rmrr(dev))
			return 0;
	}

	/*
	 * At boot time, we don't yet know if devices will be 64-bit capable.
	 * Assume that they will -- if they turn out not to be, then we can
	 * take them out of the 1:1 domain later.
	 */
	if (!startup) {
		/*
		 * If the device's dma_mask is less than the system's memory
		 * size then this is not a candidate for identity mapping.
		 */
		u64 dma_mask = *dev->dma_mask;

		if (dev->coherent_dma_mask &&
		    dev->coherent_dma_mask < dma_mask)
			dma_mask = dev->coherent_dma_mask;

		return dma_mask >= dma_get_required_mask(dev);
	}

	return 1;
}

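/*
 * Put @dev into the static identity domain if policy says it should be
 * identity-mapped, using passthrough context entries when the hardware
 * supports them.
 */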
static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
{
	int ret;

	if (!iommu_should_identity_map(dev, 1))
		return 0;

	ret = domain_add_dev_info(si_domain, dev,
				  hw ? CONTEXT_TT_PASS_THROUGH :
				       CONTEXT_TT_MULTI_LEVEL);
	if (!ret)
		pr_info("%s identity mapping for device %s\n",
			hw ? "Hardware" : "Software", dev_name(dev));
	else if (ret == -ENODEV)
		/* device not associated with an iommu */
		ret = 0;

	return ret;
}

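/*
 * Walk all PCI devices and all ACPI devices in the DMAR scope and
 * prepare static identity mappings for those that qualify.
 */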
static int __init iommu_prepare_static_identity_mapping(int hw)
{
	struct pci_dev *pdev = NULL;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	struct device *dev;
	int i;
	int ret = 0;

	for_each_pci_dev(pdev) {
		ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
		if (ret)
			return ret;
	}

	for_each_active_iommu(iommu, drhd)
		for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
			struct acpi_device_physical_node *pn;
			struct acpi_device *adev;

			if (dev->bus != &acpi_bus_type)
				continue;

			adev = to_acpi_device(dev);
			mutex_lock(&adev->physical_node_lock);
			list_for_each_entry(pn, &adev->physical_node_list, node) {
				ret = dev_prepare_static_identity_mapping(pn->dev, hw);
				if (ret)
					break;
			}
			mutex_unlock(&adev->physical_node_lock);
			if (ret)
				return ret;
		}

	return 0;
}

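/*
 * Pick the invalidation mechanism for @iommu: queued invalidation when
 * it can be enabled, register-based invalidation otherwise.
 */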
static void intel_iommu_init_qi(struct intel_iommu *iommu)
{
	/*
	 * Start from the sane iommu hardware state.
	 * If the queued invalidation is already initialized by us
	 * (for example, while enabling interrupt-remapping) then
	 * we got the things already rolling from a sane state.
	 */
	if (!iommu->qi) {
		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	if (dmar_enable_qi(iommu)) {
		/*
		 * Queued Invalidate not enabled, use Register Based Invalidate
		 */
		iommu->flush.flush_context = __iommu_flush_context;
		iommu->flush.flush_iotlb = __iommu_flush_iotlb;
		pr_info("%s: Using Register based invalidation\n",
			iommu->name);
	} else {
		iommu->flush.flush_context = qi_flush_context;
		iommu->flush.flush_iotlb = qi_flush_iotlb;
		pr_info("%s: Using Queued invalidation\n", iommu->name);
	}
}

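/*
 * Copy one bus's context table from the previous (crashed) kernel.
 * Present entries are marked as copied so later code can tell inherited
 * entries from ones this kernel created; PASID support is cleared on
 * the copy since PASID translations are deliberately not carried over.
 */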
static int copy_context_table(struct intel_iommu *iommu,
			      struct root_entry *old_re,
			      struct context_entry **tbl,
			      int bus, bool ext)
{
	struct context_entry *old_ce = NULL, *new_ce = NULL, ce;
	int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
	phys_addr_t old_ce_phys;

	tbl_idx = ext ? bus * 2 : bus;

	for (devfn = 0; devfn < 256; devfn++) {
		/* First calculate the correct index */
		idx = (ext ? devfn * 2 : devfn) % 256;

		if (idx == 0) {
			/* First save what we may have and clean up */
			if (new_ce) {
				tbl[tbl_idx] = new_ce;
				__iommu_flush_cache(iommu, new_ce,
						    VTD_PAGE_SIZE);
				pos = 1;
			}

			if (old_ce)
				iounmap(old_ce);

			ret = 0;
			if (devfn < 0x80)
				old_ce_phys = root_entry_lctp(old_re);
			else
				old_ce_phys = root_entry_uctp(old_re);

			if (!old_ce_phys) {
				if (ext && devfn == 0) {
					/* No LCTP, try UCTP */
					devfn = 0x7f;
					continue;
				} else {
					goto out;
				}
			}

			ret = -ENOMEM;
			old_ce = ioremap_cache(old_ce_phys, PAGE_SIZE);
			if (!old_ce)
				goto out;

			new_ce = alloc_pgtable_page(iommu->node);
			if (!new_ce)
				goto out_unmap;

			ret = 0;
		}

		/* Now copy the context entry */
		ce = old_ce[idx];

		if (!__context_present(&ce))
			continue;

		did = context_domain_id(&ce);
		if (did >= 0 && did < cap_ndoms(iommu->cap))
			set_bit(did, iommu->domain_ids);

		/*
		 * We need a marker for copied context entries. This
		 * marker needs to work for the old format as well as
		 * for extended context entries.
		 *
		 * Bit 67 of the context entry is used. In the old
		 * format this bit is available to software, in the
		 * extended format it is the PGE bit, but PGE is ignored
		 * by HW if PASIDs are disabled (and thus still
		 * available).
		 *
		 * So disable PASIDs first and then mark the entry
		 * copied. This means that we don't copy PASID
		 * translations from the old kernel, but this is fine as
		 * faults there are not fatal.
		 */
		context_clear_pasid_enable(&ce);
		context_set_copied(&ce);

		new_ce[idx] = ce;
	}

	tbl[tbl_idx + pos] = new_ce;

	__iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);

out_unmap:
	iounmap(old_ce);

out:
	return ret;
}

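/*
 * Inherit the DMA translation tables from the previous kernel (kdump
 * case): map its root table, copy every per-bus context table, and hook
 * the copies into this kernel's root_entry table.  Bails out if the old
 * and new kernels disagree on extended context support, since flipping
 * the RTT bit would require disabling translation.
 */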
static int copy_translation_tables(struct intel_iommu *iommu)
{
	struct context_entry **ctxt_tbls;
	struct root_entry *old_rt;
	phys_addr_t old_rt_phys;
	int ctxt_table_entries;
	unsigned long flags;
	u64 rtaddr_reg;
	int bus, ret;
	bool new_ext, ext;

	rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
	ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
	new_ext = !!ecap_ecs(iommu->ecap);

	/*
	 * The RTT bit can only be changed when translation is disabled,
	 * but disabling translation means to open a window for data
	 * corruption. So bail out and don't copy anything if we would
	 * have to change the bit.
	 */
	if (new_ext != ext)
		return -EINVAL;

	old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
	if (!old_rt_phys)
		return -EINVAL;

	old_rt = ioremap_cache(old_rt_phys, PAGE_SIZE);
	if (!old_rt)
		return -ENOMEM;

	/* This is too big for the stack - allocate it from slab */
	ctxt_table_entries = ext ? 512 : 256;
	ret = -ENOMEM;
	ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
	if (!ctxt_tbls)
		goto out_unmap;

	for (bus = 0; bus < 256; bus++) {
		ret = copy_context_table(iommu, &old_rt[bus],
					 ctxt_tbls, bus, ext);
		if (ret) {
			pr_err("%s: Failed to copy context table for bus %d\n",
			       iommu->name, bus);
			continue;
		}
	}

	spin_lock_irqsave(&iommu->lock, flags);

	/* Context tables are copied, now write them to the root_entry table */
	for (bus = 0; bus < 256; bus++) {
		int idx = ext ? bus * 2 : bus;
		u64 val;

		if (ctxt_tbls[idx]) {
			val = virt_to_phys(ctxt_tbls[idx]) | 1;
			iommu->root_entry[bus].lo = val;
		}

		if (!ext || !ctxt_tbls[idx + 1])
			continue;

		val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
		iommu->root_entry[bus].hi = val;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	kfree(ctxt_tbls);

	__iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);

	ret = 0;

out_unmap:
	iounmap(old_rt);

	return ret;
}

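/*
 * Central VT-d initialization: allocate per-IOMMU state, set up root and
 * context tables (or copy them from a previous kernel in the kdump case),
 * create the identity mappings required by policy, RMRRs and the ISA
 * workaround, then enable fault reporting and translation on every unit.
 */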
static int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	bool copied_tables = false;
	struct device *dev;
	struct intel_iommu *iommu;
	int i, ret;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path; all other accesses are
		 * read only
		 */
		if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
			g_num_of_iommus++;
			continue;
		}
		pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
	}

	/* Preallocate enough resources for IOMMU hot-addition */
	if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
		g_num_of_iommus = DMAR_UNITS_SUPPORTED;

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			GFP_KERNEL);
	if (!g_iommus) {
		pr_err("Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		ret = -ENOMEM;
		goto free_g_iommus;
	}

	for_each_active_iommu(iommu, drhd) {
		g_iommus[iommu->seq_id] = iommu;

		intel_iommu_init_qi(iommu);

		ret = iommu_init_domains(iommu);
		if (ret)
			goto free_iommu;

		init_translation_status(iommu);

		if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
			iommu_disable_translation(iommu);
			clear_translation_pre_enabled(iommu);
			pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
				iommu->name);
		}

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMUs. Need to split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret)
			goto free_iommu;

		if (translation_pre_enabled(iommu)) {
			pr_info("Translation already enabled - trying to copy translation structures\n");

			ret = copy_translation_tables(iommu);
			if (ret) {
				/*
				 * We found the IOMMU with translation
				 * enabled - but failed to copy over the
				 * old root-entry table. Try to proceed
				 * by disabling translation now and
				 * allocating a clean root-entry table.
				 * This might cause DMAR faults, but
				 * probably the dump will still succeed.
				 */
				pr_err("Failed to copy translation tables from previous kernel for %s\n",
				       iommu->name);
				iommu_disable_translation(iommu);
				clear_translation_pre_enabled(iommu);
			} else {
				pr_info("Copied translation tables from previous kernel for %s\n",
					iommu->name);
				copied_tables = true;
			}
		}

		iommu_flush_write_buffer(iommu);
		iommu_set_root_entry(iommu);
		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);

		if (!ecap_pass_through(iommu->ecap))
			hw_pass_through = 0;
	}

	if (iommu_pass_through)
		iommu_identity_mapping |= IDENTMAP_ALL;

#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
	iommu_identity_mapping |= IDENTMAP_GFX;
#endif

	if (iommu_identity_mapping) {
		ret = si_domain_init(hw_pass_through);
		if (ret)
			goto free_iommu;
	}

	check_tylersburg_isoch();

	/*
	 * If we copied translations from a previous kernel in the kdump
	 * case, we can not assign the devices to domains now, as that
	 * would eliminate the old mappings. So skip this part and defer
	 * the assignment to device driver initialization time.
	 */
	if (copied_tables)
		goto domains_done;

	/*
	 * If pass through is not set or not enabled, set up context entries
	 * for identity mappings for rmrr, gfx, and isa, possibly falling back
	 * to static identity mapping if iommu_identity_mapping is set.
	 */
	if (iommu_identity_mapping) {
		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
		if (ret) {
			pr_crit("Failed to setup IOMMU pass-through\n");
			goto free_iommu;
		}
	}
	/*
	 * For each rmrr
	 *   for each dev attached to rmrr
	 *   do
	 *     locate drhd for dev, alloc domain for dev
	 *     allocate free domain
	 *     allocate page table entries for rmrr
	 *     if context not allocated for bus
	 *           allocate and init context
	 *           set present in root table for this bus
	 *     init context with domain, translation etc
	 *    endfor
	 * endfor
	 */
	pr_info("Setting RMRR:\n");
	for_each_rmrr_units(rmrr) {
		/* some BIOSes list non-existent devices in the DMAR table. */
		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
					  i, dev) {
			ret = iommu_prepare_rmrr_dev(rmrr, dev);
			if (ret)
				pr_err("Mapping reserved region failed\n");
		}
	}

	iommu_prepare_isa();

domains_done:

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto free_iommu;

		if (!translation_pre_enabled(iommu))
			iommu_enable_translation(iommu);

		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;

free_iommu:
	for_each_active_iommu(iommu, drhd) {
		disable_dmar_iommu(iommu);
		free_dmar_iommu(iommu);
	}
	kfree(deferred_flush);
free_g_iommus:
	kfree(g_iommus);
error:
	return ret;
}

/* This takes a number of _MM_ pages, not VTD pages */
static struct iova *intel_alloc_iova(struct device *dev,
				     struct dmar_domain *domain,
				     unsigned long nrpages, uint64_t dma_mask)
{
	struct iova *iova = NULL;

	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from higher range
		 */
		iova = alloc_iova(&domain->iovad, nrpages,
				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
		if (iova)
			return iova;
	}
	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
	if (unlikely(!iova)) {
		pr_err("Allocating %ld-page iova for %s failed\n",
		       nrpages, dev_name(dev));
		return NULL;
	}

	return iova;
}

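/*
 * Look up (or create) the DMA domain for @dev and make sure its context
 * entry is mapped; get_valid_domain_for_dev() below is the fast path
 * that skips this work when the device already has domain info.
 */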
static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		pr_err("Allocating domain for %s failed\n",
		       dev_name(dev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(dev))) {
		ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
		if (ret) {
			pr_err("Domain context map for %s failed\n",
			       dev_name(dev));
			return NULL;
		}
	}

	return domain;
}

static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->archdata.iommu;
	if (likely(info))
		return info->domain;

	return __get_valid_domain_for_dev(dev);
}

/* Check if the dev needs to go through non-identity map and unmap process. */
static int iommu_no_mapping(struct device *dev)
{
	int found;

	if (iommu_dummy(dev))
		return 1;

	if (!iommu_identity_mapping)
		return 0;

	found = identity_mapping(dev);
	if (found) {
		if (iommu_should_identity_map(dev, 0))
			return 1;
		else {
			/*
			 * Remove the 32-bit DMA device from si_domain and
			 * fall back to non-identity mapping.
			 */
			domain_remove_one_dev_info(si_domain, dev);
			pr_info("32bit %s uses non-identity mapping\n",
				dev_name(dev));
			return 0;
		}
	} else {
		/*
		 * A 64-bit DMA device detached from a VM is put back into
		 * si_domain for identity mapping.
		 */
		if (iommu_should_identity_map(dev, 0)) {
			int ret;
			ret = domain_add_dev_info(si_domain, dev,
						  hw_pass_through ?
						  CONTEXT_TT_PASS_THROUGH :
						  CONTEXT_TT_MULTI_LEVEL);
			if (!ret) {
				pr_info("64bit %s uses identity mapping\n",
					dev_name(dev));
				return 1;
			}
		}
	}

	return 0;
}

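/*
 * Map @size bytes at physical @paddr for DMA: allocate an IOVA range
 * below @dma_mask, install the page-table entries, and flush the IOTLB
 * (in caching mode) or the write buffer.  Returns the bus address, the
 * untranslated @paddr for identity-mapped devices, or 0 on failure.
 */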
static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;
	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(dev))
		return paddr;

	domain = get_valid_domain_for_dev(dev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_nrpages(paddr, size);

	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
	if (!iova)
		goto error;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr - (paddr + size) might be partial page, we should map the whole
	 * page.  Note: if two parts of one page are separately mapped, we
	 * might have two guest_addr mappings to the same host paddr, but this
	 * is not a big problem
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
				 mm_to_dma_pfn(paddr_pfn), size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
	else
		iommu_flush_write_buffer(iommu);

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
	       dev_name(dev), size, (unsigned long long)paddr, dir);
	return 0;
}

static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, *dev->dma_mask);
}

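/*
 * Flush all deferred unmaps: for every IOMMU with pending entries, do a
 * global (or per-range, in caching mode) IOTLB invalidation, then
 * release the IOVAs and page freelists queued by add_unmap().
 */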
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		/* In caching mode, global flushes turn emulation expensive */
		if (!cap_caching_mode(iommu->cap))
			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];
			struct dmar_domain *domain = deferred_flush[i].domain[j];

			/* On real hardware multiple invalidations are expensive */
			if (cap_caching_mode(iommu->cap))
				iommu_flush_iotlb_psi(iommu, domain->id,
					iova->pfn_lo, iova_size(iova),
					!deferred_flush[i].freelist[j], 0);
			else {
				mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
				iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
						(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
			}
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
			if (deferred_flush[i].freelist[j])
				dma_free_pagelist(deferred_flush[i].freelist[j]);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}

static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

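/*
 * Queue an unmapped IOVA range for deferred IOTLB flushing; the actual
 * flush happens from the timer, or immediately once HIGH_WATER_MARK
 * entries have accumulated.
 */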
static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].freelist[next] = freelist;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

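/*
 * Tear down the mapping at @dev_addr: free the page tables for the IOVA
 * range and either flush synchronously (intel_iommu_strict) or defer
 * the flush and the freeing of the IOVA to the batched path above.
 */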
static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
{
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;
	struct page *freelist;

	if (iommu_no_mapping(dev))
		return;

	domain = find_domain(dev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
		      (unsigned long long)dev_addr))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 dev_name(dev), start_pfn, last_pfn);

	freelist = domain_unmap(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1, !freelist, 0);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		dma_free_pagelist(freelist);
	} else {
		add_unmap(domain, iova, freelist);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}

static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	intel_unmap(dev, dev_addr);
}

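/*
 * DMA-API coherent allocation: get zeroed pages (from CMA when the
 * caller may sleep), honouring the device's coherent DMA mask, and map
 * them bidirectionally through the IOMMU.
 */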
static void *intel_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	struct page *page = NULL;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	if (!iommu_no_mapping(dev))
		flags &= ~(GFP_DMA | GFP_DMA32);
	else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
			flags |= GFP_DMA;
		else
			flags |= GFP_DMA32;
	}

	if (flags & __GFP_WAIT) {
		unsigned int count = size >> PAGE_SHIFT;

		page = dma_alloc_from_contiguous(dev, count, order);
		if (page && iommu_no_mapping(dev) &&
		    page_to_phys(page) + size > dev->coherent_dma_mask) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}

	if (!page)
		page = alloc_pages(flags, order);
	if (!page)
		return NULL;
	memset(page_address(page), 0, size);

	*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
					 DMA_BIDIRECTIONAL,
					 dev->coherent_dma_mask);
	if (*dma_handle)
		return page_address(page);
	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
		__free_pages(page, order);

	return NULL;
}

static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
				dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	int order;
	struct page *page = virt_to_page(vaddr);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap(dev, dma_handle);
	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
		__free_pages(page, order);
}

static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	intel_unmap(dev, sglist[0].dma_address);
}

static int intel_nontranslate_map_sg(struct device *hddev,
				     struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}

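/*
 * DMA-API scatterlist mapping: allocate one IOVA range large enough for
 * the whole list and map every segment into it; identity-mapped devices
 * bypass translation via intel_nontranslate_map_sg().
 */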
static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_vpfn;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(dev))
		return intel_nontranslate_map_sg(dev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(dev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i)
		size += aligned_nrpages(sg->offset, sg->length);

	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
				*dev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);

	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
	if (unlikely(ret)) {
		dma_pte_free_pagetable(domain, start_vpfn,
				       start_vpfn + size - 1);
		__free_iova(&domain->iovad, iova);
		return 0;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}

static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

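/*
 * The dma_map_ops exported for devices behind a VT-d unit; drivers
 * reach these through the usual DMA API (dma_map_page(),
 * dma_alloc_coherent() and friends) rather than calling them directly.
 */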
struct dma_map_ops intel_dma_ops = {
	.alloc = intel_alloc_coherent,
	.free = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};

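/*
 * Once intel_dma_ops is installed as the global dma_ops (see
 * intel_iommu_init() below), the generic DMA API dispatches into this
 * table. Roughly, on the x86 dma-mapping-common path of this era:
 *
 *	static inline dma_addr_t dma_map_single(struct device *dev,
 *						void *ptr, size_t size,
 *						enum dma_data_direction dir)
 *	{
 *		struct dma_map_ops *ops = get_dma_ops(dev);
 *
 *		return ops->map_page(dev, virt_to_page(ptr),
 *				     offset_in_page(ptr), size, dir, NULL);
 *	}
 *
 * so drivers reach intel_map_page()/intel_map_sg() without knowing an
 * IOMMU is present. This is a paraphrase, not the literal header code.
 */
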
static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					       sizeof(struct dmar_domain),
					       0,
					       SLAB_HWCACHE_ALIGN,
					       NULL);
	if (!iommu_domain_cache) {
		pr_err("Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		pr_err("Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static int __init iommu_init_mempool(void)
{
	int ret;

	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	iommu_iova_cache_destroy();

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	iommu_iova_cache_destroy();
}

static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
{
	struct dmar_drhd_unit *drhd;
	u32 vtbar;
	int rc;

	/* We know that this device on this chipset has its own IOMMU.
	 * If we find it under a different IOMMU, then the BIOS is lying
	 * to us. Hope that the IOMMU for this device is actually
	 * disabled, and it needs no translation...
	 */
	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
	if (rc) {
		/* "can't" happen */
		dev_info(&pdev->dev, "failed to run vt-d quirk\n");
		return;
	}
	vtbar &= 0xffff0000;

	/* We know that this IOMMU should be at offset 0xa000 from vtbar */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
			    TAINT_FIRMWARE_WORKAROUND,
			    "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
		pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);

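/*
 * DECLARE_PCI_FIXUP_ENABLE() runs its hook from pci_enable_device(),
 * late enough that config space is accessible but before the driver can
 * start DMA. The general pattern for adding such a quirk looks like the
 * IOAT fixup above; as a hedged sketch (device ID and body hypothetical):
 *
 *	static void quirk_example(struct pci_dev *pdev)
 *	{
 *		dev_info(&pdev->dev, "applying example quirk\n");
 *	}
 *	DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, 0x1234, quirk_example);
 */
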
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;
	struct device *dev;
	int i;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			for_each_active_dev_scope(drhd->devices,
						  drhd->devices_cnt, i, dev)
				break;
			/* ignore DMAR unit if no devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	for_each_active_drhd_unit(drhd) {
		if (drhd->include_all)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, dev)
			if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
				break;
		if (i < drhd->devices_cnt)
			continue;

		/*
		 * This IOMMU has *only* gfx devices. Either bypass it or
		 * set the gfx_mapped flag, as appropriate.
		 */
		if (dmar_map_gfx) {
			intel_iommu_gfx_mapped = 1;
		} else {
			drhd->ignored = 1;
			for_each_active_dev_scope(drhd->devices,
						  drhd->devices_cnt, i, dev)
				dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}

#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_iommu(iommu, drhd) {
		if (drhd->ignored) {
			/*
			 * We always have to disable PMRs or DMA may fail on
			 * this device.
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(iommu);
			continue;
		}

		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
		iommu_enable_translation(iommu);
		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;
}

static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}

static int iommu_suspend(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
					     GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}

static void iommu_resume(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		if (force_on)
			panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
		else
			WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return;
	}

	for_each_active_iommu(iommu, drhd) {

		raw_spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
		       iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
		       iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
		       iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
		       iommu->reg + DMAR_FEUADDR_REG);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);
}

static struct syscore_ops iommu_syscore_ops = {
	.resume = iommu_resume,
	.suspend = iommu_suspend,
};

static void __init init_iommu_pm_ops(void)
{
	register_syscore_ops(&iommu_syscore_ops);
}

#else
static inline void init_iommu_pm_ops(void) {}
#endif /* CONFIG_SUSPEND */

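/*
 * Suspend/resume deliberately uses syscore_ops rather than a regular
 * device with dev_pm_ops: syscore callbacks run on one CPU with
 * interrupts disabled, after all ordinary devices have suspended and
 * before any of them resume, which is the only safe point to tear down
 * or restore the translation hardware their DMA depends on. The whole
 * registration pattern is just (foo_* names are placeholders):
 *
 *	static struct syscore_ops foo_syscore_ops = {
 *		.suspend = foo_suspend,		// int (*)(void), 0 on success
 *		.resume  = foo_resume,		// void (*)(void)
 *	};
 *	register_syscore_ops(&foo_syscore_ops);
 */
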
int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;
	rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
					      ((void *)rmrr) + rmrr->header.length,
					      &rmrru->devices_cnt);
	if (rmrru->devices_cnt && rmrru->devices == NULL) {
		kfree(rmrru);
		return -ENOMEM;
	}

	list_add(&rmrru->list, &dmar_rmrr_units);

	return 0;
}

static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
{
	struct dmar_atsr_unit *atsru;
	struct acpi_dmar_atsr *tmp;

	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
		tmp = (struct acpi_dmar_atsr *)atsru->hdr;
		if (atsr->segment != tmp->segment)
			continue;
		if (atsr->header.length != tmp->header.length)
			continue;
		if (memcmp(atsr, tmp, atsr->header.length) == 0)
			return atsru;
	}

	return NULL;
}

int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
		return 0;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (atsru)
		return 0;

	atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	/*
	 * If memory is allocated from slab by ACPI _DSM method, we need to
	 * copy the memory content because the memory buffer will be freed
	 * on return.
	 */
	atsru->hdr = (void *)(atsru + 1);
	memcpy(atsru->hdr, hdr, hdr->length);
	atsru->include_all = atsr->flags & 0x1;
	if (!atsru->include_all) {
		atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
						      (void *)atsr + atsr->header.length,
						      &atsru->devices_cnt);
		if (atsru->devices_cnt && atsru->devices == NULL) {
			kfree(atsru);
			return -ENOMEM;
		}
	}

	list_add_rcu(&atsru->list, &dmar_atsr_units);

	return 0;
}

static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
{
	dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
	kfree(atsru);
}

int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (atsru) {
		list_del_rcu(&atsru->list);
		synchronize_rcu();
		intel_iommu_free_atsr(atsru);
	}

	return 0;
}

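/*
 * The release path above follows the standard RCU deletion recipe:
 * unlink with list_del_rcu(), wait out all pre-existing readers (the
 * list_for_each_entry_rcu() walkers under rcu_read_lock()) with
 * synchronize_rcu(), and only then free. Sketched generically, with the
 * writer-side lock shown explicitly (here it is presumed to be provided
 * by the callers' locking, e.g. dmar_global_lock in the hotplug paths):
 *
 *	spin_lock(&lock);
 *	list_del_rcu(&obj->list);
 *	spin_unlock(&lock);
 *	synchronize_rcu();	// readers still traversing 'obj' drain out
 *	kfree(obj);
 */
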
int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
	int i;
	struct device *dev;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = dmar_find_atsr(atsr);
	if (!atsru)
		return 0;

	if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
		for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
					  i, dev)
			return -EBUSY;

	return 0;
}

static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
{
	int sp, ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (g_iommus[iommu->seq_id])
		return 0;

	if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
		pr_warn("%s: Doesn't support hardware pass through.\n",
			iommu->name);
		return -ENXIO;
	}
	if (!ecap_sc_support(iommu->ecap) &&
	    domain_update_iommu_snooping(iommu)) {
		pr_warn("%s: Doesn't support snooping.\n",
			iommu->name);
		return -ENXIO;
	}
	sp = domain_update_iommu_superpage(iommu) - 1;
	if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
		pr_warn("%s: Doesn't support large page.\n",
			iommu->name);
		return -ENXIO;
	}

	/*
	 * Disable translation if already enabled prior to OS handover.
	 */
	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	g_iommus[iommu->seq_id] = iommu;
	ret = iommu_init_domains(iommu);
	if (ret == 0)
		ret = iommu_alloc_root_entry(iommu);
	if (ret)
		goto out;

	if (dmaru->ignored) {
		/*
		 * We always have to disable PMRs or DMA may fail on this device.
		 */
		if (force_on)
			iommu_disable_protect_mem_regions(iommu);
		return 0;
	}

	intel_iommu_init_qi(iommu);
	iommu_flush_write_buffer(iommu);
	ret = dmar_set_interrupt(iommu);
	if (ret)
		goto disable_iommu;

	iommu_set_root_entry(iommu);
	iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
	iommu_enable_translation(iommu);

	if (si_domain) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret < 0 || si_domain->id != ret)
			goto disable_iommu;
		domain_attach_iommu(si_domain, iommu);
	}

	iommu_disable_protect_mem_regions(iommu);
	return 0;

disable_iommu:
	disable_dmar_iommu(iommu);
out:
	free_dmar_iommu(iommu);
	return ret;
}

int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	int ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (!intel_iommu_enabled)
		return 0;
	if (iommu == NULL)
		return -EINVAL;

	if (insert) {
		ret = intel_iommu_add(dmaru);
	} else {
		disable_dmar_iommu(iommu);
		free_dmar_iommu(iommu);
	}

	return ret;
}

static void intel_iommu_free_dmars(void)
{
	struct dmar_rmrr_unit *rmrru, *rmrr_n;
	struct dmar_atsr_unit *atsru, *atsr_n;

	list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
		list_del(&rmrru->list);
		dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
		kfree(rmrru);
	}

	list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
		list_del(&atsru->list);
		intel_iommu_free_atsr(atsru);
	}
}

int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i, ret = 1;
	struct pci_bus *bus;
	struct pci_dev *bridge = NULL;
	struct device *tmp;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);
	for (bus = dev->bus; bus; bus = bus->parent) {
		bridge = bus->self;
		if (!bridge || !pci_is_pcie(bridge) ||
		    pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
			break;
	}
	if (!bridge)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment != pci_domain_nr(dev->bus))
			continue;

		for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
			if (tmp == &bridge->dev)
				goto out;

		if (atsru->include_all)
			goto out;
	}
	ret = 0;
out:
	rcu_read_unlock();

	return ret;
}

int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_rmrr_unit *rmrru;
	struct dmar_atsr_unit *atsru;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_reserved_memory *rmrr;

	if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
		return 0;

	list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
		rmrr = container_of(rmrru->hdr,
				    struct acpi_dmar_reserved_memory, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
						    ((void *)rmrr) + rmrr->header.length,
						    rmrr->segment, rmrru->devices,
						    rmrru->devices_cnt);
			if (ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
			dmar_remove_dev_scope(info, rmrr->segment,
					      rmrru->devices, rmrru->devices_cnt);
		}
	}

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		if (atsru->include_all)
			continue;

		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
			ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
						    (void *)atsr + atsr->header.length,
						    atsr->segment, atsru->devices,
						    atsru->devices_cnt);
			if (ret > 0)
				break;
			else if (ret < 0)
				return ret;
		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
			if (dmar_remove_dev_scope(info, atsr->segment,
						  atsru->devices, atsru->devices_cnt))
				break;
		}
	}

	return 0;
}

/*
 * Here we only respond to a device being unbound from its driver.
 *
 * A newly added device is not attached to its DMAR domain here yet; that
 * happens when the device is mapped to an iova.
 */
static int device_notifier(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	struct device *dev = data;
	struct dmar_domain *domain;

	if (iommu_dummy(dev))
		return 0;

	if (action != BUS_NOTIFY_REMOVED_DEVICE)
		return 0;

	domain = find_domain(dev);
	if (!domain)
		return 0;

	down_read(&dmar_global_lock);
	domain_remove_one_dev_info(domain, dev);
	if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
		domain_exit(domain);
	up_read(&dmar_global_lock);

	return 0;
}

static struct notifier_block device_nb = {
	.notifier_call = device_notifier,
};

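/*
 * device_nb is registered in intel_iommu_init() below via
 * bus_register_notifier(&pci_bus_type, &device_nb), so device_notifier()
 * fires for every PCI device event. The generic shape of such a bus
 * notifier, for reference (foo_* names hypothetical):
 *
 *	static int foo_notifier(struct notifier_block *nb,
 *				unsigned long action, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		if (action == BUS_NOTIFY_REMOVED_DEVICE)
 *			foo_cleanup(dev);
 *		return 0;
 *	}
 */
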
static int intel_iommu_memory_notifier(struct notifier_block *nb,
				       unsigned long val, void *v)
{
	struct memory_notify *mhp = v;
	unsigned long long start, end;
	unsigned long start_vpfn, last_vpfn;

	switch (val) {
	case MEM_GOING_ONLINE:
		start = mhp->start_pfn << PAGE_SHIFT;
		end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
		if (iommu_domain_identity_map(si_domain, start, end)) {
			pr_warn("Failed to build identity map for [%llx-%llx]\n",
				start, end);
			return NOTIFY_BAD;
		}
		break;

	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
		start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
		last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
		while (start_vpfn <= last_vpfn) {
			struct iova *iova;
			struct dmar_drhd_unit *drhd;
			struct intel_iommu *iommu;
			struct page *freelist;

			iova = find_iova(&si_domain->iovad, start_vpfn);
			if (iova == NULL) {
				pr_debug("Failed get IOVA for PFN %lx\n",
					 start_vpfn);
				break;
			}

			iova = split_and_remove_iova(&si_domain->iovad, iova,
						     start_vpfn, last_vpfn);
			if (iova == NULL) {
				pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
					start_vpfn, last_vpfn);
				return NOTIFY_BAD;
			}

			freelist = domain_unmap(si_domain, iova->pfn_lo,
						iova->pfn_hi);

			rcu_read_lock();
			for_each_active_iommu(iommu, drhd)
				iommu_flush_iotlb_psi(iommu, si_domain->id,
						      iova->pfn_lo, iova_size(iova),
						      !freelist, 0);
			rcu_read_unlock();
			dma_free_pagelist(freelist);

			start_vpfn = iova->pfn_hi + 1;
			free_iova_mem(iova);
		}
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block intel_iommu_memory_nb = {
	.notifier_call = intel_iommu_memory_notifier,
	.priority = 0
};

static ssize_t intel_iommu_show_version(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	u32 ver = readl(iommu->reg + DMAR_VER_REG);
	return sprintf(buf, "%d:%d\n",
		       DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
}
static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);

static ssize_t intel_iommu_show_address(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->reg_phys);
}
static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);

static ssize_t intel_iommu_show_cap(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);

static ssize_t intel_iommu_show_ecap(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->ecap);
}
static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);

static struct attribute *intel_iommu_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_address.attr,
	&dev_attr_cap.attr,
	&dev_attr_ecap.attr,
	NULL,
};

static struct attribute_group intel_iommu_group = {
	.name = "intel-iommu",
	.attrs = intel_iommu_attrs,
};

const struct attribute_group *intel_iommu_groups[] = {
	&intel_iommu_group,
	NULL,
};

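/*
 * These attributes surface each DMAR unit's identity registers through
 * sysfs via the iommu_device_create() call in intel_iommu_init() below.
 * On a typical system they appear as (the unit name varies by platform):
 *
 *	/sys/class/iommu/dmar0/intel-iommu/version
 *	/sys/class/iommu/dmar0/intel-iommu/address
 *	/sys/class/iommu/dmar0/intel-iommu/cap
 *	/sys/class/iommu/dmar0/intel-iommu/ecap
 *
 * Reading cap/ecap yields the raw register values, which can be decoded
 * against the cap_*()/ecap_*() accessors used throughout this file.
 */
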
int __init intel_iommu_init(void)
{
	int ret = -ENODEV;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* VT-d is required for a TXT/tboot launch, so enforce that */
	force_on = tboot_force_iommu();

	if (iommu_init_mempool()) {
		if (force_on)
			panic("tboot: Failed to initialize iommu memory\n");
		return -ENOMEM;
	}

	down_write(&dmar_global_lock);
	if (dmar_table_init()) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR table\n");
		goto out_free_dmar;
	}

	if (dmar_dev_scope_init() < 0) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR device scope\n");
		goto out_free_dmar;
	}

	if (no_iommu || dmar_disabled)
		goto out_free_dmar;

	if (list_empty(&dmar_rmrr_units))
		pr_info("No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		pr_info("No ATSR found\n");

	if (dmar_init_reserved_ranges()) {
		if (force_on)
			panic("tboot: Failed to reserve iommu ranges\n");
		goto out_free_reserved_range;
	}

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		if (force_on)
			panic("tboot: Failed to initialize DMARs\n");
		pr_err("Initialization failed\n");
		goto out_free_reserved_range;
	}
	up_write(&dmar_global_lock);
	pr_info("Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
	swiotlb = 0;
#endif
	dma_ops = &intel_dma_ops;

	init_iommu_pm_ops();

	for_each_active_iommu(iommu, drhd)
		iommu->iommu_dev = iommu_device_create(NULL, iommu,
						       intel_iommu_groups,
						       iommu->name);

	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
	bus_register_notifier(&pci_bus_type, &device_nb);
	if (si_domain && !hw_pass_through)
		register_memory_notifier(&intel_iommu_memory_nb);

	intel_iommu_enabled = 1;

	return 0;

out_free_reserved_range:
	put_iova_domain(&reserved_iova_list);
out_free_dmar:
	intel_iommu_free_dmars();
	up_write(&dmar_global_lock);
	iommu_exit_mempool();
	return ret;
}

static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct intel_iommu *iommu = opaque;

	iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
	return 0;
}

/*
 * NB - intel-iommu lacks any sort of reference counting for the users of
 * dependent devices. If multiple endpoints have intersecting dependent
 * devices, unbinding the driver from any one of them will possibly leave
 * the others unable to operate.
 */
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev)
{
	if (!iommu || !dev || !dev_is_pci(dev))
		return;

	pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev)
{
	struct device_domain_info *info, *tmp;
	struct intel_iommu *iommu;
	unsigned long flags;
	bool found = false;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
		if (info->iommu == iommu && info->bus == bus &&
		    info->devfn == devfn) {
			unlink_domain_info(info);
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, dev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/*
		 * If there are no other devices under the same iommu owned by
		 * this domain, clear this iommu in the iommu_bmp and update
		 * the iommu count and coherency.
		 */
		if (info->iommu == iommu)
			found = true;
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found) {
		domain_detach_iommu(domain, iommu);
		if (!domain_type_is_vm_or_si(domain))
			iommu_detach_domain(domain, iommu);
	}
}

static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
			 DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->iommu_superpage = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{
	struct dmar_domain *dmar_domain;
	struct iommu_domain *domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
	if (!dmar_domain) {
		pr_err("Can't allocate dmar_domain\n");
		return NULL;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		pr_err("Domain initialization failed\n");
		domain_exit(dmar_domain);
		return NULL;
	}
	domain_update_iommu_cap(dmar_domain);

	domain = &dmar_domain->domain;
	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
	domain->geometry.force_aperture = true;

	return domain;
}

static void intel_iommu_domain_free(struct iommu_domain *domain)
{
	domain_exit(to_dmar_domain(domain));
}

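/*
 * domain_alloc/domain_free are not called directly; they back the
 * generic IOMMU API, which a consumer such as VFIO or KVM exercises
 * roughly like this (error handling elided, pdev hypothetical):
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *
 *	iommu_attach_device(dom, &pdev->dev);
 *	iommu_map(dom, iova, phys, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	// ... device DMA to 'iova' now resolves to 'phys' ...
 *	iommu_unmap(dom, iova, SZ_4K);
 *	iommu_detach_device(dom, &pdev->dev);
 *	iommu_domain_free(dom);
 */
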
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct intel_iommu *iommu;
	int addr_width;
	u8 bus, devfn;

	if (device_is_rmrr_locked(dev)) {
		dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement.  Contact your platform vendor.\n");
		return -EPERM;
	}

	/* normally dev is not mapped */
	if (unlikely(domain_context_mapped(dev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(dev);
		if (old_domain) {
			if (domain_type_is_vm_or_si(dmar_domain))
				domain_remove_one_dev_info(old_domain, dev);
			else
				domain_remove_dev_info(old_domain);

			if (!domain_type_is_vm_or_si(old_domain) &&
			    list_empty(&old_domain->devices))
				domain_exit(old_domain);
		}
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		pr_err("%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	domain_remove_one_dev_info(to_dmar_domain(domain), dev);
}

static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			pr_err("%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/*
	 * Round up size to next multiple of PAGE_SIZE, if it and
	 * the low bits of hpa would take us onto the next page.
	 */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct page *freelist = NULL;
	struct intel_iommu *iommu;
	unsigned long start_pfn, last_pfn;
	unsigned int npages;
	int iommu_id, num, ndomains, level = 0;

	/*
	 * Cope with horrid API which requires us to unmap more than the
	 * size argument if it happens to be a large-page mapping.
	 */
	if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
		BUG();

	if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
		size = VTD_PAGE_SIZE << level_to_offset_bits(level);

	start_pfn = iova >> VTD_PAGE_SHIFT;
	last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;

	freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);

	npages = last_pfn - start_pfn + 1;

	for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
		iommu = g_iommus[iommu_id];

		/* find the bit position of dmar_domain */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == dmar_domain)
				iommu_flush_iotlb_psi(iommu, num, start_pfn,
						      npages, !freelist, 0);
		}
	}

	dma_free_pagelist(freelist);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return size;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct dma_pte *pte;
	int level = 0;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static bool intel_iommu_capable(enum iommu_cap cap)
{
	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return domain_update_iommu_snooping(NULL) == 1;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled == 1;

	return false;
}

static int intel_iommu_add_device(struct device *dev)
{
	struct intel_iommu *iommu;
	struct iommu_group *group;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	iommu_device_link(iommu->iommu_dev, dev);

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void intel_iommu_remove_device(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	iommu_group_remove_device(dev);

	iommu_device_unlink(iommu->iommu_dev, dev);
}

static const struct iommu_ops intel_iommu_ops = {
	.capable	= intel_iommu_capable,
	.domain_alloc	= intel_iommu_domain_alloc,
	.domain_free	= intel_iommu_domain_free,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.map_sg		= default_iommu_map_sg,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.add_device	= intel_iommu_add_device,
	.remove_device	= intel_iommu_remove_device,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};

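/*
 * This ops table is wired up once, in intel_iommu_init(), with
 * bus_set_iommu(&pci_bus_type, &intel_iommu_ops). From then on the
 * generic iommu_*() calls on any PCI device resolve through it, and
 * add_device/remove_device run as devices appear on and leave the bus.
 */
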
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	pr_info("Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	pr_info("Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

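/*
 * Worked example of the decode performed by the quirk below, using only
 * the field definitions above (the values are illustrative, not from a
 * specific machine): a GGC word of 0x0958 has
 * (ggc & GGC_MEMORY_SIZE_MASK) == 0x0900 == GGC_MEMORY_SIZE_2M_VT, so
 * the GGC_MEMORY_VT_ENABLED bit (0x0800) is set and the IOMMU stays on;
 * a word of 0x0158 decodes to GGC_MEMORY_SIZE_1M with the VT bit clear,
 * and graphics translation gets disabled.
 */
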
static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		pr_info("Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/*
 * On Tylersburg chipsets, some BIOSes have been known to enable the
 * ISOCH DMAR unit for the Azalia sound device, but not give it any
 * TLB entries, which causes it to deadlock. Check for that. We do
 * this in a function called from init_dmars(), instead of in a PCI
 * quirk, because we don't want to print the obnoxious "BIOS broken"
 * message if VT-d is actually disabled.
 */
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/*
	 * System Management Registers. Might be hidden, in which case
	 * we can't do the sanity check. But that's OK, because the
	 * known-broken BIOSes _don't_ actually hide it, so far.
	 */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
		vtisochctrl);
}