/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"
#include "pci.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

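/*
 * Example for the DOMAIN_MAX_* macros above: with the default 48-bit
 * guest address width, __DOMAIN_MAX_PFN(48) == 2^36 - 1 and
 * DOMAIN_MAX_ADDR(48) spans the full 48-bit (256TiB) DMA address space;
 * on a 32-bit kernel DOMAIN_MAX_PFN is clamped to ULONG_MAX so that PFN
 * arithmetic stays within 'unsigned long'.
 */
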
#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size was a power-of-two multiple of 4KiB and
 * that the mapping had natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are a power-of-two multiple of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

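/*
 * How the helpers above relate, by example: the default 48-bit address
 * width maps to agaw 2 (width_to_agaw(48) == 2), i.e. a 4-level page
 * table (agaw_to_level(2) == 4).  Each level resolves 9 bits of the
 * PFN, so level_size(2) == 512 pages (2MiB of 4KiB pages) and
 * level_size(3) == 512 * 512 pages (1GiB).
 */
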
static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

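/*
 * Note on the conversions above: with 4KiB MM pages (the usual x86
 * configuration) PAGE_SHIFT == VTD_PAGE_SHIFT, so dma_to_mm_pfn() and
 * mm_to_dma_pfn() are identity operations; the shifts only do real work
 * when the architecture uses MM pages larger than the 4KiB VT-d page.
 */
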
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d cannot be enabled successfully
 * (used when the kernel is launched with TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root)?phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}

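/*
 * Illustration of the root table layout used above: the root table is a
 * single 4KiB page indexed by PCI bus number (ROOT_ENTRY_NR == 256
 * entries of 16 bytes), and each present root entry points to a 4KiB
 * context table indexed by devfn.  This is how device_to_context_entry()
 * below resolves a bus/devfn pair to its context entry.
 */
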
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: avail
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

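/*
 * Worked example of the accessors above, assuming translation type 0: a
 * context entry for domain id 5 using a 3-level table (address width
 * value 1) rooted at physical address 0x12345000 ends up with
 * lo == 0x12345001 (root | present) and hi == (5 << 8) | 1 == 0x501.
 */
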
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & (1 << 7));
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

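/*
 * A page-table page holds VTD_PAGE_SIZE / sizeof(struct dma_pte) == 512
 * PTEs, so first_pte_in_page() is true only for the PTE at offset 0 of
 * its page; the walk loops below use it to notice when a linear scan
 * has crossed into the next page-table page.
 */
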
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

/* define the limit of IOMMUs supported in each domain */
#ifdef CONFIG_X86
# define	IOMMU_UNITS_SUPPORTED	MAX_IO_APICS
#else
# define	IOMMU_UNITS_SUPPORTED	64
#endif

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses*/

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units	*/
	struct acpi_dmar_header *hdr;	/* ACPI header		*/
	u64	base_address;		/* reserved base address*/
	u64	end_address;		/* reserved end address */
	struct pci_dev __rcu **devices;	/* target devices */
	int	devices_cnt;		/* target device count */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct pci_dev __rcu **devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev);
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void * alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}


static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, use a default agaw, and
 * get a supported less agaw for iommus that don't support the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	i = find_first_bit(domain->iommu_bmp, g_num_of_iommus);

	domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
	}
}

static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		domain->iommu_superpage = 0;
		return;
	}

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		mask &= cap_super_page_val(iommu->cap);
		if (!mask) {
			break;
		}
	}
	rcu_read_unlock();

	domain->iommu_superpage = fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
	domain_update_iommu_superpage(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct pci_dev *dev;
	int i;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, dev) {
			if (dev->bus->number == bus && dev->devfn == devfn)
				goto out;
			if (dev->subordinate &&
			    dev->subordinate->number <= bus &&
			    dev->subordinate->busn_res.end >= bus)
				goto out;
		}

		if (drhd->include_all)
			goto out;
	}
	iommu = NULL;
out:
	rcu_read_unlock();

	return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn], \
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (addr_width < BITS_PER_LONG && pfn >> addr_width)
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				dma_pte_addr(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}

Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700844/* return address's pte at specific level */
David Woodhouse90dcfb52009-06-27 17:14:59 +0100845static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
846 unsigned long pfn,
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100847 int level, int *large_page)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700848{
849 struct dma_pte *parent, *pte = NULL;
850 int total = agaw_to_level(domain->agaw);
851 int offset;
852
853 parent = domain->pgd;
854 while (level <= total) {
David Woodhouse90dcfb52009-06-27 17:14:59 +0100855 offset = pfn_level_offset(pfn, total);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700856 pte = &parent[offset];
857 if (level == total)
858 return pte;
859
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100860 if (!dma_pte_present(pte)) {
861 *large_page = total;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700862 break;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100863 }
864
865 if (pte->val & DMA_PTE_LARGE_PAGE) {
866 *large_page = total;
867 return pte;
868 }
869
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000870 parent = phys_to_virt(dma_pte_addr(pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700871 total--;
872 }
873 return NULL;
874}
875
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700876/* clear last level pte, a tlb flush should be followed */
David Woodhouse5cf0a762014-03-19 16:07:49 +0000877static void dma_pte_clear_range(struct dmar_domain *domain,
David Woodhouse595badf2009-06-27 22:09:11 +0100878 unsigned long start_pfn,
879 unsigned long last_pfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700880{
David Woodhouse04b18e62009-06-27 19:15:01 +0100881 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100882 unsigned int large_page = 1;
David Woodhouse310a5ab2009-06-28 18:52:20 +0100883 struct dma_pte *first_pte, *pte;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700884
David Woodhouse04b18e62009-06-27 19:15:01 +0100885 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
David Woodhouse595badf2009-06-27 22:09:11 +0100886 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
David Woodhouse59c36282009-09-19 07:36:28 -0700887 BUG_ON(start_pfn > last_pfn);
David Woodhouse66eae842009-06-27 19:00:32 +0100888
David Woodhouse04b18e62009-06-27 19:15:01 +0100889 /* we don't need lock here; nobody else touches the iova range */
David Woodhouse59c36282009-09-19 07:36:28 -0700890 do {
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100891 large_page = 1;
892 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
David Woodhouse310a5ab2009-06-28 18:52:20 +0100893 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100894 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
David Woodhouse310a5ab2009-06-28 18:52:20 +0100895 continue;
896 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100897 do {
David Woodhouse310a5ab2009-06-28 18:52:20 +0100898 dma_clear_pte(pte);
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100899 start_pfn += lvl_to_nr_pages(large_page);
David Woodhouse310a5ab2009-06-28 18:52:20 +0100900 pte++;
David Woodhouse75e6bf92009-07-02 11:21:16 +0100901 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
902
David Woodhouse310a5ab2009-06-28 18:52:20 +0100903 domain_flush_cache(domain, first_pte,
904 (void *)pte - (void *)first_pte);
David Woodhouse59c36282009-09-19 07:36:28 -0700905
906 } while (start_pfn && start_pfn <= last_pfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700907}
908
Alex Williamson3269ee02013-06-15 10:27:19 -0600909static void dma_pte_free_level(struct dmar_domain *domain, int level,
910 struct dma_pte *pte, unsigned long pfn,
911 unsigned long start_pfn, unsigned long last_pfn)
912{
913 pfn = max(start_pfn, pfn);
914 pte = &pte[pfn_level_offset(pfn, level)];
915
916 do {
917 unsigned long level_pfn;
918 struct dma_pte *level_pte;
919
920 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
921 goto next;
922
923 level_pfn = pfn & level_mask(level - 1);
924 level_pte = phys_to_virt(dma_pte_addr(pte));
925
926 if (level > 2)
927 dma_pte_free_level(domain, level - 1, level_pte,
928 level_pfn, start_pfn, last_pfn);
929
930 /* If range covers entire pagetable, free it */
931 if (!(start_pfn > level_pfn ||
Alex Williamson08336fd2014-01-21 15:48:18 -0800932 last_pfn < level_pfn + level_size(level) - 1)) {
Alex Williamson3269ee02013-06-15 10:27:19 -0600933 dma_clear_pte(pte);
934 domain_flush_cache(domain, pte, sizeof(*pte));
935 free_pgtable_page(level_pte);
936 }
937next:
938 pfn += level_size(level);
939 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
940}
941
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700942/* free page table pages. last level pte should already be cleared */
943static void dma_pte_free_pagetable(struct dmar_domain *domain,
David Woodhoused794dc92009-06-28 00:27:49 +0100944 unsigned long start_pfn,
945 unsigned long last_pfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700946{
David Woodhouse6660c632009-06-27 22:41:00 +0100947 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700948
David Woodhouse6660c632009-06-27 22:41:00 +0100949 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
950 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
David Woodhouse59c36282009-09-19 07:36:28 -0700951 BUG_ON(start_pfn > last_pfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700952
David Woodhousef3a0a522009-06-30 03:40:07 +0100953 /* We don't need lock here; nobody else touches the iova range */
Alex Williamson3269ee02013-06-15 10:27:19 -0600954 dma_pte_free_level(domain, agaw_to_level(domain->agaw),
955 domain->pgd, 0, start_pfn, last_pfn);
David Woodhouse6660c632009-06-27 22:41:00 +0100956
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700957 /* free pgd */
David Woodhoused794dc92009-06-28 00:27:49 +0100958 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700959 free_pgtable_page(domain->pgd);
960 domain->pgd = NULL;
961 }
962}
963
964/* iommu handling */
965static int iommu_alloc_root_entry(struct intel_iommu *iommu)
966{
967 struct root_entry *root;
968 unsigned long flags;
969
Suresh Siddha4c923d42009-10-02 11:01:24 -0700970 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700971 if (!root)
972 return -ENOMEM;
973
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700974 __iommu_flush_cache(iommu, root, ROOT_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700975
976 spin_lock_irqsave(&iommu->lock, flags);
977 iommu->root_entry = root;
978 spin_unlock_irqrestore(&iommu->lock, flags);
979
980 return 0;
981}
982
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700983static void iommu_set_root_entry(struct intel_iommu *iommu)
984{
985 void *addr;
David Woodhousec416daa2009-05-10 20:30:58 +0100986 u32 sts;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700987 unsigned long flag;
988
989 addr = iommu->root_entry;
990
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +0200991 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700992 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
993
David Woodhousec416daa2009-05-10 20:30:58 +0100994 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700995
996 /* Make sure hardware complete it */
997 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +0100998 readl, (sts & DMA_GSTS_RTPS), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700999
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001000 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001001}
1002
1003static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1004{
1005 u32 val;
1006 unsigned long flag;
1007
David Woodhouse9af88142009-02-13 23:18:03 +00001008 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001009 return;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001010
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001011 raw_spin_lock_irqsave(&iommu->register_lock, flag);
David Woodhouse462b60f2009-05-10 20:18:18 +01001012 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001013
1014 /* Make sure hardware complete it */
1015 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001016 readl, (!(val & DMA_GSTS_WBFS)), val);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001017
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001018 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001019}
1020
1021/* return value determine if we need a write buffer flush */
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001022static void __iommu_flush_context(struct intel_iommu *iommu,
1023 u16 did, u16 source_id, u8 function_mask,
1024 u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001025{
1026 u64 val = 0;
1027 unsigned long flag;
1028
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001029 switch (type) {
1030 case DMA_CCMD_GLOBAL_INVL:
1031 val = DMA_CCMD_GLOBAL_INVL;
1032 break;
1033 case DMA_CCMD_DOMAIN_INVL:
1034 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1035 break;
1036 case DMA_CCMD_DEVICE_INVL:
1037 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1038 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1039 break;
1040 default:
1041 BUG();
1042 }
1043 val |= DMA_CCMD_ICC;
1044
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001045 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001046 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1047
1048 /* Make sure hardware complete it */
1049 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1050 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1051
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001052 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001053}
1054
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001055/* return value determine if we need a write buffer flush */
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001056static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1057 u64 addr, unsigned int size_order, u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001058{
1059 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1060 u64 val = 0, val_iva = 0;
1061 unsigned long flag;
1062
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001063 switch (type) {
1064 case DMA_TLB_GLOBAL_FLUSH:
1065 /* global flush doesn't need set IVA_REG */
1066 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1067 break;
1068 case DMA_TLB_DSI_FLUSH:
1069 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1070 break;
1071 case DMA_TLB_PSI_FLUSH:
1072 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1073 /* Note: always flush non-leaf currently */
1074 val_iva = size_order | addr;
1075 break;
1076 default:
1077 BUG();
1078 }
1079 /* Note: set drain read/write */
1080#if 0
1081 /*
1082 * This is probably to be super secure.. Looks like we can
1083 * ignore it without any impact.
1084 */
1085 if (cap_read_drain(iommu->cap))
1086 val |= DMA_TLB_READ_DRAIN;
1087#endif
1088 if (cap_write_drain(iommu->cap))
1089 val |= DMA_TLB_WRITE_DRAIN;
1090
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001091 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001092 /* Note: Only uses first TLB reg currently */
1093 if (val_iva)
1094 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1095 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1096
1097 /* Make sure hardware complete it */
1098 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1099 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1100
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001101 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001102
1103 /* check IOTLB invalidation granularity */
1104 if (DMA_TLB_IAIG(val) == 0)
1105 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
1106 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1107 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001108 (unsigned long long)DMA_TLB_IIRG(type),
1109 (unsigned long long)DMA_TLB_IAIG(val));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001110}
1111
Yu Zhao93a23a72009-05-18 13:51:37 +08001112static struct device_domain_info *iommu_support_dev_iotlb(
1113 struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001114{
Yu Zhao93a23a72009-05-18 13:51:37 +08001115 int found = 0;
1116 unsigned long flags;
1117 struct device_domain_info *info;
1118 struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
1119
1120 if (!ecap_dev_iotlb_support(iommu->ecap))
1121 return NULL;
1122
1123 if (!iommu->qi)
1124 return NULL;
1125
1126 spin_lock_irqsave(&device_domain_lock, flags);
1127 list_for_each_entry(info, &domain->devices, link)
1128 if (info->bus == bus && info->devfn == devfn) {
1129 found = 1;
1130 break;
1131 }
1132 spin_unlock_irqrestore(&device_domain_lock, flags);
1133
1134 if (!found || !info->dev)
1135 return NULL;
1136
1137 if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
1138 return NULL;
1139
1140 if (!dmar_find_matched_atsr_unit(info->dev))
1141 return NULL;
1142
1143 info->iommu = iommu;
1144
1145 return info;
1146}
1147
1148static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1149{
1150 if (!info)
1151 return;
1152
1153 pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
1154}
1155
1156static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1157{
1158 if (!info->dev || !pci_ats_enabled(info->dev))
1159 return;
1160
1161 pci_disable_ats(info->dev);
1162}
1163
1164static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1165 u64 addr, unsigned mask)
1166{
1167 u16 sid, qdep;
1168 unsigned long flags;
1169 struct device_domain_info *info;
1170
1171 spin_lock_irqsave(&device_domain_lock, flags);
1172 list_for_each_entry(info, &domain->devices, link) {
1173 if (!info->dev || !pci_ats_enabled(info->dev))
1174 continue;
1175
1176 sid = info->bus << 8 | info->devfn;
1177 qdep = pci_ats_queue_depth(info->dev);
1178 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1179 }
1180 spin_unlock_irqrestore(&device_domain_lock, flags);
1181}
1182
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001183static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
Nadav Amit82653632010-04-01 13:24:40 +03001184 unsigned long pfn, unsigned int pages, int map)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001185{
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001186 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
David Woodhouse03d6a242009-06-28 15:33:46 +01001187 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001188
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001189 BUG_ON(pages == 0);
1190
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001191 /*
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001192 * Fallback to domain selective flush if no PSI support or the size is
1193 * too big.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001194 * PSI requires page size to be 2 ^ x, and the base address is naturally
1195 * aligned to the size
1196 */
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001197 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1198 iommu->flush.flush_iotlb(iommu, did, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001199 DMA_TLB_DSI_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001200 else
1201 iommu->flush.flush_iotlb(iommu, did, addr, mask,
1202 DMA_TLB_PSI_FLUSH);
Yu Zhaobf92df32009-06-29 11:31:45 +08001203
1204 /*
Nadav Amit82653632010-04-01 13:24:40 +03001205 * In caching mode, changes of pages from non-present to present require
1206 * flush. However, device IOTLB doesn't need to be flushed in this case.
Yu Zhaobf92df32009-06-29 11:31:45 +08001207 */
Nadav Amit82653632010-04-01 13:24:40 +03001208 if (!cap_caching_mode(iommu->cap) || !map)
Yu Zhao93a23a72009-05-18 13:51:37 +08001209 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001210}
1211
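/*
 * Example of the PSI arithmetic above: unmapping 3 pages at pfn 0x1000
 * gives mask = ilog2(roundup_pow_of_two(3)) = 2, so the hardware is
 * asked to invalidate a naturally aligned 4-page (16KiB) region at
 * address 0x1000000 rather than exactly the 3 pages that changed.
 */
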
mark grossf8bab732008-02-08 04:18:38 -08001212static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1213{
1214 u32 pmen;
1215 unsigned long flags;
1216
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001217 raw_spin_lock_irqsave(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001218 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1219 pmen &= ~DMA_PMEN_EPM;
1220 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1221
1222 /* wait for the protected region status bit to clear */
1223 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1224 readl, !(pmen & DMA_PMEN_PRS), pmen);
1225
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001226 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001227}
1228
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001229static int iommu_enable_translation(struct intel_iommu *iommu)
1230{
1231 u32 sts;
1232 unsigned long flags;
1233
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001234 raw_spin_lock_irqsave(&iommu->register_lock, flags);
David Woodhousec416daa2009-05-10 20:30:58 +01001235 iommu->gcmd |= DMA_GCMD_TE;
1236 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001237
1238 /* Make sure hardware complete it */
1239 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001240 readl, (sts & DMA_GSTS_TES), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001241
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001242 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001243 return 0;
1244}
1245
1246static int iommu_disable_translation(struct intel_iommu *iommu)
1247{
1248 u32 sts;
1249 unsigned long flag;
1250
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001251 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001252 iommu->gcmd &= ~DMA_GCMD_TE;
1253 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1254
1255 /* Make sure hardware complete it */
1256 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001257 readl, (!(sts & DMA_GSTS_TES)), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001258
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001259 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001260 return 0;
1261}
1262
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001263
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001264static int iommu_init_domains(struct intel_iommu *iommu)
1265{
1266 unsigned long ndomains;
1267 unsigned long nlongs;
1268
1269 ndomains = cap_ndoms(iommu->cap);
Jiang Liu852bdb02014-01-06 14:18:11 +08001270 pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
1271 iommu->seq_id, ndomains);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001272 nlongs = BITS_TO_LONGS(ndomains);
1273
Donald Dutile94a91b52009-08-20 16:51:34 -04001274 spin_lock_init(&iommu->lock);
1275
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001276 /* TBD: there might be 64K domains,
1277 * consider other allocation for future chip
1278 */
1279 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1280 if (!iommu->domain_ids) {
Jiang Liu852bdb02014-01-06 14:18:11 +08001281 pr_err("IOMMU%d: allocating domain id array failed\n",
1282 iommu->seq_id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001283 return -ENOMEM;
1284 }
1285 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1286 GFP_KERNEL);
1287 if (!iommu->domains) {
Jiang Liu852bdb02014-01-06 14:18:11 +08001288 pr_err("IOMMU%d: allocating domain array failed\n",
1289 iommu->seq_id);
1290 kfree(iommu->domain_ids);
1291 iommu->domain_ids = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001292 return -ENOMEM;
1293 }
1294
1295 /*
1296 * if Caching mode is set, then invalid translations are tagged
1297 * with domainid 0. Hence we need to pre-allocate it.
1298 */
1299 if (cap_caching_mode(iommu->cap))
1300 set_bit(0, iommu->domain_ids);
1301 return 0;
1302}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001303
Jiang Liua868e6b2014-01-06 14:18:20 +08001304static void free_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001305{
1306 struct dmar_domain *domain;
Jiang Liu5ced12a2014-01-06 14:18:22 +08001307 int i, count;
Weidong Hanc7151a82008-12-08 22:51:37 +08001308 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001309
Donald Dutile94a91b52009-08-20 16:51:34 -04001310 if ((iommu->domains) && (iommu->domain_ids)) {
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001311 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
Jiang Liua4eaa862014-02-19 14:07:30 +08001312 /*
1313 * Domain id 0 is reserved for invalid translation
1314 * if hardware supports caching mode.
1315 */
1316 if (cap_caching_mode(iommu->cap) && i == 0)
1317 continue;
1318
Donald Dutile94a91b52009-08-20 16:51:34 -04001319 domain = iommu->domains[i];
1320 clear_bit(i, iommu->domain_ids);
Weidong Hanc7151a82008-12-08 22:51:37 +08001321
Donald Dutile94a91b52009-08-20 16:51:34 -04001322 spin_lock_irqsave(&domain->iommu_lock, flags);
Jiang Liu5ced12a2014-01-06 14:18:22 +08001323 count = --domain->iommu_count;
1324 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Jiang Liu92d03cc2014-02-19 14:07:28 +08001325 if (count == 0)
1326 domain_exit(domain);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001327 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001328 }
1329
1330 if (iommu->gcmd & DMA_GCMD_TE)
1331 iommu_disable_translation(iommu);
1332
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001333 kfree(iommu->domains);
1334 kfree(iommu->domain_ids);
Jiang Liua868e6b2014-01-06 14:18:20 +08001335 iommu->domains = NULL;
1336 iommu->domain_ids = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001337
Weidong Hand9630fe2008-12-08 11:06:32 +08001338 g_iommus[iommu->seq_id] = NULL;
1339
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001340 /* free context mapping */
1341 free_context_table(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001342}
1343
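/*
 * Allocate a bare dmar_domain. VM domains get a private id from the
 * vm_domid counter here; other domains get an id later through
 * iommu_attach_domain().
 */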
Jiang Liu92d03cc2014-02-19 14:07:28 +08001344static struct dmar_domain *alloc_domain(bool vm)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001345{
Jiang Liu92d03cc2014-02-19 14:07:28 +08001346 /* domain id for virtual machines; it is never set in a context entry */
1347 static atomic_t vm_domid = ATOMIC_INIT(0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001348 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001349
1350 domain = alloc_domain_mem();
1351 if (!domain)
1352 return NULL;
1353
Suresh Siddha4c923d42009-10-02 11:01:24 -07001354 domain->nid = -1;
Jiang Liu92d03cc2014-02-19 14:07:28 +08001355 domain->iommu_count = 0;
Mike Travis1b198bb2012-03-05 15:05:16 -08001356 memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
Weidong Hand71a2f32008-12-07 21:13:41 +08001357 domain->flags = 0;
Jiang Liu92d03cc2014-02-19 14:07:28 +08001358 spin_lock_init(&domain->iommu_lock);
1359 INIT_LIST_HEAD(&domain->devices);
1360 if (vm) {
1361 domain->id = atomic_inc_return(&vm_domid);
1362 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
1363 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001364
1365 return domain;
1366}
1367
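/*
 * Claim a free domain id on the IOMMU for this domain and record the
 * binding in both the IOMMU's domain array and the domain's IOMMU bitmap.
 */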
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001368static int iommu_attach_domain(struct dmar_domain *domain,
1369 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001370{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001371 int num;
1372 unsigned long ndomains;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001373 unsigned long flags;
1374
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001375 ndomains = cap_ndoms(iommu->cap);
Weidong Han8c11e792008-12-08 15:29:22 +08001376
1377 spin_lock_irqsave(&iommu->lock, flags);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001378
1379 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1380 if (num >= ndomains) {
1381 spin_unlock_irqrestore(&iommu->lock, flags);
1382 printk(KERN_ERR "IOMMU: no free domain ids\n");
1383 return -ENOMEM;
1384 }
1385
1386 domain->id = num;
Jiang Liu9ebd6822014-02-19 14:07:29 +08001387 domain->iommu_count++;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001388 set_bit(num, iommu->domain_ids);
Mike Travis1b198bb2012-03-05 15:05:16 -08001389 set_bit(iommu->seq_id, domain->iommu_bmp);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001390 iommu->domains[num] = domain;
1391 spin_unlock_irqrestore(&iommu->lock, flags);
1392
1393 return 0;
1394}
1395
1396static void iommu_detach_domain(struct dmar_domain *domain,
1397 struct intel_iommu *iommu)
1398{
1399 unsigned long flags;
1400 int num, ndomains;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001401
1402 spin_lock_irqsave(&iommu->lock, flags);
1403 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001404 for_each_set_bit(num, iommu->domain_ids, ndomains) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001405 if (iommu->domains[num] == domain) {
Jiang Liu92d03cc2014-02-19 14:07:28 +08001406 clear_bit(num, iommu->domain_ids);
1407 iommu->domains[num] = NULL;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001408 break;
1409 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001410 }
Weidong Han8c11e792008-12-08 15:29:22 +08001411 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001412}
1413
1414static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001415static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001416
Joseph Cihula51a63e62011-03-21 11:04:24 -07001417static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001418{
1419 struct pci_dev *pdev = NULL;
1420 struct iova *iova;
1421 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001422
David Millerf6611972008-02-06 01:36:23 -08001423 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001424
Mark Gross8a443df2008-03-04 14:59:31 -08001425 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1426 &reserved_rbtree_key);
1427
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001428 /* IOAPIC ranges shouldn't be accessed by DMA */
1429 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1430 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001431 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001432 printk(KERN_ERR "Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001433 return -ENODEV;
1434 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001435
1436 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1437 for_each_pci_dev(pdev) {
1438 struct resource *r;
1439
1440 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1441 r = &pdev->resource[i];
1442 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1443 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001444 iova = reserve_iova(&reserved_iova_list,
1445 IOVA_PFN(r->start),
1446 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001447 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001448 printk(KERN_ERR "Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001449 return -ENODEV;
1450 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001451 }
1452 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001453 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001454}
1455
1456static void domain_reserve_special_ranges(struct dmar_domain *domain)
1457{
1458 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1459}
1460
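/*
 * Round the guest address width up so that (width - 12) is a whole
 * number of 9-bit page-table levels, capping the result at 64.
 */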
1461static inline int guestwidth_to_adjustwidth(int gaw)
1462{
1463 int agaw;
1464 int r = (gaw - 12) % 9;
1465
1466 if (r == 0)
1467 agaw = gaw;
1468 else
1469 agaw = gaw + 9 - r;
1470 if (agaw > 64)
1471 agaw = 64;
1472 return agaw;
1473}
1474
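/*
 * Initialize a domain's iova space, address widths, capability flags
 * and top-level page table based on the IOMMU it is attached to.
 */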
1475static int domain_init(struct dmar_domain *domain, int guest_width)
1476{
1477 struct intel_iommu *iommu;
1478 int adjust_width, agaw;
1479 unsigned long sagaw;
1480
David Millerf6611972008-02-06 01:36:23 -08001481 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001482 domain_reserve_special_ranges(domain);
1483
1484 /* calculate AGAW */
Weidong Han8c11e792008-12-08 15:29:22 +08001485 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001486 if (guest_width > cap_mgaw(iommu->cap))
1487 guest_width = cap_mgaw(iommu->cap);
1488 domain->gaw = guest_width;
1489 adjust_width = guestwidth_to_adjustwidth(guest_width);
1490 agaw = width_to_agaw(adjust_width);
1491 sagaw = cap_sagaw(iommu->cap);
1492 if (!test_bit(agaw, &sagaw)) {
1493 /* hardware doesn't support it, choose a bigger one */
1494 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1495 agaw = find_next_bit(&sagaw, 5, agaw);
1496 if (agaw >= 5)
1497 return -ENODEV;
1498 }
1499 domain->agaw = agaw;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001500
Weidong Han8e6040972008-12-08 15:49:06 +08001501 if (ecap_coherent(iommu->ecap))
1502 domain->iommu_coherency = 1;
1503 else
1504 domain->iommu_coherency = 0;
1505
Sheng Yang58c610b2009-03-18 15:33:05 +08001506 if (ecap_sc_support(iommu->ecap))
1507 domain->iommu_snooping = 1;
1508 else
1509 domain->iommu_snooping = 0;
1510
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001511 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
Suresh Siddha4c923d42009-10-02 11:01:24 -07001512 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001513
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001514 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001515 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001516 if (!domain->pgd)
1517 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001518 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001519 return 0;
1520}
1521
1522static void domain_exit(struct dmar_domain *domain)
1523{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001524 struct dmar_drhd_unit *drhd;
1525 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001526
1527 /* Domain 0 is reserved, so don't process it */
1528 if (!domain)
1529 return;
1530
Alex Williamson7b668352011-05-24 12:02:41 +01001531 /* Flush any lazy unmaps that may reference this domain */
1532 if (!intel_iommu_strict)
1533 flush_unmaps_timeout(0);
1534
Jiang Liu92d03cc2014-02-19 14:07:28 +08001535 /* remove associated devices */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001536 domain_remove_dev_info(domain);
Jiang Liu92d03cc2014-02-19 14:07:28 +08001537
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001538 /* destroy iovas */
1539 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001540
1541 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01001542 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001543
1544 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01001545 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001546
Jiang Liu92d03cc2014-02-19 14:07:28 +08001547 /* clear attached or cached domains */
Jiang Liu0e242612014-02-19 14:07:34 +08001548 rcu_read_lock();
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001549 for_each_active_iommu(iommu, drhd)
Jiang Liu92d03cc2014-02-19 14:07:28 +08001550 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1551 test_bit(iommu->seq_id, domain->iommu_bmp))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001552 iommu_detach_domain(domain, iommu);
Jiang Liu0e242612014-02-19 14:07:34 +08001553 rcu_read_unlock();
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001554
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001555 free_domain_mem(domain);
1556}
1557
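/*
 * Program the context entry for one (segment, bus, devfn): pick a domain
 * id that is valid on this IOMMU, point the entry at the domain's page
 * tables (or enable pass-through), then flush the context cache and
 * IOTLB as the hardware requires.
 */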
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001558static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1559 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001560{
1561 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001562 unsigned long flags;
Weidong Han5331fe62008-12-08 23:00:00 +08001563 struct intel_iommu *iommu;
Weidong Hanea6606b2008-12-08 23:08:15 +08001564 struct dma_pte *pgd;
1565 unsigned long num;
1566 unsigned long ndomains;
1567 int id;
1568 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001569 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001570
1571 pr_debug("Set context mapping for %02x:%02x.%d\n",
1572 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001573
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001574 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001575 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1576 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001577
David Woodhouse276dbf992009-04-04 01:45:37 +01001578 iommu = device_to_iommu(segment, bus, devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001579 if (!iommu)
1580 return -ENODEV;
1581
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001582 context = device_to_context_entry(iommu, bus, devfn);
1583 if (!context)
1584 return -ENOMEM;
1585 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001586 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001587 spin_unlock_irqrestore(&iommu->lock, flags);
1588 return 0;
1589 }
1590
Weidong Hanea6606b2008-12-08 23:08:15 +08001591 id = domain->id;
1592 pgd = domain->pgd;
1593
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001594 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1595 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001596 int found = 0;
1597
1598 /* find an available domain id for this device in iommu */
1599 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001600 for_each_set_bit(num, iommu->domain_ids, ndomains) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001601 if (iommu->domains[num] == domain) {
1602 id = num;
1603 found = 1;
1604 break;
1605 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001606 }
1607
1608 if (found == 0) {
1609 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1610 if (num >= ndomains) {
1611 spin_unlock_irqrestore(&iommu->lock, flags);
1612 printk(KERN_ERR "IOMMU: no free domain ids\n");
1613 return -EFAULT;
1614 }
1615
1616 set_bit(num, iommu->domain_ids);
1617 iommu->domains[num] = domain;
1618 id = num;
1619 }
1620
1621 /* Skip top levels of page tables for
1622 * an iommu which has less agaw than the default.
Chris Wright1672af12009-12-02 12:06:34 -08001623 * Unnecessary for PT mode.
Weidong Hanea6606b2008-12-08 23:08:15 +08001624 */
Chris Wright1672af12009-12-02 12:06:34 -08001625 if (translation != CONTEXT_TT_PASS_THROUGH) {
1626 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1627 pgd = phys_to_virt(dma_pte_addr(pgd));
1628 if (!dma_pte_present(pgd)) {
1629 spin_unlock_irqrestore(&iommu->lock, flags);
1630 return -ENOMEM;
1631 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001632 }
1633 }
1634 }
1635
1636 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001637
Yu Zhao93a23a72009-05-18 13:51:37 +08001638 if (translation != CONTEXT_TT_PASS_THROUGH) {
1639 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1640 translation = info ? CONTEXT_TT_DEV_IOTLB :
1641 CONTEXT_TT_MULTI_LEVEL;
1642 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001643 /*
1644 * In pass-through mode, AW must be programmed to indicate the largest
1645 * AGAW value supported by hardware, and ASR is ignored by hardware.
1646 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001647 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001648 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001649 else {
1650 context_set_address_root(context, virt_to_phys(pgd));
1651 context_set_address_width(context, iommu->agaw);
1652 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001653
1654 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001655 context_set_fault_enable(context);
1656 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001657 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001658
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001659 /*
1660 * It's a non-present to present mapping. If hardware doesn't cache
1661 * non-present entries we only need to flush the write-buffer. If it
1662 * _does_ cache non-present entries, then it does so in the special
1663 * domain #0, which we have to flush:
1664 */
1665 if (cap_caching_mode(iommu->cap)) {
1666 iommu->flush.flush_context(iommu, 0,
1667 (((u16)bus) << 8) | devfn,
1668 DMA_CCMD_MASK_NOBIT,
1669 DMA_CCMD_DEVICE_INVL);
Nadav Amit82653632010-04-01 13:24:40 +03001670 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001671 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001672 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001673 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001674 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001675 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001676
1677 spin_lock_irqsave(&domain->iommu_lock, flags);
Mike Travis1b198bb2012-03-05 15:05:16 -08001678 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
Weidong Hanc7151a82008-12-08 22:51:37 +08001679 domain->iommu_count++;
Suresh Siddha4c923d42009-10-02 11:01:24 -07001680 if (domain->iommu_count == 1)
1681 domain->nid = iommu->node;
Sheng Yang58c610b2009-03-18 15:33:05 +08001682 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08001683 }
1684 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001685 return 0;
1686}
1687
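/*
 * Set up context entries for the device itself and, when it sits behind
 * a PCIe-to-PCI bridge, for the intervening bridges as well.
 */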
1688static int
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001689domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1690 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001691{
1692 int ret;
1693 struct pci_dev *tmp, *parent;
1694
David Woodhouse276dbf992009-04-04 01:45:37 +01001695 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001696 pdev->bus->number, pdev->devfn,
1697 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001698 if (ret)
1699 return ret;
1700
1701 /* dependent device mapping */
1702 tmp = pci_find_upstream_pcie_bridge(pdev);
1703 if (!tmp)
1704 return 0;
1705 /* Secondary interface's bus number and devfn 0 */
1706 parent = pdev->bus->self;
1707 while (parent != tmp) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001708 ret = domain_context_mapping_one(domain,
1709 pci_domain_nr(parent->bus),
1710 parent->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001711 parent->devfn, translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001712 if (ret)
1713 return ret;
1714 parent = parent->bus->self;
1715 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05001716 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001717 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001718 pci_domain_nr(tmp->subordinate),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001719 tmp->subordinate->number, 0,
1720 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001721 else /* this is a legacy PCI bridge */
1722 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001723 pci_domain_nr(tmp->bus),
1724 tmp->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001725 tmp->devfn,
1726 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001727}
1728
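/*
 * Check whether a context entry has been programmed for the device and
 * for every bridge upstream of it.
 */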
Weidong Han5331fe62008-12-08 23:00:00 +08001729static int domain_context_mapped(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001730{
1731 int ret;
1732 struct pci_dev *tmp, *parent;
Weidong Han5331fe62008-12-08 23:00:00 +08001733 struct intel_iommu *iommu;
1734
David Woodhouse276dbf992009-04-04 01:45:37 +01001735 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1736 pdev->devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001737 if (!iommu)
1738 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001739
David Woodhouse276dbf992009-04-04 01:45:37 +01001740 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001741 if (!ret)
1742 return ret;
1743 /* dependent device mapping */
1744 tmp = pci_find_upstream_pcie_bridge(pdev);
1745 if (!tmp)
1746 return ret;
1747 /* Secondary interface's bus number and devfn 0 */
1748 parent = pdev->bus->self;
1749 while (parent != tmp) {
Weidong Han8c11e792008-12-08 15:29:22 +08001750 ret = device_context_mapped(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01001751 parent->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001752 if (!ret)
1753 return ret;
1754 parent = parent->bus->self;
1755 }
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09001756 if (pci_is_pcie(tmp))
David Woodhouse276dbf992009-04-04 01:45:37 +01001757 return device_context_mapped(iommu, tmp->subordinate->number,
1758 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001759 else
David Woodhouse276dbf992009-04-04 01:45:37 +01001760 return device_context_mapped(iommu, tmp->bus->number,
1761 tmp->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001762}
1763
Fenghua Yuf5329592009-08-04 15:09:37 -07001764/* Returns a number of VTD pages, but aligned to MM page size */
1765static inline unsigned long aligned_nrpages(unsigned long host_addr,
1766 size_t size)
1767{
1768 host_addr &= ~PAGE_MASK;
1769 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1770}
1771
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001772/* Return largest possible superpage level for a given mapping */
1773static inline int hardware_largepage_caps(struct dmar_domain *domain,
1774 unsigned long iov_pfn,
1775 unsigned long phy_pfn,
1776 unsigned long pages)
1777{
1778 int support, level = 1;
1779 unsigned long pfnmerge;
1780
1781 support = domain->iommu_superpage;
1782
1783 /* To use a large page, the virtual *and* physical addresses
1784 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1785 of them will mean we have to use smaller pages. So just
1786 merge them and check both at once. */
1787 pfnmerge = iov_pfn | phy_pfn;
1788
1789 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1790 pages >>= VTD_STRIDE_SHIFT;
1791 if (!pages)
1792 break;
1793 pfnmerge >>= VTD_STRIDE_SHIFT;
1794 level++;
1795 support--;
1796 }
1797 return level;
1798}
1799
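/*
 * Core mapping loop: install PTEs for either a scatterlist or a
 * contiguous physical range, using superpages where alignment and
 * hardware support allow, and flush the CPU cache for the PTEs written.
 */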
David Woodhouse9051aa02009-06-29 12:30:54 +01001800static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1801 struct scatterlist *sg, unsigned long phys_pfn,
1802 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01001803{
1804 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01001805 phys_addr_t uninitialized_var(pteval);
David Woodhousee1605492009-06-29 11:17:38 +01001806 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
David Woodhouse9051aa02009-06-29 12:30:54 +01001807 unsigned long sg_res;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001808 unsigned int largepage_lvl = 0;
1809 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01001810
1811 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1812
1813 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1814 return -EINVAL;
1815
1816 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1817
David Woodhouse9051aa02009-06-29 12:30:54 +01001818 if (sg)
1819 sg_res = 0;
1820 else {
1821 sg_res = nr_pages + 1;
1822 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1823 }
1824
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001825 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01001826 uint64_t tmp;
1827
David Woodhousee1605492009-06-29 11:17:38 +01001828 if (!sg_res) {
Fenghua Yuf5329592009-08-04 15:09:37 -07001829 sg_res = aligned_nrpages(sg->offset, sg->length);
David Woodhousee1605492009-06-29 11:17:38 +01001830 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1831 sg->dma_length = sg->length;
1832 pteval = page_to_phys(sg_page(sg)) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001833 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01001834 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001835
David Woodhousee1605492009-06-29 11:17:38 +01001836 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001837 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
1838
David Woodhouse5cf0a762014-03-19 16:07:49 +00001839 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01001840 if (!pte)
1841 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001842 /* It is a large page */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00001843 if (largepage_lvl > 1) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001844 pteval |= DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00001845 /* Ensure that old small page tables are removed to make room
1846 for the superpage, if they exist. */
1847 dma_pte_clear_range(domain, iov_pfn,
1848 iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
1849 dma_pte_free_pagetable(domain, iov_pfn,
1850 iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
1851 } else {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001852 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00001853 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001854
David Woodhousee1605492009-06-29 11:17:38 +01001855 }
1856 /* We don't need a lock here; nobody else
1857 * touches the iova range
1858 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01001859 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01001860 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01001861 static int dumps = 5;
David Woodhousec85994e2009-07-01 19:21:24 +01001862 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1863 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01001864 if (dumps) {
1865 dumps--;
1866 debug_dma_dump_mappings(NULL);
1867 }
1868 WARN_ON(1);
1869 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001870
1871 lvl_pages = lvl_to_nr_pages(largepage_lvl);
1872
1873 BUG_ON(nr_pages < lvl_pages);
1874 BUG_ON(sg_res < lvl_pages);
1875
1876 nr_pages -= lvl_pages;
1877 iov_pfn += lvl_pages;
1878 phys_pfn += lvl_pages;
1879 pteval += lvl_pages * VTD_PAGE_SIZE;
1880 sg_res -= lvl_pages;
1881
1882 /* If the next PTE would be the first in a new page, then we
1883 need to flush the cache on the entries we've just written.
1884 And then we'll need to recalculate 'pte', so clear it and
1885 let it get set again in the if (!pte) block above.
1886
1887 If we're done (!nr_pages) we need to flush the cache too.
1888
1889 Also if we've been setting superpages, we may need to
1890 recalculate 'pte' and switch back to smaller pages for the
1891 end of the mapping, if the trailing size is not enough to
1892 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01001893 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001894 if (!nr_pages || first_pte_in_page(pte) ||
1895 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01001896 domain_flush_cache(domain, first_pte,
1897 (void *)pte - (void *)first_pte);
1898 pte = NULL;
1899 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001900
1901 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01001902 sg = sg_next(sg);
1903 }
1904 return 0;
1905}
1906
David Woodhouse9051aa02009-06-29 12:30:54 +01001907static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1908 struct scatterlist *sg, unsigned long nr_pages,
1909 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001910{
David Woodhouse9051aa02009-06-29 12:30:54 +01001911 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1912}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001913
David Woodhouse9051aa02009-06-29 12:30:54 +01001914static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1915 unsigned long phys_pfn, unsigned long nr_pages,
1916 int prot)
1917{
1918 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001919}
1920
Weidong Hanc7151a82008-12-08 22:51:37 +08001921static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001922{
Weidong Hanc7151a82008-12-08 22:51:37 +08001923 if (!iommu)
1924 return;
Weidong Han8c11e792008-12-08 15:29:22 +08001925
1926 clear_context_table(iommu, bus, devfn);
1927 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001928 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001929 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001930}
1931
David Woodhouse109b9b02012-05-25 17:43:02 +01001932static inline void unlink_domain_info(struct device_domain_info *info)
1933{
1934 assert_spin_locked(&device_domain_lock);
1935 list_del(&info->link);
1936 list_del(&info->global);
1937 if (info->dev)
1938 info->dev->dev.archdata.iommu = NULL;
1939}
1940
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001941static void domain_remove_dev_info(struct dmar_domain *domain)
1942{
1943 struct device_domain_info *info;
Jiang Liu92d03cc2014-02-19 14:07:28 +08001944 unsigned long flags, flags2;
Weidong Hanc7151a82008-12-08 22:51:37 +08001945 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001946
1947 spin_lock_irqsave(&device_domain_lock, flags);
1948 while (!list_empty(&domain->devices)) {
1949 info = list_entry(domain->devices.next,
1950 struct device_domain_info, link);
David Woodhouse109b9b02012-05-25 17:43:02 +01001951 unlink_domain_info(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001952 spin_unlock_irqrestore(&device_domain_lock, flags);
1953
Yu Zhao93a23a72009-05-18 13:51:37 +08001954 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01001955 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08001956 iommu_detach_dev(iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001957
Jiang Liu92d03cc2014-02-19 14:07:28 +08001958 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
1959 iommu_detach_dependent_devices(iommu, info->dev);
1960 /* clear this iommu in iommu_bmp, update iommu count
1961 * and capabilities
1962 */
1963 spin_lock_irqsave(&domain->iommu_lock, flags2);
1964 if (test_and_clear_bit(iommu->seq_id,
1965 domain->iommu_bmp)) {
1966 domain->iommu_count--;
1967 domain_update_iommu_cap(domain);
1968 }
1969 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
1970 }
1971
1972 free_devinfo_mem(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001973 spin_lock_irqsave(&device_domain_lock, flags);
1974 }
1975 spin_unlock_irqrestore(&device_domain_lock, flags);
1976}
1977
1978/*
1979 * find_domain
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001980 * Note: we use struct pci_dev->dev.archdata.iommu to store the info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001981 */
Kay, Allen M38717942008-09-09 18:37:29 +03001982static struct dmar_domain *
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001983find_domain(struct pci_dev *pdev)
1984{
1985 struct device_domain_info *info;
1986
1987 /* No lock here; assumes no domain exits in the normal case */
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001988 info = pdev->dev.archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001989 if (info)
1990 return info->domain;
1991 return NULL;
1992}
1993
Jiang Liu745f2582014-02-19 14:07:26 +08001994static inline struct dmar_domain *
1995dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
1996{
1997 struct device_domain_info *info;
1998
1999 list_for_each_entry(info, &device_domain_list, global)
2000 if (info->segment == segment && info->bus == bus &&
2001 info->devfn == devfn)
2002 return info->domain;
2003
2004 return NULL;
2005}
2006
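/*
 * Record a device-to-domain binding. If another thread has already set
 * up a domain for this device, reuse it: free the caller's domain and
 * return the existing one through @domp.
 */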
2007static int dmar_insert_dev_info(int segment, int bus, int devfn,
2008 struct pci_dev *dev, struct dmar_domain **domp)
2009{
2010 struct dmar_domain *found, *domain = *domp;
2011 struct device_domain_info *info;
2012 unsigned long flags;
2013
2014 info = alloc_devinfo_mem();
2015 if (!info)
2016 return -ENOMEM;
2017
2018 info->segment = segment;
2019 info->bus = bus;
2020 info->devfn = devfn;
2021 info->dev = dev;
2022 info->domain = domain;
2023 if (!dev)
2024 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
2025
2026 spin_lock_irqsave(&device_domain_lock, flags);
2027 if (dev)
2028 found = find_domain(dev);
2029 else
2030 found = dmar_search_domain_by_dev_info(segment, bus, devfn);
2031 if (found) {
2032 spin_unlock_irqrestore(&device_domain_lock, flags);
2033 free_devinfo_mem(info);
2034 if (found != domain) {
2035 domain_exit(domain);
2036 *domp = found;
2037 }
2038 } else {
2039 list_add(&info->link, &domain->devices);
2040 list_add(&info->global, &device_domain_list);
2041 if (dev)
2042 dev->dev.archdata.iommu = info;
2043 spin_unlock_irqrestore(&device_domain_lock, flags);
2044 }
2045
2046 return 0;
2047}
2048
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002049/* domain is initialized */
2050static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
2051{
Jiang Liue85bb5d2014-02-19 14:07:27 +08002052 struct dmar_domain *domain, *free = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002053 struct intel_iommu *iommu;
2054 struct dmar_drhd_unit *drhd;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002055 struct pci_dev *dev_tmp;
2056 unsigned long flags;
2057 int bus = 0, devfn = 0;
David Woodhouse276dbf992009-04-04 01:45:37 +01002058 int segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002059
2060 domain = find_domain(pdev);
2061 if (domain)
2062 return domain;
2063
David Woodhouse276dbf992009-04-04 01:45:37 +01002064 segment = pci_domain_nr(pdev->bus);
2065
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002066 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
2067 if (dev_tmp) {
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09002068 if (pci_is_pcie(dev_tmp)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002069 bus = dev_tmp->subordinate->number;
2070 devfn = 0;
2071 } else {
2072 bus = dev_tmp->bus->number;
2073 devfn = dev_tmp->devfn;
2074 }
2075 spin_lock_irqsave(&device_domain_lock, flags);
Jiang Liu745f2582014-02-19 14:07:26 +08002076 domain = dmar_search_domain_by_dev_info(segment, bus, devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002077 spin_unlock_irqrestore(&device_domain_lock, flags);
2078 /* pcie-pci bridge already has a domain, use it */
Jiang Liu745f2582014-02-19 14:07:26 +08002079 if (domain)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002080 goto found_domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002081 }
2082
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002083 drhd = dmar_find_matched_drhd_unit(pdev);
2084 if (!drhd) {
2085 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
2086 pci_name(pdev));
2087 return NULL;
2088 }
2089 iommu = drhd->iommu;
2090
Jiang Liu745f2582014-02-19 14:07:26 +08002091 /* Allocate and initialize a new domain for the device */
Jiang Liu92d03cc2014-02-19 14:07:28 +08002092 domain = alloc_domain(false);
Jiang Liu745f2582014-02-19 14:07:26 +08002093 if (!domain)
2094 goto error;
2095 if (iommu_attach_domain(domain, iommu)) {
Alex Williamson2fe9723d2011-03-04 14:52:30 -07002096 free_domain_mem(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002097 goto error;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002098 }
Jiang Liue85bb5d2014-02-19 14:07:27 +08002099 free = domain;
2100 if (domain_init(domain, gaw))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002101 goto error;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002102
2103 /* register pcie-to-pci device */
2104 if (dev_tmp) {
Jiang Liue85bb5d2014-02-19 14:07:27 +08002105 if (dmar_insert_dev_info(segment, bus, devfn, NULL, &domain))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002106 goto error;
Jiang Liue85bb5d2014-02-19 14:07:27 +08002107 else
2108 free = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002109 }
2110
2111found_domain:
Jiang Liu745f2582014-02-19 14:07:26 +08002112 if (dmar_insert_dev_info(segment, pdev->bus->number, pdev->devfn,
2113 pdev, &domain) == 0)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002114 return domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002115error:
Jiang Liue85bb5d2014-02-19 14:07:27 +08002116 if (free)
2117 domain_exit(free);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002118 /* recheck it here; another thread may have set it */
2119 return find_domain(pdev);
2120}
2121
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002122static int iommu_identity_mapping;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002123#define IDENTMAP_ALL 1
2124#define IDENTMAP_GFX 2
2125#define IDENTMAP_AZALIA 4
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002126
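/*
 * Reserve the iova range and install a 1:1 (identity) mapping for the
 * physical range [start, end] in the domain.
 */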
David Woodhouseb2132032009-06-26 18:50:28 +01002127static int iommu_domain_identity_map(struct dmar_domain *domain,
2128 unsigned long long start,
2129 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002130{
David Woodhousec5395d52009-06-28 16:35:56 +01002131 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2132 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002133
David Woodhousec5395d52009-06-28 16:35:56 +01002134 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2135 dma_to_mm_pfn(last_vpfn))) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002136 printk(KERN_ERR "IOMMU: reserve iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002137 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002138 }
2139
David Woodhousec5395d52009-06-28 16:35:56 +01002140 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2141 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002142 /*
2143 * The RMRR range might overlap with the physical memory range,
2144 * so clear it first
2145 */
David Woodhousec5395d52009-06-28 16:35:56 +01002146 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002147
David Woodhousec5395d52009-06-28 16:35:56 +01002148 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2149 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002150 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002151}
2152
2153static int iommu_prepare_identity_map(struct pci_dev *pdev,
2154 unsigned long long start,
2155 unsigned long long end)
2156{
2157 struct dmar_domain *domain;
2158 int ret;
2159
David Woodhousec7ab48d2009-06-26 19:10:36 +01002160 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01002161 if (!domain)
2162 return -ENOMEM;
2163
David Woodhouse19943b02009-08-04 16:19:20 +01002164 /* For _hardware_ passthrough, don't bother. But for software
2165 passthrough, we do it anyway -- it may indicate a memory
2166 range which is reserved in E820 and so didn't get set
2167 up in si_domain to start with */
2168 if (domain == si_domain && hw_pass_through) {
2169 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2170 pci_name(pdev), start, end);
2171 return 0;
2172 }
2173
2174 printk(KERN_INFO
2175 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2176 pci_name(pdev), start, end);
David Woodhouse2ff729f2009-08-26 14:25:41 +01002177
David Woodhouse5595b522009-12-02 09:21:55 +00002178 if (end < start) {
2179 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2180 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2181 dmi_get_system_info(DMI_BIOS_VENDOR),
2182 dmi_get_system_info(DMI_BIOS_VERSION),
2183 dmi_get_system_info(DMI_PRODUCT_VERSION));
2184 ret = -EIO;
2185 goto error;
2186 }
2187
David Woodhouse2ff729f2009-08-26 14:25:41 +01002188 if (end >> agaw_to_width(domain->agaw)) {
2189 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2190 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2191 agaw_to_width(domain->agaw),
2192 dmi_get_system_info(DMI_BIOS_VENDOR),
2193 dmi_get_system_info(DMI_BIOS_VERSION),
2194 dmi_get_system_info(DMI_PRODUCT_VERSION));
2195 ret = -EIO;
2196 goto error;
2197 }
David Woodhouse19943b02009-08-04 16:19:20 +01002198
David Woodhouseb2132032009-06-26 18:50:28 +01002199 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002200 if (ret)
2201 goto error;
2202
2203 /* context entry init */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002204 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01002205 if (ret)
2206 goto error;
2207
2208 return 0;
2209
2210 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002211 domain_exit(domain);
2212 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002213}
2214
2215static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2216 struct pci_dev *pdev)
2217{
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002218 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002219 return 0;
2220 return iommu_prepare_identity_map(pdev, rmrr->base_address,
David Woodhouse70e535d2011-05-31 00:22:52 +01002221 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002222}
2223
Suresh Siddhad3f13812011-08-23 17:05:25 -07002224#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002225static inline void iommu_prepare_isa(void)
2226{
2227 struct pci_dev *pdev;
2228 int ret;
2229
2230 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2231 if (!pdev)
2232 return;
2233
David Woodhousec7ab48d2009-06-26 19:10:36 +01002234 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse70e535d2011-05-31 00:22:52 +01002235 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002236
2237 if (ret)
David Woodhousec7ab48d2009-06-26 19:10:36 +01002238 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2239 "floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002240
2241}
2242#else
2243static inline void iommu_prepare_isa(void)
2244{
2245 return;
2246}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002247#endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002248
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002249static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002250
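/*
 * Create the static identity (si) domain, attach it to every IOMMU and,
 * unless hardware pass-through is in use, identity-map all usable system
 * memory into it.
 */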
Matt Kraai071e1372009-08-23 22:30:22 -07002251static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002252{
2253 struct dmar_drhd_unit *drhd;
2254 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002255 int nid, ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002256
Jiang Liu92d03cc2014-02-19 14:07:28 +08002257 si_domain = alloc_domain(false);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002258 if (!si_domain)
2259 return -EFAULT;
2260
Jiang Liu92d03cc2014-02-19 14:07:28 +08002261 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2262
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002263 for_each_active_iommu(iommu, drhd) {
2264 ret = iommu_attach_domain(si_domain, iommu);
2265 if (ret) {
2266 domain_exit(si_domain);
2267 return -EFAULT;
2268 }
2269 }
2270
2271 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2272 domain_exit(si_domain);
2273 return -EFAULT;
2274 }
2275
Jiang Liu9544c002014-01-06 14:18:13 +08002276 pr_debug("IOMMU: identity mapping domain is domain %d\n",
2277 si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002278
David Woodhouse19943b02009-08-04 16:19:20 +01002279 if (hw)
2280 return 0;
2281
David Woodhousec7ab48d2009-06-26 19:10:36 +01002282 for_each_online_node(nid) {
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002283 unsigned long start_pfn, end_pfn;
2284 int i;
2285
2286 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2287 ret = iommu_domain_identity_map(si_domain,
2288 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2289 if (ret)
2290 return ret;
2291 }
David Woodhousec7ab48d2009-06-26 19:10:36 +01002292 }
2293
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002294 return 0;
2295}
2296
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002297static int identity_mapping(struct pci_dev *pdev)
2298{
2299 struct device_domain_info *info;
2300
2301 if (likely(!iommu_identity_mapping))
2302 return 0;
2303
Mike Traviscb452a42011-05-28 13:15:03 -05002304 info = pdev->dev.archdata.iommu;
2305 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2306 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002307
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002308 return 0;
2309}
2310
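/*
 * Attach the device to the domain: record the device_domain_info and
 * program the context entry, undoing the bookkeeping if the mapping fails.
 */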
2311static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5fe60f42009-08-09 10:53:41 +01002312 struct pci_dev *pdev,
2313 int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002314{
2315 struct device_domain_info *info;
2316 unsigned long flags;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002317 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002318
2319 info = alloc_devinfo_mem();
2320 if (!info)
2321 return -ENOMEM;
2322
2323 info->segment = pci_domain_nr(pdev->bus);
2324 info->bus = pdev->bus->number;
2325 info->devfn = pdev->devfn;
2326 info->dev = pdev;
2327 info->domain = domain;
2328
2329 spin_lock_irqsave(&device_domain_lock, flags);
2330 list_add(&info->link, &domain->devices);
2331 list_add(&info->global, &device_domain_list);
2332 pdev->dev.archdata.iommu = info;
2333 spin_unlock_irqrestore(&device_domain_lock, flags);
2334
David Woodhousee2ad23d2012-05-25 17:42:54 +01002335 ret = domain_context_mapping(domain, pdev, translation);
2336 if (ret) {
2337 spin_lock_irqsave(&device_domain_lock, flags);
David Woodhouse109b9b02012-05-25 17:43:02 +01002338 unlink_domain_info(info);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002339 spin_unlock_irqrestore(&device_domain_lock, flags);
2340 free_devinfo_mem(info);
2341 return ret;
2342 }
2343
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002344 return 0;
2345}
2346
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002347static bool device_has_rmrr(struct pci_dev *dev)
2348{
2349 struct dmar_rmrr_unit *rmrr;
Jiang Liub683b232014-02-19 14:07:32 +08002350 struct pci_dev *tmp;
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002351 int i;
2352
Jiang Liu0e242612014-02-19 14:07:34 +08002353 rcu_read_lock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002354 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002355 /*
2356 * Return TRUE if this RMRR contains the device that
2357 * is passed in.
2358 */
2359 for_each_active_dev_scope(rmrr->devices,
2360 rmrr->devices_cnt, i, tmp)
2361 if (tmp == dev) {
Jiang Liu0e242612014-02-19 14:07:34 +08002362 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002363 return true;
Jiang Liub683b232014-02-19 14:07:32 +08002364 }
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002365 }
Jiang Liu0e242612014-02-19 14:07:34 +08002366 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002367 return false;
2368}
2369
David Woodhouse6941af22009-07-04 18:24:27 +01002370static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2371{
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002372
2373 /*
2374 * We want to prevent any device associated with an RMRR from
2375 * getting placed into the SI Domain. This is done because
2376 * problems exist when devices are moved in and out of domains
2377 * and their respective RMRR info is lost. We exempt USB devices
2378 * from this process due to their usage of RMRRs that are known
2379 * to not be needed after BIOS hand-off to OS.
2380 */
2381 if (device_has_rmrr(pdev) &&
2382 (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
2383 return 0;
2384
David Woodhousee0fc7e02009-09-30 09:12:17 -07002385 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2386 return 1;
2387
2388 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2389 return 1;
2390
2391 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2392 return 0;
David Woodhouse6941af22009-07-04 18:24:27 +01002393
David Woodhouse3dfc8132009-07-04 19:11:08 +01002394 /*
2395 * We want to start off with all devices in the 1:1 domain, and
2396 * take them out later if we find they can't access all of memory.
2397 *
2398 * However, we can't do this for PCI devices behind bridges,
2399 * because all PCI devices behind the same bridge will end up
2400 * with the same source-id on their transactions.
2401 *
2402 * Practically speaking, we can't change things around for these
2403 * devices at run-time, because we can't be sure there'll be no
2404 * DMA transactions in flight for any of their siblings.
2405 *
2406 * So PCI devices (unless they're on the root bus) as well as
2407 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2408 * the 1:1 domain, just in _case_ one of their siblings turns out
2409 * not to be able to map all of memory.
2410 */
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09002411 if (!pci_is_pcie(pdev)) {
David Woodhouse3dfc8132009-07-04 19:11:08 +01002412 if (!pci_is_root_bus(pdev->bus))
2413 return 0;
2414 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2415 return 0;
Yijing Wang62f87c02012-07-24 17:20:03 +08002416 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
David Woodhouse3dfc8132009-07-04 19:11:08 +01002417 return 0;
2418
2419 /*
2420 * At boot time, we don't yet know if devices will be 64-bit capable.
2421 * Assume that they will -- if they turn out not to be, then we can
2422 * take them out of the 1:1 domain later.
2423 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002424 if (!startup) {
2425 /*
2426 * If the device's dma_mask is less than the system's memory
2427 * size then this is not a candidate for identity mapping.
2428 */
2429 u64 dma_mask = pdev->dma_mask;
2430
2431 if (pdev->dev.coherent_dma_mask &&
2432 pdev->dev.coherent_dma_mask < dma_mask)
2433 dma_mask = pdev->dev.coherent_dma_mask;
2434
2435 return dma_mask >= dma_get_required_mask(&pdev->dev);
2436 }
David Woodhouse6941af22009-07-04 18:24:27 +01002437
2438 return 1;
2439}
2440
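/*
 * At boot, place every device that qualifies for identity mapping into
 * the si domain, using hardware pass-through when available.
 */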
Matt Kraai071e1372009-08-23 22:30:22 -07002441static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002442{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002443 struct pci_dev *pdev = NULL;
2444 int ret;
2445
David Woodhouse19943b02009-08-04 16:19:20 +01002446 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002447 if (ret)
2448 return -EFAULT;
2449
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002450 for_each_pci_dev(pdev) {
David Woodhouse6941af22009-07-04 18:24:27 +01002451 if (iommu_should_identity_map(pdev, 1)) {
David Woodhouse5fe60f42009-08-09 10:53:41 +01002452 ret = domain_add_dev_info(si_domain, pdev,
Mike Traviseae460b2012-03-05 15:05:16 -08002453 hw ? CONTEXT_TT_PASS_THROUGH :
2454 CONTEXT_TT_MULTI_LEVEL);
2455 if (ret) {
2456 /* device not associated with an iommu */
2457 if (ret == -ENODEV)
2458 continue;
David Woodhouse62edf5d2009-07-04 10:59:46 +01002459 return ret;
Mike Traviseae460b2012-03-05 15:05:16 -08002460 }
2461 pr_info("IOMMU: %s identity mapping for device %s\n",
2462 hw ? "hardware" : "software", pci_name(pdev));
David Woodhouse62edf5d2009-07-04 10:59:46 +01002463 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002464 }
2465
2466 return 0;
2467}
2468
Joseph Cihulab7792602011-05-03 00:08:37 -07002469static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002470{
2471 struct dmar_drhd_unit *drhd;
2472 struct dmar_rmrr_unit *rmrr;
2473 struct pci_dev *pdev;
2474 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002475 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002476
2477 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002478 * for each drhd
2479 * allocate root
2480 * initialize and program root entry to not present
2481 * endfor
2482 */
2483 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002484 /*
2485 * lock not needed as this is only incremented in the single-
2486 * threaded kernel __init code path; all other accesses are
2487 * read only
2488 */
Mike Travis1b198bb2012-03-05 15:05:16 -08002489 if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
2490 g_num_of_iommus++;
2491 continue;
2492 }
2493 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
2494 IOMMU_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08002495 }
2496
Weidong Hand9630fe2008-12-08 11:06:32 +08002497 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2498 GFP_KERNEL);
2499 if (!g_iommus) {
2500 printk(KERN_ERR "Allocating global iommu array failed\n");
2501 ret = -ENOMEM;
2502 goto error;
2503 }
2504
mark gross80b20dd2008-04-18 13:53:58 -07002505 deferred_flush = kzalloc(g_num_of_iommus *
2506 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2507 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002508 ret = -ENOMEM;
Jiang Liu989d51f2014-02-19 14:07:21 +08002509 goto free_g_iommus;
mark gross5e0d2a62008-03-04 15:22:08 -08002510 }
2511
Jiang Liu7c919772014-01-06 14:18:18 +08002512 for_each_active_iommu(iommu, drhd) {
Weidong Hand9630fe2008-12-08 11:06:32 +08002513 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002514
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002515 ret = iommu_init_domains(iommu);
2516 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002517 goto free_iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002518
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002519 /*
2520 * TBD:
2521 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002522 * among all IOMMUs. Need to split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002523 */
2524 ret = iommu_alloc_root_entry(iommu);
2525 if (ret) {
2526 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08002527 goto free_iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002528 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002529 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002530 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002531 }
2532
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002533 /*
2534 * Start from the sane iommu hardware state.
2535 */
Jiang Liu7c919772014-01-06 14:18:18 +08002536 for_each_active_iommu(iommu, drhd) {
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002537 /*
2538 * If the queued invalidation is already initialized by us
2539 * (for example, while enabling interrupt-remapping) then
2540 * we got the things already rolling from a sane state.
2541 */
2542 if (iommu->qi)
2543 continue;
2544
2545 /*
2546 * Clear any previous faults.
2547 */
2548 dmar_fault(-1, iommu);
2549 /*
2550 * Disable queued invalidation if supported and already enabled
2551 * before OS handover.
2552 */
2553 dmar_disable_qi(iommu);
2554 }
2555
Jiang Liu7c919772014-01-06 14:18:18 +08002556 for_each_active_iommu(iommu, drhd) {
Youquan Songa77b67d2008-10-16 16:31:56 -07002557 if (dmar_enable_qi(iommu)) {
2558 /*
2559 * Queued Invalidate not enabled, use Register Based
2560 * Invalidate
2561 */
2562 iommu->flush.flush_context = __iommu_flush_context;
2563 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002564 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002565 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002566 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002567 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002568 } else {
2569 iommu->flush.flush_context = qi_flush_context;
2570 iommu->flush.flush_iotlb = qi_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002571 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002572 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002573 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002574 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002575 }
2576 }
2577
David Woodhouse19943b02009-08-04 16:19:20 +01002578 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07002579 iommu_identity_mapping |= IDENTMAP_ALL;
2580
Suresh Siddhad3f13812011-08-23 17:05:25 -07002581#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07002582 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002583#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07002584
2585 check_tylersburg_isoch();
2586
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002587 /*
2588 * If pass through is not set or not enabled, set up context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002589 * identity mappings for rmrr, gfx, and isa; this may fall back to static
2590 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002591 */
David Woodhouse19943b02009-08-04 16:19:20 +01002592 if (iommu_identity_mapping) {
2593 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2594 if (ret) {
2595 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08002596 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002597 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002598 }
David Woodhouse19943b02009-08-04 16:19:20 +01002599 /*
2600 * For each rmrr
2601 * for each dev attached to rmrr
2602 * do
2603 * locate drhd for dev, alloc domain for dev
2604 * allocate free domain
2605 * allocate page table entries for rmrr
2606 * if context not allocated for bus
2607 * allocate and init context
2608 * set present in root table for this bus
2609 * init context with domain, translation etc
2610 * endfor
2611 * endfor
2612 */
2613 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2614 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002615 /* some BIOSes list non-existent devices in the DMAR table. */
2616 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
2617 i, pdev) {
David Woodhouse19943b02009-08-04 16:19:20 +01002618 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2619 if (ret)
2620 printk(KERN_ERR
2621 "IOMMU: mapping reserved region failed\n");
2622 }
2623 }
2624
2625 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002626
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002627 /*
2628 * for each drhd
2629 * enable fault log
2630 * global invalidate context cache
2631 * global invalidate iotlb
2632 * enable translation
2633 */
Jiang Liu7c919772014-01-06 14:18:18 +08002634 for_each_iommu(iommu, drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07002635 if (drhd->ignored) {
2636 /*
2637 * we always have to disable PMRs or DMA may fail on
2638 * this device
2639 */
2640 if (force_on)
Jiang Liu7c919772014-01-06 14:18:18 +08002641 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002642 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07002643 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002644
2645 iommu_flush_write_buffer(iommu);
2646
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002647 ret = dmar_set_interrupt(iommu);
2648 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002649 goto free_iommu;
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002650
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002651 iommu_set_root_entry(iommu);
2652
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002653 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002654 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
mark grossf8bab732008-02-08 04:18:38 -08002655
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002656 ret = iommu_enable_translation(iommu);
2657 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002658 goto free_iommu;
David Woodhouseb94996c2009-09-19 15:28:12 -07002659
2660 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002661 }
2662
2663 return 0;
Jiang Liu989d51f2014-02-19 14:07:21 +08002664
2665free_iommu:
Jiang Liu7c919772014-01-06 14:18:18 +08002666 for_each_active_iommu(iommu, drhd)
Jiang Liua868e6b2014-01-06 14:18:20 +08002667 free_dmar_iommu(iommu);
Jiang Liu9bdc5312014-01-06 14:18:27 +08002668 kfree(deferred_flush);
Jiang Liu989d51f2014-02-19 14:07:21 +08002669free_g_iommus:
Weidong Hand9630fe2008-12-08 11:06:32 +08002670 kfree(g_iommus);
Jiang Liu989d51f2014-02-19 14:07:21 +08002671error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002672 return ret;
2673}
2674
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002675/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002676static struct iova *intel_alloc_iova(struct device *dev,
2677 struct dmar_domain *domain,
2678 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002679{
2680 struct pci_dev *pdev = to_pci_dev(dev);
2681 struct iova *iova = NULL;
2682
David Woodhouse875764d2009-06-28 21:20:51 +01002683 /* Restrict dma_mask to the width that the iommu can handle */
2684 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2685
2686 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002687 /*
2688 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002689 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002690 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002691 */
David Woodhouse875764d2009-06-28 21:20:51 +01002692 iova = alloc_iova(&domain->iovad, nrpages,
2693 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2694 if (iova)
2695 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002696 }
David Woodhouse875764d2009-06-28 21:20:51 +01002697 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2698 if (unlikely(!iova)) {
2699 printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
2700 nrpages, pci_name(pdev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002701 return NULL;
2702 }
2703
2704 return iova;
2705}
2706
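/*
 * Find or allocate the DMA domain for a device and make sure its
 * context entry is programmed before any mapping is attempted.
 */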
David Woodhouse147202a2009-07-07 19:43:20 +01002707static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002708{
2709 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002710 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002711
2712 domain = get_domain_for_dev(pdev,
2713 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2714 if (!domain) {
2715 printk(KERN_ERR
2716 "Allocating domain for %s failed", pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002717 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002718 }
2719
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002720 /* make sure context mapping is ok */
Weidong Han5331fe62008-12-08 23:00:00 +08002721 if (unlikely(!domain_context_mapped(pdev))) {
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002722 ret = domain_context_mapping(domain, pdev,
2723 CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002724 if (ret) {
2725 printk(KERN_ERR
2726 "Domain context map for %s failed",
2727 pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002728 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002729 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002730 }
2731
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002732 return domain;
2733}
2734
David Woodhouse147202a2009-07-07 19:43:20 +01002735static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2736{
2737 struct device_domain_info *info;
2738
2739 /* No lock here, assumes no domain exit in normal case */
2740 info = dev->dev.archdata.iommu;
2741 if (likely(info))
2742 return info->domain;
2743
2744 return __get_valid_domain_for_dev(dev);
2745}
2746
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002747static int iommu_dummy(struct pci_dev *pdev)
2748{
2749 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2750}
2751
2752/* Check if the pdev needs to go through the non-identity map and unmap process. */
David Woodhouse73676832009-07-04 14:08:36 +01002753static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002754{
David Woodhouse73676832009-07-04 14:08:36 +01002755 struct pci_dev *pdev;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002756 int found;
2757
Yijing Wangdbad0862013-12-05 19:43:42 +08002758 if (unlikely(!dev_is_pci(dev)))
David Woodhouse73676832009-07-04 14:08:36 +01002759 return 1;
2760
2761 pdev = to_pci_dev(dev);
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002762 if (iommu_dummy(pdev))
2763 return 1;
2764
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002765 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002766 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002767
2768 found = identity_mapping(pdev);
2769 if (found) {
David Woodhouse6941af22009-07-04 18:24:27 +01002770 if (iommu_should_identity_map(pdev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002771 return 1;
2772 else {
2773 /*
2774 * The 32 bit DMA device is removed from si_domain and falls
2775 * back to non-identity mapping.
2776 */
2777 domain_remove_one_dev_info(si_domain, pdev);
2778 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2779 pci_name(pdev));
2780 return 0;
2781 }
2782 } else {
2783 /*
2784 * When a 64 bit DMA device is detached from a VM, the device
2785 * is put into si_domain for identity mapping.
2786 */
David Woodhouse6941af22009-07-04 18:24:27 +01002787 if (iommu_should_identity_map(pdev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002788 int ret;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002789 ret = domain_add_dev_info(si_domain, pdev,
2790 hw_pass_through ?
2791 CONTEXT_TT_PASS_THROUGH :
2792 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002793 if (!ret) {
2794 printk(KERN_INFO "64bit %s uses identity mapping\n",
2795 pci_name(pdev));
2796 return 1;
2797 }
2798 }
2799 }
2800
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002801 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002802}
2803
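/*
 * Map a physically contiguous buffer for DMA.  Devices that bypass
 * translation get the physical address back unchanged; otherwise an
 * IOVA range is allocated, the page tables are populated with the
 * proper read/write permissions, and the IOTLB (caching mode) or the
 * write buffer is flushed.
 */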
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002804static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2805 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002806{
2807 struct pci_dev *pdev = to_pci_dev(hwdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002808 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002809 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002810 struct iova *iova;
2811 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002812 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08002813 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07002814 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002815
2816 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002817
David Woodhouse73676832009-07-04 14:08:36 +01002818 if (iommu_no_mapping(hwdev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002819 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002820
2821 domain = get_valid_domain_for_dev(pdev);
2822 if (!domain)
2823 return 0;
2824
Weidong Han8c11e792008-12-08 15:29:22 +08002825 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01002826 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002827
Mike Travisc681d0b2011-05-28 13:15:05 -05002828 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002829 if (!iova)
2830 goto error;
2831
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002832 /*
2833 * Check if DMAR supports zero-length reads on write only
2834 * mappings..
2835 */
2836 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002837 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002838 prot |= DMA_PTE_READ;
2839 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2840 prot |= DMA_PTE_WRITE;
2841 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002842 * paddr..(paddr + size) might span a partial page, so we map the whole
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002843 * page. Note: if two parts of one page are separately mapped, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002844 * might have two guest_addr mappings to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002845 * is not a big problem
2846 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01002847 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07002848 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002849 if (ret)
2850 goto error;
2851
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002852 /* it's a non-present to present mapping. Only flush if caching mode */
2853 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03002854 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002855 else
Weidong Han8c11e792008-12-08 15:29:22 +08002856 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002857
David Woodhouse03d6a242009-06-28 15:33:46 +01002858 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2859 start_paddr += paddr & ~PAGE_MASK;
2860 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002861
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002862error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002863 if (iova)
2864 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00002865 printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002866 pci_name(pdev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002867 return 0;
2868}
2869
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002870static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2871 unsigned long offset, size_t size,
2872 enum dma_data_direction dir,
2873 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002874{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002875 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2876 dir, to_pci_dev(dev)->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002877}
2878
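/*
 * Drain the deferred-unmap work for every IOMMU: issue the pending
 * IOTLB invalidations (page-selective per IOVA in caching mode, a
 * global flush plus device-IOTLB flushes otherwise) and free the
 * queued IOVAs.
 */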
mark gross5e0d2a62008-03-04 15:22:08 -08002879static void flush_unmaps(void)
2880{
mark gross80b20dd2008-04-18 13:53:58 -07002881 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08002882
mark gross5e0d2a62008-03-04 15:22:08 -08002883 timer_on = 0;
2884
2885 /* just flush them all */
2886 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08002887 struct intel_iommu *iommu = g_iommus[i];
2888 if (!iommu)
2889 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002890
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002891 if (!deferred_flush[i].next)
2892 continue;
2893
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002894 /* In caching mode, global flushes make emulation expensive */
2895 if (!cap_caching_mode(iommu->cap))
2896 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08002897 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002898 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08002899 unsigned long mask;
2900 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002901 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08002902
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002903 /* On real hardware multiple invalidations are expensive */
2904 if (cap_caching_mode(iommu->cap))
2905 iommu_flush_iotlb_psi(iommu, domain->id,
2906 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
2907 else {
2908 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
2909 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2910 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
2911 }
Yu Zhao93a23a72009-05-18 13:51:37 +08002912 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
mark gross80b20dd2008-04-18 13:53:58 -07002913 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002914 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002915 }
2916
mark gross5e0d2a62008-03-04 15:22:08 -08002917 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002918}
2919
2920static void flush_unmaps_timeout(unsigned long data)
2921{
mark gross80b20dd2008-04-18 13:53:58 -07002922 unsigned long flags;
2923
2924 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002925 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07002926 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002927}
2928
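/*
 * Queue an IOVA for deferred freeing on its IOMMU's deferred_flush
 * table: flush first if HIGH_WATER_MARK entries are already pending,
 * and arm a 10ms timer if one is not already running.
 */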
2929static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2930{
2931 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07002932 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08002933 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08002934
2935 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07002936 if (list_size == HIGH_WATER_MARK)
2937 flush_unmaps();
2938
Weidong Han8c11e792008-12-08 15:29:22 +08002939 iommu = domain_get_iommu(dom);
2940 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002941
mark gross80b20dd2008-04-18 13:53:58 -07002942 next = deferred_flush[iommu_id].next;
2943 deferred_flush[iommu_id].domain[next] = dom;
2944 deferred_flush[iommu_id].iova[next] = iova;
2945 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08002946
2947 if (!timer_on) {
2948 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2949 timer_on = 1;
2950 }
2951 list_size++;
2952 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2953}
2954
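/*
 * Tear down a DMA mapping: clear the PTEs and page tables backing the
 * IOVA, then either flush the IOTLB immediately (strict mode) or queue
 * the IOVA for batched, timer-driven release via add_unmap().
 */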
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002955static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2956 size_t size, enum dma_data_direction dir,
2957 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002958{
2959 struct pci_dev *pdev = to_pci_dev(dev);
2960 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002961 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002962 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002963 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002964
David Woodhouse73676832009-07-04 14:08:36 +01002965 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002966 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002967
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002968 domain = find_domain(pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002969 BUG_ON(!domain);
2970
Weidong Han8c11e792008-12-08 15:29:22 +08002971 iommu = domain_get_iommu(domain);
2972
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002973 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01002974 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2975 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002976 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002977
David Woodhoused794dc92009-06-28 00:27:49 +01002978 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2979 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002980
David Woodhoused794dc92009-06-28 00:27:49 +01002981 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2982 pci_name(pdev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002983
2984 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002985 dma_pte_clear_range(domain, start_pfn, last_pfn);
2986
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002987 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01002988 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2989
mark gross5e0d2a62008-03-04 15:22:08 -08002990 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01002991 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03002992 last_pfn - start_pfn + 1, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08002993 /* free iova */
2994 __free_iova(&domain->iovad, iova);
2995 } else {
2996 add_unmap(domain, iova);
2997 /*
2998 * queue up the release of the unmap to save roughly 1/6th of
2999 * the cpu time used up by the iotlb flush operation...
3000 */
mark gross5e0d2a62008-03-04 15:22:08 -08003001 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003002}
3003
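/*
 * Allocate zeroed pages for a coherent buffer, choosing GFP zone flags
 * from the device's coherent DMA mask when translation is bypassed,
 * and map them through __intel_map_single().
 */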
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003004static void *intel_alloc_coherent(struct device *hwdev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003005 dma_addr_t *dma_handle, gfp_t flags,
3006 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003007{
3008 void *vaddr;
3009 int order;
3010
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003011 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003012 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003013
3014 if (!iommu_no_mapping(hwdev))
3015 flags &= ~(GFP_DMA | GFP_DMA32);
3016 else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
3017 if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
3018 flags |= GFP_DMA;
3019 else
3020 flags |= GFP_DMA32;
3021 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003022
3023 vaddr = (void *)__get_free_pages(flags, order);
3024 if (!vaddr)
3025 return NULL;
3026 memset(vaddr, 0, size);
3027
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003028 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
3029 DMA_BIDIRECTIONAL,
3030 hwdev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003031 if (*dma_handle)
3032 return vaddr;
3033 free_pages((unsigned long)vaddr, order);
3034 return NULL;
3035}
3036
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003037static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003038 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003039{
3040 int order;
3041
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003042 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003043 order = get_order(size);
3044
David Woodhouse0db9b7a2009-07-14 02:01:57 +01003045 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003046 free_pages((unsigned long)vaddr, order);
3047}
3048
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003049static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
3050 int nelems, enum dma_data_direction dir,
3051 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003052{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003053 struct pci_dev *pdev = to_pci_dev(hwdev);
3054 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003055 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003056 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003057 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003058
David Woodhouse73676832009-07-04 14:08:36 +01003059 if (iommu_no_mapping(hwdev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003060 return;
3061
3062 domain = find_domain(pdev);
Weidong Han8c11e792008-12-08 15:29:22 +08003063 BUG_ON(!domain);
3064
3065 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003066
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003067 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
David Woodhouse85b98272009-07-01 19:27:53 +01003068 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
3069 (unsigned long long)sglist[0].dma_address))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003070 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003071
David Woodhoused794dc92009-06-28 00:27:49 +01003072 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3073 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003074
3075 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01003076 dma_pte_clear_range(domain, start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003077
David Woodhoused794dc92009-06-28 00:27:49 +01003078 /* free page tables */
3079 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
3080
David Woodhouseacea0012009-07-14 01:55:11 +01003081 if (intel_iommu_strict) {
3082 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03003083 last_pfn - start_pfn + 1, 0);
David Woodhouseacea0012009-07-14 01:55:11 +01003084 /* free iova */
3085 __free_iova(&domain->iovad, iova);
3086 } else {
3087 add_unmap(domain, iova);
3088 /*
3089 * queue up the release of the unmap to save roughly 1/6th of
3090 * the cpu time used up by the iotlb flush operation...
3091 */
3092 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003093}
3094
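/*
 * Scatterlist mapping for devices that bypass IOMMU translation: the
 * DMA address of each segment is simply its physical address.
 */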
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003095static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003096 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003097{
3098 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003099 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003100
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003101 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003102 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003103 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003104 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003105 }
3106 return nelems;
3107}
3108
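/*
 * Map a scatterlist: allocate a single IOVA range large enough for all
 * segments, map them contiguously in DMA address space, and undo the
 * allocation if domain_sg_mapping() fails.
 */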
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003109static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
3110 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003111{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003112 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003113 struct pci_dev *pdev = to_pci_dev(hwdev);
3114 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003115 size_t size = 0;
3116 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003117 struct iova *iova = NULL;
3118 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003119 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003120 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003121 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003122
3123 BUG_ON(dir == DMA_NONE);
David Woodhouse73676832009-07-04 14:08:36 +01003124 if (iommu_no_mapping(hwdev))
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003125 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003126
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003127 domain = get_valid_domain_for_dev(pdev);
3128 if (!domain)
3129 return 0;
3130
Weidong Han8c11e792008-12-08 15:29:22 +08003131 iommu = domain_get_iommu(domain);
3132
David Woodhouseb536d242009-06-28 14:49:31 +01003133 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003134 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003135
David Woodhouse5a5e02a2009-07-04 09:35:44 +01003136 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
3137 pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003138 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003139 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003140 return 0;
3141 }
3142
3143 /*
3144 * Check if DMAR supports zero-length reads on write only
3145 * mappings..
3146 */
3147 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003148 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003149 prot |= DMA_PTE_READ;
3150 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3151 prot |= DMA_PTE_WRITE;
3152
David Woodhouseb536d242009-06-28 14:49:31 +01003153 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003154
Fenghua Yuf5329592009-08-04 15:09:37 -07003155 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003156 if (unlikely(ret)) {
3157 /* clear the page */
3158 dma_pte_clear_range(domain, start_vpfn,
3159 start_vpfn + size - 1);
3160 /* free page tables */
3161 dma_pte_free_pagetable(domain, start_vpfn,
3162 start_vpfn + size - 1);
3163 /* free iova */
3164 __free_iova(&domain->iovad, iova);
3165 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003166 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003167
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003168 /* it's a non-present to present mapping. Only flush if caching mode */
3169 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03003170 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003171 else
Weidong Han8c11e792008-12-08 15:29:22 +08003172 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003173
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003174 return nelems;
3175}
3176
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003177static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3178{
3179 return !dma_addr;
3180}
3181
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003182struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003183 .alloc = intel_alloc_coherent,
3184 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003185 .map_sg = intel_map_sg,
3186 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003187 .map_page = intel_map_page,
3188 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003189 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003190};
3191
3192static inline int iommu_domain_cache_init(void)
3193{
3194 int ret = 0;
3195
3196 iommu_domain_cache = kmem_cache_create("iommu_domain",
3197 sizeof(struct dmar_domain),
3198 0,
3199 SLAB_HWCACHE_ALIGN,
3201 NULL);
3202 if (!iommu_domain_cache) {
3203 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3204 ret = -ENOMEM;
3205 }
3206
3207 return ret;
3208}
3209
3210static inline int iommu_devinfo_cache_init(void)
3211{
3212 int ret = 0;
3213
3214 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3215 sizeof(struct device_domain_info),
3216 0,
3217 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003218 NULL);
3219 if (!iommu_devinfo_cache) {
3220 printk(KERN_ERR "Couldn't create devinfo cache\n");
3221 ret = -ENOMEM;
3222 }
3223
3224 return ret;
3225}
3226
3227static inline int iommu_iova_cache_init(void)
3228{
3229 int ret = 0;
3230
3231 iommu_iova_cache = kmem_cache_create("iommu_iova",
3232 sizeof(struct iova),
3233 0,
3234 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003235 NULL);
3236 if (!iommu_iova_cache) {
3237 printk(KERN_ERR "Couldn't create iova cache\n");
3238 ret = -ENOMEM;
3239 }
3240
3241 return ret;
3242}
3243
3244static int __init iommu_init_mempool(void)
3245{
3246 int ret;
3247 ret = iommu_iova_cache_init();
3248 if (ret)
3249 return ret;
3250
3251 ret = iommu_domain_cache_init();
3252 if (ret)
3253 goto domain_error;
3254
3255 ret = iommu_devinfo_cache_init();
3256 if (!ret)
3257 return ret;
3258
3259 kmem_cache_destroy(iommu_domain_cache);
3260domain_error:
3261 kmem_cache_destroy(iommu_iova_cache);
3262
3263 return -ENOMEM;
3264}
3265
3266static void __init iommu_exit_mempool(void)
3267{
3268 kmem_cache_destroy(iommu_devinfo_cache);
3269 kmem_cache_destroy(iommu_domain_cache);
3270 kmem_cache_destroy(iommu_iova_cache);
3271
3272}
3273
Dan Williams556ab452010-07-23 15:47:56 -07003274static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3275{
3276 struct dmar_drhd_unit *drhd;
3277 u32 vtbar;
3278 int rc;
3279
3280 /* We know that this device on this chipset has its own IOMMU.
3281 * If we find it under a different IOMMU, then the BIOS is lying
3282 * to us. Hope that the IOMMU for this device is actually
3283 * disabled, and it needs no translation...
3284 */
3285 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3286 if (rc) {
3287 /* "can't" happen */
3288 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3289 return;
3290 }
3291 vtbar &= 0xffff0000;
3292
3293 /* we know that this iommu should be at offset 0xa000 from vtbar */
3294 drhd = dmar_find_matched_drhd_unit(pdev);
3295 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3296 TAINT_FIRMWARE_WORKAROUND,
3297 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3298 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3299}
3300DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3301
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003302static void __init init_no_remapping_devices(void)
3303{
3304 struct dmar_drhd_unit *drhd;
Jiang Liub683b232014-02-19 14:07:32 +08003305 struct pci_dev *dev;
3306 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003307
3308 for_each_drhd_unit(drhd) {
3309 if (!drhd->include_all) {
Jiang Liub683b232014-02-19 14:07:32 +08003310 for_each_active_dev_scope(drhd->devices,
3311 drhd->devices_cnt, i, dev)
3312 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003313 /* ignore DMAR unit if no pci devices exist */
3314 if (i == drhd->devices_cnt)
3315 drhd->ignored = 1;
3316 }
3317 }
3318
Jiang Liu7c919772014-01-06 14:18:18 +08003319 for_each_active_drhd_unit(drhd) {
Jiang Liu7c919772014-01-06 14:18:18 +08003320 if (drhd->include_all)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003321 continue;
3322
Jiang Liub683b232014-02-19 14:07:32 +08003323 for_each_active_dev_scope(drhd->devices,
3324 drhd->devices_cnt, i, dev)
3325 if (!IS_GFX_DEVICE(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003326 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003327 if (i < drhd->devices_cnt)
3328 continue;
3329
David Woodhousec0771df2011-10-14 20:59:46 +01003330 /* This IOMMU has *only* gfx devices. Either bypass it or
3331 set the gfx_mapped flag, as appropriate */
3332 if (dmar_map_gfx) {
3333 intel_iommu_gfx_mapped = 1;
3334 } else {
3335 drhd->ignored = 1;
Jiang Liub683b232014-02-19 14:07:32 +08003336 for_each_active_dev_scope(drhd->devices,
3337 drhd->devices_cnt, i, dev)
3338 dev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003339 }
3340 }
3341}
3342
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003343#ifdef CONFIG_SUSPEND
3344static int init_iommu_hw(void)
3345{
3346 struct dmar_drhd_unit *drhd;
3347 struct intel_iommu *iommu = NULL;
3348
3349 for_each_active_iommu(iommu, drhd)
3350 if (iommu->qi)
3351 dmar_reenable_qi(iommu);
3352
Joseph Cihulab7792602011-05-03 00:08:37 -07003353 for_each_iommu(iommu, drhd) {
3354 if (drhd->ignored) {
3355 /*
3356 * we always have to disable PMRs or DMA may fail on
3357 * this device
3358 */
3359 if (force_on)
3360 iommu_disable_protect_mem_regions(iommu);
3361 continue;
3362 }
3363
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003364 iommu_flush_write_buffer(iommu);
3365
3366 iommu_set_root_entry(iommu);
3367
3368 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003369 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003370 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003371 DMA_TLB_GLOBAL_FLUSH);
Joseph Cihulab7792602011-05-03 00:08:37 -07003372 if (iommu_enable_translation(iommu))
3373 return 1;
David Woodhouseb94996c2009-09-19 15:28:12 -07003374 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003375 }
3376
3377 return 0;
3378}
3379
3380static void iommu_flush_all(void)
3381{
3382 struct dmar_drhd_unit *drhd;
3383 struct intel_iommu *iommu;
3384
3385 for_each_active_iommu(iommu, drhd) {
3386 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003387 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003388 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003389 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003390 }
3391}
3392
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003393static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003394{
3395 struct dmar_drhd_unit *drhd;
3396 struct intel_iommu *iommu = NULL;
3397 unsigned long flag;
3398
3399 for_each_active_iommu(iommu, drhd) {
3400 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3401 GFP_ATOMIC);
3402 if (!iommu->iommu_state)
3403 goto nomem;
3404 }
3405
3406 iommu_flush_all();
3407
3408 for_each_active_iommu(iommu, drhd) {
3409 iommu_disable_translation(iommu);
3410
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003411 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003412
3413 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3414 readl(iommu->reg + DMAR_FECTL_REG);
3415 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3416 readl(iommu->reg + DMAR_FEDATA_REG);
3417 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3418 readl(iommu->reg + DMAR_FEADDR_REG);
3419 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3420 readl(iommu->reg + DMAR_FEUADDR_REG);
3421
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003422 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003423 }
3424 return 0;
3425
3426nomem:
3427 for_each_active_iommu(iommu, drhd)
3428 kfree(iommu->iommu_state);
3429
3430 return -ENOMEM;
3431}
3432
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003433static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003434{
3435 struct dmar_drhd_unit *drhd;
3436 struct intel_iommu *iommu = NULL;
3437 unsigned long flag;
3438
3439 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003440 if (force_on)
3441 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3442 else
3443 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003444 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003445 }
3446
3447 for_each_active_iommu(iommu, drhd) {
3448
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003449 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003450
3451 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3452 iommu->reg + DMAR_FECTL_REG);
3453 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3454 iommu->reg + DMAR_FEDATA_REG);
3455 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3456 iommu->reg + DMAR_FEADDR_REG);
3457 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3458 iommu->reg + DMAR_FEUADDR_REG);
3459
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003460 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003461 }
3462
3463 for_each_active_iommu(iommu, drhd)
3464 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003465}
3466
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003467static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003468 .resume = iommu_resume,
3469 .suspend = iommu_suspend,
3470};
3471
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003472static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003473{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003474 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003475}
3476
3477#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003478static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003479#endif /* CONFIG_PM */
3480
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003481
3482int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3483{
3484 struct acpi_dmar_reserved_memory *rmrr;
3485 struct dmar_rmrr_unit *rmrru;
3486
3487 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3488 if (!rmrru)
3489 return -ENOMEM;
3490
3491 rmrru->hdr = header;
3492 rmrr = (struct acpi_dmar_reserved_memory *)header;
3493 rmrru->base_address = rmrr->base_address;
3494 rmrru->end_address = rmrr->end_address;
Jiang Liu2e455282014-02-19 14:07:36 +08003495 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3496 ((void *)rmrr) + rmrr->header.length,
3497 &rmrru->devices_cnt);
3498 if (rmrru->devices_cnt && rmrru->devices == NULL) {
3499 kfree(rmrru);
3500 return -ENOMEM;
3501 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003502
Jiang Liu2e455282014-02-19 14:07:36 +08003503 list_add(&rmrru->list, &dmar_rmrr_units);
3504
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003505 return 0;
3506}
3507
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003508int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3509{
3510 struct acpi_dmar_atsr *atsr;
3511 struct dmar_atsr_unit *atsru;
3512
3513 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3514 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3515 if (!atsru)
3516 return -ENOMEM;
3517
3518 atsru->hdr = hdr;
3519 atsru->include_all = atsr->flags & 0x1;
Jiang Liu2e455282014-02-19 14:07:36 +08003520 if (!atsru->include_all) {
3521 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
3522 (void *)atsr + atsr->header.length,
3523 &atsru->devices_cnt);
3524 if (atsru->devices_cnt && atsru->devices == NULL) {
3525 kfree(atsru);
3526 return -ENOMEM;
3527 }
3528 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003529
Jiang Liu0e242612014-02-19 14:07:34 +08003530 list_add_rcu(&atsru->list, &dmar_atsr_units);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003531
3532 return 0;
3533}
3534
Jiang Liu9bdc5312014-01-06 14:18:27 +08003535static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
3536{
3537 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
3538 kfree(atsru);
3539}
3540
3541static void intel_iommu_free_dmars(void)
3542{
3543 struct dmar_rmrr_unit *rmrru, *rmrr_n;
3544 struct dmar_atsr_unit *atsru, *atsr_n;
3545
3546 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
3547 list_del(&rmrru->list);
3548 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
3549 kfree(rmrru);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003550 }
3551
Jiang Liu9bdc5312014-01-06 14:18:27 +08003552 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
3553 list_del(&atsru->list);
3554 intel_iommu_free_atsr(atsru);
3555 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003556}
3557
3558int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3559{
Jiang Liub683b232014-02-19 14:07:32 +08003560 int i, ret = 1;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003561 struct pci_bus *bus;
Jiang Liub683b232014-02-19 14:07:32 +08003562 struct pci_dev *bridge = NULL, *tmp;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003563 struct acpi_dmar_atsr *atsr;
3564 struct dmar_atsr_unit *atsru;
3565
3566 dev = pci_physfn(dev);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003567 for (bus = dev->bus; bus; bus = bus->parent) {
Jiang Liub5f82dd2014-02-19 14:07:31 +08003568 bridge = bus->self;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003569 if (!bridge || !pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08003570 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003571 return 0;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003572 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003573 break;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003574 }
Jiang Liub5f82dd2014-02-19 14:07:31 +08003575 if (!bridge)
3576 return 0;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003577
Jiang Liu0e242612014-02-19 14:07:34 +08003578 rcu_read_lock();
Jiang Liub5f82dd2014-02-19 14:07:31 +08003579 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3580 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3581 if (atsr->segment != pci_domain_nr(dev->bus))
3582 continue;
3583
Jiang Liub683b232014-02-19 14:07:32 +08003584 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
3585 if (tmp == bridge)
3586 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003587
3588 if (atsru->include_all)
Jiang Liub683b232014-02-19 14:07:32 +08003589 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003590 }
Jiang Liub683b232014-02-19 14:07:32 +08003591 ret = 0;
3592out:
Jiang Liu0e242612014-02-19 14:07:34 +08003593 rcu_read_unlock();
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003594
Jiang Liub683b232014-02-19 14:07:32 +08003595 return ret;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003596}
3597
Jiang Liu59ce0512014-02-19 14:07:35 +08003598int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
3599{
3600 int ret = 0;
3601 struct dmar_rmrr_unit *rmrru;
3602 struct dmar_atsr_unit *atsru;
3603 struct acpi_dmar_atsr *atsr;
3604 struct acpi_dmar_reserved_memory *rmrr;
3605
3606 if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
3607 return 0;
3608
3609 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
3610 rmrr = container_of(rmrru->hdr,
3611 struct acpi_dmar_reserved_memory, header);
3612 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3613 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
3614 ((void *)rmrr) + rmrr->header.length,
3615 rmrr->segment, rmrru->devices,
3616 rmrru->devices_cnt);
3617 if (ret > 0)
3618 break;
3619 else if (ret < 0)
3620 return ret;
3621 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3622 if (dmar_remove_dev_scope(info, rmrr->segment,
3623 rmrru->devices, rmrru->devices_cnt))
3624 break;
3625 }
3626 }
3627
3628 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3629 if (atsru->include_all)
3630 continue;
3631
3632 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3633 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3634 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
3635 (void *)atsr + atsr->header.length,
3636 atsr->segment, atsru->devices,
3637 atsru->devices_cnt);
3638 if (ret > 0)
3639 break;
3640 else if (ret < 0)
3641 return ret;
3642 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3643 if (dmar_remove_dev_scope(info, atsr->segment,
3644 atsru->devices, atsru->devices_cnt))
3645 break;
3646 }
3647 }
3648
3649 return 0;
3650}
3651
Fenghua Yu99dcade2009-11-11 07:23:06 -08003652/*
3653 * Here we only respond to a device being unbound from its driver or removed.
3654 *
3655 * Added device is not attached to its DMAR domain here yet. That will happen
3656 * when mapping the device to iova.
3657 */
3658static int device_notifier(struct notifier_block *nb,
3659 unsigned long action, void *data)
3660{
3661 struct device *dev = data;
3662 struct pci_dev *pdev = to_pci_dev(dev);
3663 struct dmar_domain *domain;
3664
Jiang Liu816997d2014-02-19 14:07:22 +08003665 if (iommu_dummy(pdev))
David Woodhouse44cd6132009-12-02 10:18:30 +00003666 return 0;
3667
Jiang Liu7e7dfab2014-02-19 14:07:23 +08003668 if (action != BUS_NOTIFY_UNBOUND_DRIVER &&
3669 action != BUS_NOTIFY_DEL_DEVICE)
3670 return 0;
3671
Fenghua Yu99dcade2009-11-11 07:23:06 -08003672 domain = find_domain(pdev);
3673 if (!domain)
3674 return 0;
3675
Jiang Liu3a5670e2014-02-19 14:07:33 +08003676 down_read(&dmar_global_lock);
Jiang Liu7e7dfab2014-02-19 14:07:23 +08003677 domain_remove_one_dev_info(domain, pdev);
3678 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3679 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3680 list_empty(&domain->devices))
3681 domain_exit(domain);
Jiang Liu3a5670e2014-02-19 14:07:33 +08003682 up_read(&dmar_global_lock);
Alex Williamsona97590e2011-03-04 14:52:16 -07003683
Fenghua Yu99dcade2009-11-11 07:23:06 -08003684 return 0;
3685}
3686
3687static struct notifier_block device_nb = {
3688 .notifier_call = device_notifier,
3689};
3690
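/*
 * Memory hotplug notifier for the static identity (si) domain: extend
 * the identity map when memory goes online, and unmap, flush and free
 * the covering IOVAs when it goes offline.
 */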
Jiang Liu75f05562014-02-19 14:07:37 +08003691static int intel_iommu_memory_notifier(struct notifier_block *nb,
3692 unsigned long val, void *v)
3693{
3694 struct memory_notify *mhp = v;
3695 unsigned long long start, end;
3696 unsigned long start_vpfn, last_vpfn;
3697
3698 switch (val) {
3699 case MEM_GOING_ONLINE:
3700 start = mhp->start_pfn << PAGE_SHIFT;
3701 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
3702 if (iommu_domain_identity_map(si_domain, start, end)) {
3703 pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
3704 start, end);
3705 return NOTIFY_BAD;
3706 }
3707 break;
3708
3709 case MEM_OFFLINE:
3710 case MEM_CANCEL_ONLINE:
3711 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
3712 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
3713 while (start_vpfn <= last_vpfn) {
3714 struct iova *iova;
3715 struct dmar_drhd_unit *drhd;
3716 struct intel_iommu *iommu;
3717
3718 iova = find_iova(&si_domain->iovad, start_vpfn);
3719 if (iova == NULL) {
3720 pr_debug("dmar: failed get IOVA for PFN %lx\n",
3721 start_vpfn);
3722 break;
3723 }
3724
3725 iova = split_and_remove_iova(&si_domain->iovad, iova,
3726 start_vpfn, last_vpfn);
3727 if (iova == NULL) {
3728 pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
3729 start_vpfn, last_vpfn);
3730 return NOTIFY_BAD;
3731 }
3732
3733 rcu_read_lock();
3734 for_each_active_iommu(iommu, drhd)
3735 iommu_flush_iotlb_psi(iommu, si_domain->id,
3736 iova->pfn_lo,
3737 iova->pfn_hi - iova->pfn_lo + 1, 0);
3738 rcu_read_unlock();
3739 dma_pte_clear_range(si_domain, iova->pfn_lo,
3740 iova->pfn_hi);
3741 dma_pte_free_pagetable(si_domain, iova->pfn_lo,
3742 iova->pfn_hi);
3743
3744 start_vpfn = iova->pfn_hi + 1;
3745 free_iova_mem(iova);
3746 }
3747 break;
3748 }
3749
3750 return NOTIFY_OK;
3751}
3752
3753static struct notifier_block intel_iommu_memory_nb = {
3754 .notifier_call = intel_iommu_memory_notifier,
3755 .priority = 0
3756};
3757
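/*
 * Top-level VT-d initialization: parse the DMAR table, initialize the
 * IOMMUs via init_dmars(), install intel_dma_ops and register the
 * suspend/resume, bus-notifier and memory-hotplug hooks.
 */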
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003758int __init intel_iommu_init(void)
3759{
Jiang Liu9bdc5312014-01-06 14:18:27 +08003760 int ret = -ENODEV;
Takao Indoh3a93c842013-04-23 17:35:03 +09003761 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08003762 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003763
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003764 /* VT-d is required for a TXT/tboot launch, so enforce that */
3765 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003766
Jiang Liu3a5670e2014-02-19 14:07:33 +08003767 if (iommu_init_mempool()) {
3768 if (force_on)
3769 panic("tboot: Failed to initialize iommu memory\n");
3770 return -ENOMEM;
3771 }
3772
3773 down_write(&dmar_global_lock);
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003774 if (dmar_table_init()) {
3775 if (force_on)
3776 panic("tboot: Failed to initialize DMAR table\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08003777 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003778 }
3779
Takao Indoh3a93c842013-04-23 17:35:03 +09003780 /*
3781 * Disable translation if already enabled prior to OS handover.
3782 */
Jiang Liu7c919772014-01-06 14:18:18 +08003783 for_each_active_iommu(iommu, drhd)
Takao Indoh3a93c842013-04-23 17:35:03 +09003784 if (iommu->gcmd & DMA_GCMD_TE)
3785 iommu_disable_translation(iommu);
Takao Indoh3a93c842013-04-23 17:35:03 +09003786
Suresh Siddhac2c72862011-08-23 17:05:19 -07003787 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003788 if (force_on)
3789 panic("tboot: Failed to initialize DMAR device scope\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08003790 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003791 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003792
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003793 if (no_iommu || dmar_disabled)
Jiang Liu9bdc5312014-01-06 14:18:27 +08003794 goto out_free_dmar;
Suresh Siddha2ae21012008-07-10 11:16:43 -07003795
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003796 if (list_empty(&dmar_rmrr_units))
3797 printk(KERN_INFO "DMAR: No RMRR found\n");
3798
3799 if (list_empty(&dmar_atsr_units))
3800 printk(KERN_INFO "DMAR: No ATSR found\n");
3801
Joseph Cihula51a63e62011-03-21 11:04:24 -07003802 if (dmar_init_reserved_ranges()) {
3803 if (force_on)
3804 panic("tboot: Failed to reserve iommu ranges\n");
Jiang Liu3a5670e2014-02-19 14:07:33 +08003805 goto out_free_reserved_range;
Joseph Cihula51a63e62011-03-21 11:04:24 -07003806 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003807
3808 init_no_remapping_devices();
3809
Joseph Cihulab7792602011-05-03 00:08:37 -07003810 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003811 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003812 if (force_on)
3813 panic("tboot: Failed to initialize DMARs\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003814 printk(KERN_ERR "IOMMU: dmar init failed\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08003815 goto out_free_reserved_range;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003816 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08003817 up_write(&dmar_global_lock);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003818 printk(KERN_INFO
3819 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3820
mark gross5e0d2a62008-03-04 15:22:08 -08003821 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003822#ifdef CONFIG_SWIOTLB
3823 swiotlb = 0;
3824#endif
David Woodhouse19943b02009-08-04 16:19:20 +01003825 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003826
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003827 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003828
Joerg Roedel4236d97d2011-09-06 17:56:07 +02003829 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Fenghua Yu99dcade2009-11-11 07:23:06 -08003830 bus_register_notifier(&pci_bus_type, &device_nb);
Jiang Liu75f05562014-02-19 14:07:37 +08003831 if (si_domain && !hw_pass_through)
3832 register_memory_notifier(&intel_iommu_memory_nb);
Fenghua Yu99dcade2009-11-11 07:23:06 -08003833
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02003834 intel_iommu_enabled = 1;
3835
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003836 return 0;
Jiang Liu9bdc5312014-01-06 14:18:27 +08003837
3838out_free_reserved_range:
3839 put_iova_domain(&reserved_iova_list);
Jiang Liu9bdc5312014-01-06 14:18:27 +08003840out_free_dmar:
3841 intel_iommu_free_dmars();
Jiang Liu3a5670e2014-02-19 14:07:33 +08003842 up_write(&dmar_global_lock);
3843 iommu_exit_mempool();
Jiang Liu9bdc5312014-01-06 14:18:27 +08003844 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003845}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07003846
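/*
 * Detach the context entries of the PCI bridges between a device and
 * its upstream PCIe bridge as well; conventional PCI devices behind a
 * bridge issue requests with the bridge's source-id.
 */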
Han, Weidong3199aa62009-02-26 17:31:12 +08003847static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3848 struct pci_dev *pdev)
3849{
3850 struct pci_dev *tmp, *parent;
3851
3852 if (!iommu || !pdev)
3853 return;
3854
3855 /* dependent device detach */
3856 tmp = pci_find_upstream_pcie_bridge(pdev);
3857 /* Secondary interface's bus number and devfn 0 */
3858 if (tmp) {
3859 parent = pdev->bus->self;
3860 while (parent != tmp) {
3861 iommu_detach_dev(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01003862 parent->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003863 parent = parent->bus->self;
3864 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05003865 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Han, Weidong3199aa62009-02-26 17:31:12 +08003866 iommu_detach_dev(iommu,
3867 tmp->subordinate->number, 0);
3868 else /* this is a legacy PCI bridge */
David Woodhouse276dbf992009-04-04 01:45:37 +01003869 iommu_detach_dev(iommu, tmp->bus->number,
3870 tmp->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003871 }
3872}
3873
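/*
 * Detach one PCI device from a domain: drop its device_domain_info,
 * disable its device-IOTLB, clear its context entries (and those of
 * dependent bridges), and release the IOMMU from the domain if no
 * other device behind that IOMMU still belongs to the domain.
 */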
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003874static void domain_remove_one_dev_info(struct dmar_domain *domain,
Weidong Hanc7151a82008-12-08 22:51:37 +08003875 struct pci_dev *pdev)
3876{
Yijing Wangbca2b912013-10-31 17:26:04 +08003877 struct device_domain_info *info, *tmp;
Weidong Hanc7151a82008-12-08 22:51:37 +08003878 struct intel_iommu *iommu;
3879 unsigned long flags;
3880 int found = 0;
Weidong Hanc7151a82008-12-08 22:51:37 +08003881
David Woodhouse276dbf992009-04-04 01:45:37 +01003882 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3883 pdev->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003884 if (!iommu)
3885 return;
3886
3887 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wangbca2b912013-10-31 17:26:04 +08003888 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
Mike Habeck8519dc42011-05-28 13:15:07 -05003889 if (info->segment == pci_domain_nr(pdev->bus) &&
3890 info->bus == pdev->bus->number &&
Weidong Hanc7151a82008-12-08 22:51:37 +08003891 info->devfn == pdev->devfn) {
David Woodhouse109b9b02012-05-25 17:43:02 +01003892 unlink_domain_info(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003893 spin_unlock_irqrestore(&device_domain_lock, flags);
3894
Yu Zhao93a23a72009-05-18 13:51:37 +08003895 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003896 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003897 iommu_detach_dependent_devices(iommu, pdev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003898 free_devinfo_mem(info);
3899
3900 spin_lock_irqsave(&device_domain_lock, flags);
3901
3902 if (found)
3903 break;
3904 else
3905 continue;
3906 }
3907
3908		/* if there are no other devices under the same iommu
3909		 * owned by this domain, clear this iommu in iommu_bmp and
3910		 * update the iommu count and coherency
3911 */
David Woodhouse276dbf992009-04-04 01:45:37 +01003912 if (iommu == device_to_iommu(info->segment, info->bus,
3913 info->devfn))
Weidong Hanc7151a82008-12-08 22:51:37 +08003914 found = 1;
3915 }
3916
Roland Dreier3e7abe22011-07-20 06:22:21 -07003917 spin_unlock_irqrestore(&device_domain_lock, flags);
3918
Weidong Hanc7151a82008-12-08 22:51:37 +08003919 if (found == 0) {
3920 unsigned long tmp_flags;
3921 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
Mike Travis1b198bb2012-03-05 15:05:16 -08003922 clear_bit(iommu->seq_id, domain->iommu_bmp);
Weidong Hanc7151a82008-12-08 22:51:37 +08003923 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003924 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003925 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
Alex Williamsona97590e2011-03-04 14:52:16 -07003926
Alex Williamson9b4554b2011-05-24 12:19:04 -04003927 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3928 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
3929 spin_lock_irqsave(&iommu->lock, tmp_flags);
3930 clear_bit(domain->id, iommu->domain_ids);
3931 iommu->domains[domain->id] = NULL;
3932 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
3933 }
Weidong Hanc7151a82008-12-08 22:51:37 +08003934 }
Weidong Hanc7151a82008-12-08 22:51:37 +08003935}
3936
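/*
 * Set up a domain created through the generic IOMMU API (see
 * intel_iommu_domain_init() below) rather than one built internally for
 * DMA-API use.  Rough illustration of the width arithmetic, assuming the
 * default 48-bit guest width: the adjusted width stays 48 bits, which
 * corresponds to AGAW 2, i.e. a 4-level page table.
 */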
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003937static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08003938{
3939 int adjust_width;
3940
3941 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003942 domain_reserve_special_ranges(domain);
3943
3944 /* calculate AGAW */
3945 domain->gaw = guest_width;
3946 adjust_width = guestwidth_to_adjustwidth(guest_width);
3947 domain->agaw = width_to_agaw(adjust_width);
3948
Weidong Han5e98c4b2008-12-08 23:03:27 +08003949 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08003950 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01003951 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003952 domain->max_addr = 0;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003953 domain->nid = -1;
Weidong Han5e98c4b2008-12-08 23:03:27 +08003954
3955 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07003956 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003957 if (!domain->pgd)
3958 return -ENOMEM;
3959 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3960 return 0;
3961}
3962
Joerg Roedel5d450802008-12-03 14:52:32 +01003963static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003964{
Joerg Roedel5d450802008-12-03 14:52:32 +01003965 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03003966
Jiang Liu92d03cc2014-02-19 14:07:28 +08003967 dmar_domain = alloc_domain(true);
Joerg Roedel5d450802008-12-03 14:52:32 +01003968 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03003969 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003970 "intel_iommu_domain_init: dmar_domain == NULL\n");
3971 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003972 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003973 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03003974 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003975 "intel_iommu_domain_init() failed\n");
Jiang Liu92d03cc2014-02-19 14:07:28 +08003976 domain_exit(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01003977 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003978 }
Allen Kay8140a952011-10-14 12:32:17 -07003979 domain_update_iommu_cap(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01003980 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003981
Joerg Roedel8a0e7152012-01-26 19:40:54 +01003982 domain->geometry.aperture_start = 0;
3983 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
3984 domain->geometry.force_aperture = true;
3985
Joerg Roedel5d450802008-12-03 14:52:32 +01003986 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03003987}
Kay, Allen M38717942008-09-09 18:37:29 +03003988
Joerg Roedel5d450802008-12-03 14:52:32 +01003989static void intel_iommu_domain_destroy(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003990{
Joerg Roedel5d450802008-12-03 14:52:32 +01003991 struct dmar_domain *dmar_domain = domain->priv;
3992
3993 domain->priv = NULL;
Jiang Liu92d03cc2014-02-19 14:07:28 +08003994 domain_exit(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03003995}
Kay, Allen M38717942008-09-09 18:37:29 +03003996
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003997static int intel_iommu_attach_device(struct iommu_domain *domain,
3998 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03003999{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004000 struct dmar_domain *dmar_domain = domain->priv;
4001 struct pci_dev *pdev = to_pci_dev(dev);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004002 struct intel_iommu *iommu;
4003 int addr_width;
Kay, Allen M38717942008-09-09 18:37:29 +03004004
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004005 /* normally pdev is not mapped */
4006 if (unlikely(domain_context_mapped(pdev))) {
4007 struct dmar_domain *old_domain;
4008
4009 old_domain = find_domain(pdev);
4010 if (old_domain) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004011 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
4012 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
4013 domain_remove_one_dev_info(old_domain, pdev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004014 else
4015 domain_remove_dev_info(old_domain);
4016 }
4017 }
4018
David Woodhouse276dbf992009-04-04 01:45:37 +01004019 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
4020 pdev->devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004021 if (!iommu)
4022 return -ENODEV;
4023
4024 /* check if this iommu agaw is sufficient for max mapped address */
4025 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01004026 if (addr_width > cap_mgaw(iommu->cap))
4027 addr_width = cap_mgaw(iommu->cap);
4028
4029 if (dmar_domain->max_addr > (1LL << addr_width)) {
4030 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004031 "sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01004032 __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004033 return -EFAULT;
4034 }
Tom Lyona99c47a2010-05-17 08:20:45 +01004035 dmar_domain->gaw = addr_width;
4036
4037 /*
4038 * Knock out extra levels of page tables if necessary
4039 */
4040 while (iommu->agaw < dmar_domain->agaw) {
4041 struct dma_pte *pte;
4042
4043 pte = dmar_domain->pgd;
4044 if (dma_pte_present(pte)) {
Sheng Yang25cbff12010-06-12 19:21:42 +08004045 dmar_domain->pgd = (struct dma_pte *)
4046 phys_to_virt(dma_pte_addr(pte));
Jan Kiszka7a661012010-11-02 08:05:51 +01004047 free_pgtable_page(pte);
Tom Lyona99c47a2010-05-17 08:20:45 +01004048 }
4049 dmar_domain->agaw--;
4050 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004051
David Woodhouse5fe60f42009-08-09 10:53:41 +01004052 return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004053}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004054
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004055static void intel_iommu_detach_device(struct iommu_domain *domain,
4056 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004057{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004058 struct dmar_domain *dmar_domain = domain->priv;
4059 struct pci_dev *pdev = to_pci_dev(dev);
4060
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004061 domain_remove_one_dev_info(dmar_domain, pdev);
Kay, Allen M38717942008-09-09 18:37:29 +03004062}
Kay, Allen M38717942008-09-09 18:37:29 +03004063
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004064static int intel_iommu_map(struct iommu_domain *domain,
4065 unsigned long iova, phys_addr_t hpa,
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004066 size_t size, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03004067{
Joerg Roedeldde57a22008-12-03 15:04:09 +01004068 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004069 u64 max_addr;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004070 int prot = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004071 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004072
Joerg Roedeldde57a22008-12-03 15:04:09 +01004073 if (iommu_prot & IOMMU_READ)
4074 prot |= DMA_PTE_READ;
4075 if (iommu_prot & IOMMU_WRITE)
4076 prot |= DMA_PTE_WRITE;
Sheng Yang9cf066972009-03-18 15:33:07 +08004077 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4078 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004079
David Woodhouse163cc522009-06-28 00:51:17 +01004080 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004081 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004082 u64 end;
4083
4084 /* check if minimum agaw is sufficient for mapped address */
Tom Lyon8954da12010-05-17 08:19:52 +01004085 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004086 if (end < max_addr) {
Tom Lyon8954da12010-05-17 08:19:52 +01004087 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004088 "sufficient for the mapped address (%llx)\n",
Tom Lyon8954da12010-05-17 08:19:52 +01004089 __func__, dmar_domain->gaw, max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004090 return -EFAULT;
4091 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01004092 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004093 }
David Woodhousead051222009-06-28 14:22:28 +01004094 /* Round up size to next multiple of PAGE_SIZE, if it and
4095 the low bits of hpa would take us onto the next page */
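	/* e.g. (illustrative) hpa = 0x1800 with size = 0x1000 straddles two
	   4KiB pages, so aligned_nrpages() yields a two-page mapping. */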
David Woodhouse88cb6a72009-06-28 15:03:06 +01004096 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01004097 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4098 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004099 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03004100}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004101
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004102static size_t intel_iommu_unmap(struct iommu_domain *domain,
4103 unsigned long iova, size_t size)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004104{
Joerg Roedeldde57a22008-12-03 15:04:09 +01004105 struct dmar_domain *dmar_domain = domain->priv;
David Woodhouse5cf0a762014-03-19 16:07:49 +00004106 int level = 0;
Sheng Yang4b99d352009-07-08 11:52:52 +01004107
David Woodhouse5cf0a762014-03-19 16:07:49 +00004108 /* Cope with horrid API which requires us to unmap more than the
4109 size argument if it happens to be a large-page mapping. */
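	/* e.g. a 4KiB unmap request that lands inside a superpage (2MiB or
	   1GiB) clears the whole superpage, and the enlarged size is
	   returned to the caller below. */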
4110 if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
4111 BUG();
4112
4113 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
4114 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
4115
4116 dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
David Woodhouse163cc522009-06-28 00:51:17 +01004117 (iova + size - 1) >> VTD_PAGE_SHIFT);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004118
David Woodhouse163cc522009-06-28 00:51:17 +01004119 if (dmar_domain->max_addr == iova + size)
4120 dmar_domain->max_addr = iova;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004121
David Woodhouse5cf0a762014-03-19 16:07:49 +00004122 return size;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004123}
Kay, Allen M38717942008-09-09 18:37:29 +03004124
Joerg Roedeld14d6572008-12-03 15:06:57 +01004125static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
Varun Sethibb5547ac2013-03-29 01:23:58 +05304126 dma_addr_t iova)
Kay, Allen M38717942008-09-09 18:37:29 +03004127{
Joerg Roedeld14d6572008-12-03 15:06:57 +01004128 struct dmar_domain *dmar_domain = domain->priv;
Kay, Allen M38717942008-09-09 18:37:29 +03004129 struct dma_pte *pte;
David Woodhouse5cf0a762014-03-19 16:07:49 +00004130 int level = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004131 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004132
David Woodhouse5cf0a762014-03-19 16:07:49 +00004133 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
Kay, Allen M38717942008-09-09 18:37:29 +03004134 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004135 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03004136
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004137 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03004138}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004139
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004140static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
4141 unsigned long cap)
4142{
4143 struct dmar_domain *dmar_domain = domain->priv;
4144
4145 if (cap == IOMMU_CAP_CACHE_COHERENCY)
4146 return dmar_domain->iommu_snooping;
Tom Lyon323f99c2010-07-02 16:56:14 -04004147 if (cap == IOMMU_CAP_INTR_REMAP)
Suresh Siddha95a02e92012-03-30 11:47:07 -07004148 return irq_remapping_enabled;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004149
4150 return 0;
4151}
4152
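/*
 * PCI ACS capabilities we require before treating a function as isolated:
 * Source Validation, P2P Request Redirect, P2P Completion Redirect and
 * Upstream Forwarding.
 */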
Alex Williamson783f1572012-05-30 14:19:43 -06004153#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
4154
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004155static int intel_iommu_add_device(struct device *dev)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004156{
4157 struct pci_dev *pdev = to_pci_dev(dev);
Alex Williamson3da4af0a2012-11-13 10:22:03 -07004158 struct pci_dev *bridge, *dma_pdev = NULL;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004159 struct iommu_group *group;
4160 int ret;
Alex Williamson70ae6f02011-10-21 15:56:11 -04004161
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004162 if (!device_to_iommu(pci_domain_nr(pdev->bus),
4163 pdev->bus->number, pdev->devfn))
Alex Williamson70ae6f02011-10-21 15:56:11 -04004164 return -ENODEV;
4165
4166 bridge = pci_find_upstream_pcie_bridge(pdev);
4167 if (bridge) {
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004168 if (pci_is_pcie(bridge))
4169 dma_pdev = pci_get_domain_bus_and_slot(
4170 pci_domain_nr(pdev->bus),
4171 bridge->subordinate->number, 0);
Alex Williamson3da4af0a2012-11-13 10:22:03 -07004172 if (!dma_pdev)
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004173 dma_pdev = pci_dev_get(bridge);
4174 } else
4175 dma_pdev = pci_dev_get(pdev);
4176
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004177 /* Account for quirked devices */
Alex Williamson783f1572012-05-30 14:19:43 -06004178 swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
4179
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004180 /*
4181 * If it's a multifunction device that does not support our
Alex Williamsonc14d2692013-05-30 12:39:18 -06004182	 * required ACS flags, add it to the same group as the lowest-numbered
4183	 * function that also does not support the required ACS flags.
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004184 */
Alex Williamson783f1572012-05-30 14:19:43 -06004185 if (dma_pdev->multifunction &&
Alex Williamsonc14d2692013-05-30 12:39:18 -06004186 !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
4187 u8 i, slot = PCI_SLOT(dma_pdev->devfn);
4188
4189 for (i = 0; i < 8; i++) {
4190 struct pci_dev *tmp;
4191
4192 tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
4193 if (!tmp)
4194 continue;
4195
4196 if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
4197 swap_pci_ref(&dma_pdev, tmp);
4198 break;
4199 }
4200 pci_dev_put(tmp);
4201 }
4202 }
Alex Williamson783f1572012-05-30 14:19:43 -06004203
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004204 /*
4205 * Devices on the root bus go through the iommu. If that's not us,
4206 * find the next upstream device and test ACS up to the root bus.
4207 * Finding the next device may require skipping virtual buses.
4208 */
Alex Williamson783f1572012-05-30 14:19:43 -06004209 while (!pci_is_root_bus(dma_pdev->bus)) {
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004210 struct pci_bus *bus = dma_pdev->bus;
4211
4212 while (!bus->self) {
4213 if (!pci_is_root_bus(bus))
4214 bus = bus->parent;
4215 else
4216 goto root_bus;
4217 }
4218
4219 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
Alex Williamson783f1572012-05-30 14:19:43 -06004220 break;
4221
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004222 swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
Alex Williamson70ae6f02011-10-21 15:56:11 -04004223 }
4224
Alex Williamsona4ff1fc2012-08-04 12:08:55 -06004225root_bus:
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004226 group = iommu_group_get(&dma_pdev->dev);
4227 pci_dev_put(dma_pdev);
4228 if (!group) {
4229 group = iommu_group_alloc();
4230 if (IS_ERR(group))
4231 return PTR_ERR(group);
4232 }
Alex Williamsonbcb71ab2011-10-21 15:56:24 -04004233
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004234 ret = iommu_group_add_device(group, dev);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004235
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004236 iommu_group_put(group);
4237 return ret;
4238}
4239
4240static void intel_iommu_remove_device(struct device *dev)
4241{
4242 iommu_group_remove_device(dev);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004243}
4244
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004245static struct iommu_ops intel_iommu_ops = {
4246 .domain_init = intel_iommu_domain_init,
4247 .domain_destroy = intel_iommu_domain_destroy,
4248 .attach_dev = intel_iommu_attach_device,
4249 .detach_dev = intel_iommu_detach_device,
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004250 .map = intel_iommu_map,
4251 .unmap = intel_iommu_unmap,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004252 .iova_to_phys = intel_iommu_iova_to_phys,
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004253 .domain_has_cap = intel_iommu_domain_has_cap,
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004254 .add_device = intel_iommu_add_device,
4255 .remove_device = intel_iommu_remove_device,
Ohad Ben-Cohen6d1c56a2011-11-10 11:32:30 +02004256 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004257};
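/*
 * Illustrative sketch only (not compiled, hence the #if 0): once
 * intel_iommu_ops has been registered via bus_set_iommu() in
 * intel_iommu_init(), a consumer such as VFIO or KVM drives the callbacks
 * above through the generic IOMMU API.  example_map_one_page(), the device
 * pointer and the iova/paddr values are placeholders; the generic calls
 * shown (iommu_domain_alloc() and friends) are the linux/iommu.h entry
 * points of this kernel generation.
 */
#if 0
static int example_map_one_page(struct device *dev)	/* dev: some PCI device */
{
	struct iommu_domain *domain;
	phys_addr_t paddr = 0x100000;	/* placeholder physical address */
	unsigned long iova = 0x1000;	/* placeholder I/O virtual address */
	int ret;

	domain = iommu_domain_alloc(&pci_bus_type);	/* -> intel_iommu_domain_init() */
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);		/* -> intel_iommu_attach_device() */
	if (ret)
		goto out_free;

	ret = iommu_map(domain, iova, paddr, PAGE_SIZE,	/* -> intel_iommu_map() */
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	/* -> intel_iommu_iova_to_phys(); should hand back the page we mapped */
	WARN_ON(iommu_iova_to_phys(domain, iova) != paddr);

	iommu_unmap(domain, iova, PAGE_SIZE);		/* -> intel_iommu_unmap() */
out_detach:
	iommu_detach_device(domain, dev);		/* -> intel_iommu_detach_device() */
out_free:
	iommu_domain_free(domain);			/* -> intel_iommu_domain_destroy() */
	return ret;
}
#endif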
David Woodhouse9af88142009-02-13 23:18:03 +00004258
Daniel Vetter94526182013-01-20 23:50:13 +01004259static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4260{
4261 /* G4x/GM45 integrated gfx dmar support is totally busted. */
4262 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4263 dmar_map_gfx = 0;
4264}
4265
4266DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4267DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4268DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4269DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4270DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4271DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4272DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4273
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004274static void quirk_iommu_rwbf(struct pci_dev *dev)
David Woodhouse9af88142009-02-13 23:18:03 +00004275{
4276 /*
4277 * Mobile 4 Series Chipset neglects to set RWBF capability,
Daniel Vetter210561f2013-01-21 19:48:59 +01004278 * but needs it. Same seems to hold for the desktop versions.
David Woodhouse9af88142009-02-13 23:18:03 +00004279 */
4280 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4281 rwbf_quirk = 1;
4282}
4283
4284DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
Daniel Vetter210561f2013-01-21 19:48:59 +01004285DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4286DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4287DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4288DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4289DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4290DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
David Woodhousee0fc7e02009-09-30 09:12:17 -07004291
Adam Jacksoneecfd572010-08-25 21:17:34 +01004292#define GGC 0x52
4293#define GGC_MEMORY_SIZE_MASK (0xf << 8)
4294#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4295#define GGC_MEMORY_SIZE_1M (0x1 << 8)
4296#define GGC_MEMORY_SIZE_2M (0x3 << 8)
4297#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4298#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4299#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4300#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
4301
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004302static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
David Woodhouse9eecabc2010-09-21 22:28:23 +01004303{
4304 unsigned short ggc;
4305
Adam Jacksoneecfd572010-08-25 21:17:34 +01004306 if (pci_read_config_word(dev, GGC, &ggc))
David Woodhouse9eecabc2010-09-21 22:28:23 +01004307 return;
4308
Adam Jacksoneecfd572010-08-25 21:17:34 +01004309 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
David Woodhouse9eecabc2010-09-21 22:28:23 +01004310 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4311 dmar_map_gfx = 0;
David Woodhouse6fbcfb32011-09-25 19:11:14 -07004312 } else if (dmar_map_gfx) {
4313 /* we have to ensure the gfx device is idle before we flush */
4314 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
4315 intel_iommu_strict = 1;
4316 }
David Woodhouse9eecabc2010-09-21 22:28:23 +01004317}
4318DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4319DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4320DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4321DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4322
David Woodhousee0fc7e02009-09-30 09:12:17 -07004323/* On Tylersburg chipsets, some BIOSes have been known to enable the
4324 ISOCH DMAR unit for the Azalia sound device, but not give it any
4325 TLB entries, which causes it to deadlock. Check for that. We do
4326 this in a function called from init_dmars(), instead of in a PCI
4327 quirk, because we don't want to print the obnoxious "BIOS broken"
4328 message if VT-d is actually disabled.
4329*/
4330static void __init check_tylersburg_isoch(void)
4331{
4332 struct pci_dev *pdev;
4333 uint32_t vtisochctrl;
4334
4335 /* If there's no Azalia in the system anyway, forget it. */
4336 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4337 if (!pdev)
4338 return;
4339 pci_dev_put(pdev);
4340
4341 /* System Management Registers. Might be hidden, in which case
4342 we can't do the sanity check. But that's OK, because the
4343 known-broken BIOSes _don't_ actually hide it, so far. */
4344 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4345 if (!pdev)
4346 return;
4347
4348 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4349 pci_dev_put(pdev);
4350 return;
4351 }
4352
4353 pci_dev_put(pdev);
4354
4355 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4356 if (vtisochctrl & 1)
4357 return;
4358
4359 /* Drop all bits other than the number of TLB entries */
4360 vtisochctrl &= 0x1c;
4361
4362 /* If we have the recommended number of TLB entries (16), fine. */
4363 if (vtisochctrl == 0x10)
4364 return;
4365
4366 /* Zero TLB entries? You get to ride the short bus to school. */
4367 if (!vtisochctrl) {
4368 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4369 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4370 dmi_get_system_info(DMI_BIOS_VENDOR),
4371 dmi_get_system_info(DMI_BIOS_VERSION),
4372 dmi_get_system_info(DMI_PRODUCT_VERSION));
4373 iommu_identity_mapping |= IDENTMAP_AZALIA;
4374 return;
4375 }
4376
4377 printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
4378 vtisochctrl);
4379}