/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
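
/*
 * Example for the limits above: with the default 48-bit guest address
 * width, __DOMAIN_MAX_PFN(48) is 2^36 - 1.  On a 64-bit kernel
 * DOMAIN_MAX_PFN(48) is that same value; on a 32-bit kernel the min_t()
 * clamps it to ULONG_MAX (2^32 - 1), which is the "fit in an unsigned
 * long" guarantee described in the comment above.
 */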

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
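
/*
 * Note on the value above: ~0xFFFUL sets every bit from bit 12 upward,
 * i.e. it advertises every power-of-two size of 4KiB or larger to the
 * IOMMU core (bit 12 == 4KiB, bit 21 == 2MiB, bit 30 == 1GiB, ...),
 * matching the "all page sizes that are an order of 4KiB" behaviour
 * described in the comment.
 */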

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}
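
/*
 * Worked example for the three helpers above: for the default 48-bit
 * domain width, width_to_agaw(48) == DIV_ROUND_UP(48 - 30, 9) == 2,
 * agaw_to_level(2) == 4 (a 4-level page table) and agaw_to_width(2)
 * == 30 + 2 * 9 == 48.  Likewise a 39-bit width gives agaw 1 and a
 * 3-level table.
 */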

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
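
/*
 * For reference, with LEVEL_STRIDE == 9 and 4KiB VT-d pages the level
 * arithmetic above works out as: a level-1 PTE maps one 4KiB page, a
 * level-2 PTE maps 2MiB and a level-3 PTE maps 1GiB.  Correspondingly
 * lvl_to_nr_pages(1) == 1, lvl_to_nr_pages(2) == 512 and
 * lvl_to_nr_pages(3) == 262144 pages.
 */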

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
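
/*
 * Note: on x86 PAGE_SHIFT == VTD_PAGE_SHIFT == 12, so the conversions
 * above are identity operations.  They only shift when the kernel page
 * size is larger than the 4KiB VT-d page, in which case one mm page
 * spans 2^(PAGE_SHIFT - VTD_PAGE_SHIFT) VT-d pages.
 */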

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if we can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root)?phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}

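/*
 * Layout note for the root/context code above and below: the root table
 * is a single page of ROOT_ENTRY_NR == 4096 / 16 == 256 entries, indexed
 * by PCI bus number.  Each present root entry points to a context table,
 * itself one page of 256 16-byte context entries indexed by devfn.
 */
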
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
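
/*
 * Note: a page-table page holds VTD_PAGE_SIZE / sizeof(struct dma_pte)
 * == 512 PTEs, so first_pte_in_page() is true exactly when pte points
 * at slot 0 of such a page.  The walk loops below use it to detect that
 * a pte++ has stepped past slot 511 into the next page and a fresh
 * lookup is needed.
 */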

/*
 * This domain is a static identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine, more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

/* define the limit of IOMMUs supported in each domain */
#ifdef	CONFIG_X86
# define	IOMMU_UNITS_SUPPORTED	MAX_IO_APICS
#else
# define	IOMMU_UNITS_SUPPORTED	64
#endif

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses*/

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct device *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units	*/
	struct acpi_dmar_header *hdr;	/* ACPI header		*/
	u64	base_address;		/* reserved base address*/
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
	struct page *freelist[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct device *dev);
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct device *dev);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static const struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void * alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int i, found = 0;

	domain->iommu_coherency = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		found = 1;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	if (found)
		return;

	/* No hardware attached; use lowest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_coherent(iommu->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
	rcu_read_unlock();
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
	}
}

static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		domain->iommu_superpage = 0;
		return;
	}

	/* set iommu_superpage to the smallest common denominator */
	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		mask &= cap_super_page_val(iommu->cap);
		if (!mask) {
			break;
		}
	}
	rcu_read_unlock();

	domain->iommu_superpage = fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
	domain_update_iommu_superpage(domain);
}

static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	struct intel_iommu *iommu;
	struct device *tmp;
	struct pci_dev *ptmp, *pdev = NULL;
	u16 segment = 0;
	int i;

	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		segment = pci_domain_nr(pdev->bus);
	} else if (ACPI_COMPANION(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)
			continue;

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			if (tmp == dev) {
				*bus = drhd->devices[i].bus;
				*devfn = drhd->devices[i].devfn;
				goto out;
			}

			if (!pdev || !dev_is_pci(tmp))
				continue;

			ptmp = to_pci_dev(tmp);
			if (ptmp->subordinate &&
			    ptmp->subordinate->number <= pdev->bus->number &&
			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
				goto got_pdev;
		}

		if (pdev && drhd->include_all) {
		got_pdev:
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;
			goto out;
		}
	}
	iommu = NULL;
 out:
	rcu_read_unlock();

	return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn], \
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);

	if (addr_width < BITS_PER_LONG && pfn >> addr_width)
		/* Address beyond IOMMU's addressing capabilities. */
		return NULL;

	parent = domain->pgd;

	while (1) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == *target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));
		}
		if (level == 1)
			break;

		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	if (!*target_level)
		*target_level = level;

	return pte;
}

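/*
 * Usage note for pfn_to_dma_pte(): when *target_level is 0 the walk
 * stops as soon as it hits a superpage, a non-present entry or level 1,
 * and reports that level back through *target_level; when it is
 * non-zero the walk descends to exactly that level, allocating any
 * missing intermediate page-table pages on the way (the cmpxchg64()
 * above keeps concurrent mappers from clobbering each other's newly
 * installed tables).
 */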

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (dma_pte_superpage(pte)) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte, a TLB flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}
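
/*
 * Note on the loop above: when dma_pfn_level_pte() finds no PTE for
 * start_pfn (an unmapped hole), large_page reports the level at which
 * the walk stopped and the hole is skipped in one step instead of one
 * 4KiB page at a time.  When it returns a superpage PTE, clearing it
 * advances start_pfn by lvl_to_nr_pages(large_page) (512 pages for
 * 2MiB, 262144 for 1GiB).
 */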

static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level - 1);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
					    int level, struct dma_pte *pte,
					    struct page *freelist)
{
	struct page *pg;

	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	pg->freelist = freelist;
	freelist = pg;

	if (level == 1)
		return freelist;

	pte = page_address(pg);
	do {
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			freelist = dma_pte_list_pagetables(domain, level - 1,
							   pte, freelist);
		pte++;
	} while (!first_pte_in_page(pte));

	return freelist;
}

static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
					struct dma_pte *pte, unsigned long pfn,
					unsigned long start_pfn,
					unsigned long last_pfn,
					struct page *freelist)
{
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;

		if (!dma_pte_present(pte))
			goto next;

		level_pfn = pfn & level_mask(level);

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

			dma_clear_pte(pte);
			if (!first_pte)
				first_pte = pte;
			last_pte = pte;
		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			freelist = dma_pte_clear_level(domain, level - 1,
						       phys_to_virt(dma_pte_addr(pte)),
						       level_pfn, start_pfn, last_pfn,
						       freelist);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	if (first_pte)
		domain_flush_cache(domain, first_pte,
				   (void *)++last_pte - (void *)first_pte);

	return freelist;
}

/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
struct page *domain_unmap(struct dmar_domain *domain,
			  unsigned long start_pfn,
			  unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct page *freelist = NULL;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
				       domain->pgd, 0, start_pfn, last_pfn, NULL);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		struct page *pgd_page = virt_to_page(domain->pgd);
		pgd_page->freelist = freelist;
		freelist = pgd_page;

		domain->pgd = NULL;
	}

	return freelist;
}

void dma_free_pagelist(struct page *freelist)
{
	struct page *pg;

	while ((pg = freelist)) {
		freelist = pg->freelist;
		free_pgtable_page(page_address(pg));
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* IH bit is passed in as part of address */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *
iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
			 u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct pci_dev *pdev;

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->iommu == iommu && info->bus == bus &&
		    info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev || !dev_is_pci(info->dev))
		return NULL;

	pdev = to_pci_dev(info->dev);

	if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(pdev))
		return NULL;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info || !dev_is_pci(info->dev))
		return;

	pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !dev_is_pci(info->dev) ||
	    !pci_ats_enabled(to_pci_dev(info->dev)))
		return;

	pci_disable_ats(to_pci_dev(info->dev));
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		struct pci_dev *pdev;
		if (!info->dev || !dev_is_pci(info->dev))
			continue;

		pdev = to_pci_dev(info->dev);
		if (!pci_ats_enabled(pdev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(pdev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
1336
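/*
 * Page-selective IOTLB flush for 'pages' pages starting at 'pfn' in
 * domain 'did'.  Falls back to a domain-selective flush when the
 * hardware lacks page-selective invalidation or the range is too big,
 * and also flushes the device IOTLBs unless this is a caching-mode
 * map operation.
 *
 * Usage sketch (hypothetical call, not taken from this file): to flush
 * three pages at IOVA pfn 0x1000,
 *
 *	iommu_flush_iotlb_psi(iommu, domain->id, 0x1000, 3, 0, 0);
 *
 * The count is rounded up to a power of two, so mask == 2 and the
 * hardware is asked to invalidate four naturally aligned pages.
 */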
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001337static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
David Woodhouseea8ea462014-03-05 17:09:32 +00001338 unsigned long pfn, unsigned int pages, int ih, int map)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001339{
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001340 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
David Woodhouse03d6a242009-06-28 15:33:46 +01001341 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001342
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001343 BUG_ON(pages == 0);
1344
David Woodhouseea8ea462014-03-05 17:09:32 +00001345 if (ih)
1346 ih = 1 << 6;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001347 /*
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001348	 * Fall back to a domain-selective flush if there is no PSI support
1349	 * or the size is too big.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001350	 * PSI requires the flush size to be a power of two pages, with the
1351	 * base address naturally aligned to that size.
1352 */
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001353 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1354 iommu->flush.flush_iotlb(iommu, did, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001355 DMA_TLB_DSI_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001356 else
David Woodhouseea8ea462014-03-05 17:09:32 +00001357 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
Yu Zhao9dd2fe82009-05-18 13:51:36 +08001358 DMA_TLB_PSI_FLUSH);
Yu Zhaobf92df32009-06-29 11:31:45 +08001359
1360 /*
Nadav Amit82653632010-04-01 13:24:40 +03001361	 * In caching mode, changing a page from non-present to present requires
1362	 * a flush.  However, the device IOTLB does not need to be flushed here.
Yu Zhaobf92df32009-06-29 11:31:45 +08001363 */
Nadav Amit82653632010-04-01 13:24:40 +03001364 if (!cap_caching_mode(iommu->cap) || !map)
Yu Zhao93a23a72009-05-18 13:51:37 +08001365 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001366}
1367
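/*
 * Turn off the protected memory regions: clear the Enable Protected
 * Memory bit in DMAR_PMEN_REG and poll until the Protected Region
 * Status bit reads back as clear.
 */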
mark grossf8bab732008-02-08 04:18:38 -08001368static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1369{
1370 u32 pmen;
1371 unsigned long flags;
1372
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001373 raw_spin_lock_irqsave(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001374 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1375 pmen &= ~DMA_PMEN_EPM;
1376 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1377
1378 /* wait for the protected region status bit to clear */
1379 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1380 readl, !(pmen & DMA_PMEN_PRS), pmen);
1381
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001382 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
mark grossf8bab732008-02-08 04:18:38 -08001383}
1384
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001385static int iommu_enable_translation(struct intel_iommu *iommu)
1386{
1387 u32 sts;
1388 unsigned long flags;
1389
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001390 raw_spin_lock_irqsave(&iommu->register_lock, flags);
David Woodhousec416daa2009-05-10 20:30:58 +01001391 iommu->gcmd |= DMA_GCMD_TE;
1392 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001393
1394	/* Make sure hardware completes it */
1395 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001396 readl, (sts & DMA_GSTS_TES), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001397
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001398 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001399 return 0;
1400}
1401
1402static int iommu_disable_translation(struct intel_iommu *iommu)
1403{
1404 u32 sts;
1405 unsigned long flag;
1406
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001407 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001408 iommu->gcmd &= ~DMA_GCMD_TE;
1409 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1410
1411	/* Make sure hardware completes it */
1412 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
David Woodhousec416daa2009-05-10 20:30:58 +01001413 readl, (!(sts & DMA_GSTS_TES)), sts);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001414
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02001415 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001416 return 0;
1417}
1418
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001419
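/*
 * Allocate the per-IOMMU domain bookkeeping: a bitmap of domain ids
 * sized by cap_ndoms() and an array of dmar_domain pointers.  Domain
 * id 0 is reserved up front when the IOMMU is in caching mode, since
 * hardware tags invalid translations with that id.
 */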
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001420static int iommu_init_domains(struct intel_iommu *iommu)
1421{
1422 unsigned long ndomains;
1423 unsigned long nlongs;
1424
1425 ndomains = cap_ndoms(iommu->cap);
Jiang Liu852bdb02014-01-06 14:18:11 +08001426 pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
1427 iommu->seq_id, ndomains);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001428 nlongs = BITS_TO_LONGS(ndomains);
1429
Donald Dutile94a91b52009-08-20 16:51:34 -04001430 spin_lock_init(&iommu->lock);
1431
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001432	/* TBD: there may be up to 64K domains;
1433	 * consider a different allocation scheme for future chips.
1434 */
1435 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1436 if (!iommu->domain_ids) {
Jiang Liu852bdb02014-01-06 14:18:11 +08001437 pr_err("IOMMU%d: allocating domain id array failed\n",
1438 iommu->seq_id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001439 return -ENOMEM;
1440 }
1441 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1442 GFP_KERNEL);
1443 if (!iommu->domains) {
Jiang Liu852bdb02014-01-06 14:18:11 +08001444 pr_err("IOMMU%d: allocating domain array failed\n",
1445 iommu->seq_id);
1446 kfree(iommu->domain_ids);
1447 iommu->domain_ids = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001448 return -ENOMEM;
1449 }
1450
1451 /*
1452	 * If caching mode is set, invalid translations are tagged with
1453	 * domain id 0, so we need to pre-allocate it.
1454 */
1455 if (cap_caching_mode(iommu->cap))
1456 set_bit(0, iommu->domain_ids);
1457 return 0;
1458}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001459
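/*
 * Release everything this IOMMU holds: drop its reference on each
 * domain still marked in domain_ids (destroying domains whose count
 * reaches zero), disable translation if it is still enabled, and free
 * the domain arrays and the context table.
 */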
Jiang Liua868e6b2014-01-06 14:18:20 +08001460static void free_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001461{
1462 struct dmar_domain *domain;
Jiang Liu5ced12a2014-01-06 14:18:22 +08001463 int i, count;
Weidong Hanc7151a82008-12-08 22:51:37 +08001464 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001465
Donald Dutile94a91b52009-08-20 16:51:34 -04001466 if ((iommu->domains) && (iommu->domain_ids)) {
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001467 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
Jiang Liua4eaa862014-02-19 14:07:30 +08001468 /*
1469 * Domain id 0 is reserved for invalid translation
1470 * if hardware supports caching mode.
1471 */
1472 if (cap_caching_mode(iommu->cap) && i == 0)
1473 continue;
1474
Donald Dutile94a91b52009-08-20 16:51:34 -04001475 domain = iommu->domains[i];
1476 clear_bit(i, iommu->domain_ids);
Weidong Hanc7151a82008-12-08 22:51:37 +08001477
Donald Dutile94a91b52009-08-20 16:51:34 -04001478 spin_lock_irqsave(&domain->iommu_lock, flags);
Jiang Liu5ced12a2014-01-06 14:18:22 +08001479 count = --domain->iommu_count;
1480 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Jiang Liu92d03cc2014-02-19 14:07:28 +08001481 if (count == 0)
1482 domain_exit(domain);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001483 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001484 }
1485
1486 if (iommu->gcmd & DMA_GCMD_TE)
1487 iommu_disable_translation(iommu);
1488
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001489 kfree(iommu->domains);
1490 kfree(iommu->domain_ids);
Jiang Liua868e6b2014-01-06 14:18:20 +08001491 iommu->domains = NULL;
1492 iommu->domain_ids = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001493
Weidong Hand9630fe2008-12-08 11:06:32 +08001494 g_iommus[iommu->seq_id] = NULL;
1495
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001496 /* free context mapping */
1497 free_context_table(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001498}
1499
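/*
 * Allocate a bare dmar_domain.  VM domains get an id from a private
 * counter here because that id is never written into a context entry;
 * other domains get their id later, in iommu_attach_domain().
 */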
Jiang Liu92d03cc2014-02-19 14:07:28 +08001500static struct dmar_domain *alloc_domain(bool vm)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001501{
Jiang Liu92d03cc2014-02-19 14:07:28 +08001502	/* Domain ids for virtual machines; they are never set in context entries */
1503 static atomic_t vm_domid = ATOMIC_INIT(0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001504 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001505
1506 domain = alloc_domain_mem();
1507 if (!domain)
1508 return NULL;
1509
Suresh Siddha4c923d42009-10-02 11:01:24 -07001510 domain->nid = -1;
Jiang Liu92d03cc2014-02-19 14:07:28 +08001511 domain->iommu_count = 0;
Mike Travis1b198bb2012-03-05 15:05:16 -08001512 memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
Weidong Hand71a2f32008-12-07 21:13:41 +08001513 domain->flags = 0;
Jiang Liu92d03cc2014-02-19 14:07:28 +08001514 spin_lock_init(&domain->iommu_lock);
1515 INIT_LIST_HEAD(&domain->devices);
1516 if (vm) {
1517 domain->id = atomic_inc_return(&vm_domid);
1518 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
1519 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001520
1521 return domain;
1522}
1523
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001524static int iommu_attach_domain(struct dmar_domain *domain,
1525 struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001526{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001527 int num;
1528 unsigned long ndomains;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001529 unsigned long flags;
1530
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001531 ndomains = cap_ndoms(iommu->cap);
Weidong Han8c11e792008-12-08 15:29:22 +08001532
1533 spin_lock_irqsave(&iommu->lock, flags);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001534
1535 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1536 if (num >= ndomains) {
1537 spin_unlock_irqrestore(&iommu->lock, flags);
1538 printk(KERN_ERR "IOMMU: no free domain ids\n");
1539 return -ENOMEM;
1540 }
1541
1542 domain->id = num;
Jiang Liu9ebd6822014-02-19 14:07:29 +08001543 domain->iommu_count++;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001544 set_bit(num, iommu->domain_ids);
Mike Travis1b198bb2012-03-05 15:05:16 -08001545 set_bit(iommu->seq_id, domain->iommu_bmp);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001546 iommu->domains[num] = domain;
1547 spin_unlock_irqrestore(&iommu->lock, flags);
1548
1549 return 0;
1550}
1551
1552static void iommu_detach_domain(struct dmar_domain *domain,
1553 struct intel_iommu *iommu)
1554{
1555 unsigned long flags;
1556 int num, ndomains;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001557
1558 spin_lock_irqsave(&iommu->lock, flags);
1559 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001560 for_each_set_bit(num, iommu->domain_ids, ndomains) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001561 if (iommu->domains[num] == domain) {
Jiang Liu92d03cc2014-02-19 14:07:28 +08001562 clear_bit(num, iommu->domain_ids);
1563 iommu->domains[num] = NULL;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001564 break;
1565 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001566 }
Weidong Han8c11e792008-12-08 15:29:22 +08001567 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001568}
1569
1570static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001571static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001572
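/*
 * Build the global list of IOVA ranges that must never be handed out
 * for DMA: the IOAPIC window and every PCI memory BAR in the system,
 * so that DMA addresses cannot collide with MMIO and be claimed as
 * peer-to-peer traffic.
 */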
Joseph Cihula51a63e62011-03-21 11:04:24 -07001573static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001574{
1575 struct pci_dev *pdev = NULL;
1576 struct iova *iova;
1577 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001578
David Millerf6611972008-02-06 01:36:23 -08001579 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001580
Mark Gross8a443df2008-03-04 14:59:31 -08001581 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1582 &reserved_rbtree_key);
1583
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001584 /* IOAPIC ranges shouldn't be accessed by DMA */
1585 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1586 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001587 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001588 printk(KERN_ERR "Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001589 return -ENODEV;
1590 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001591
1592 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1593 for_each_pci_dev(pdev) {
1594 struct resource *r;
1595
1596 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1597 r = &pdev->resource[i];
1598 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1599 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001600 iova = reserve_iova(&reserved_iova_list,
1601 IOVA_PFN(r->start),
1602 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001603 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001604 printk(KERN_ERR "Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001605 return -ENODEV;
1606 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001607 }
1608 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001609 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001610}
1611
1612static void domain_reserve_special_ranges(struct dmar_domain *domain)
1613{
1614 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1615}
1616
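/*
 * Round a guest address width up to the nearest width the page-table
 * layout can express: 12 bits of page offset plus a whole number of
 * 9-bit levels, capped at 64.  For example, gaw = 48 gives
 * (48 - 12) % 9 == 0 and stays 48, while gaw = 50 leaves a remainder
 * of 2 and is rounded up to 57.
 */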
1617static inline int guestwidth_to_adjustwidth(int gaw)
1618{
1619 int agaw;
1620 int r = (gaw - 12) % 9;
1621
1622 if (r == 0)
1623 agaw = gaw;
1624 else
1625 agaw = gaw + 9 - r;
1626 if (agaw > 64)
1627 agaw = 64;
1628 return agaw;
1629}
1630
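/*
 * Second-stage initialization of a domain: reserve the special IOVA
 * ranges, derive the AGAW from the requested guest width (widening it
 * if the hardware does not support that exact value), cache the
 * IOMMU's coherency, snooping and superpage capabilities, and allocate
 * the top-level page directory.
 */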
1631static int domain_init(struct dmar_domain *domain, int guest_width)
1632{
1633 struct intel_iommu *iommu;
1634 int adjust_width, agaw;
1635 unsigned long sagaw;
1636
David Millerf6611972008-02-06 01:36:23 -08001637 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001638 domain_reserve_special_ranges(domain);
1639
1640 /* calculate AGAW */
Weidong Han8c11e792008-12-08 15:29:22 +08001641 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001642 if (guest_width > cap_mgaw(iommu->cap))
1643 guest_width = cap_mgaw(iommu->cap);
1644 domain->gaw = guest_width;
1645 adjust_width = guestwidth_to_adjustwidth(guest_width);
1646 agaw = width_to_agaw(adjust_width);
1647 sagaw = cap_sagaw(iommu->cap);
1648 if (!test_bit(agaw, &sagaw)) {
1649 /* hardware doesn't support it, choose a bigger one */
1650 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1651 agaw = find_next_bit(&sagaw, 5, agaw);
1652 if (agaw >= 5)
1653 return -ENODEV;
1654 }
1655 domain->agaw = agaw;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001656
Weidong Han8e6040972008-12-08 15:49:06 +08001657 if (ecap_coherent(iommu->ecap))
1658 domain->iommu_coherency = 1;
1659 else
1660 domain->iommu_coherency = 0;
1661
Sheng Yang58c610b2009-03-18 15:33:05 +08001662 if (ecap_sc_support(iommu->ecap))
1663 domain->iommu_snooping = 1;
1664 else
1665 domain->iommu_snooping = 0;
1666
David Woodhouse214e39a2014-03-19 10:38:49 +00001667 if (intel_iommu_superpage)
1668 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1669 else
1670 domain->iommu_superpage = 0;
1671
Suresh Siddha4c923d42009-10-02 11:01:24 -07001672 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001673
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001674 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001675 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001676 if (!domain->pgd)
1677 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001678 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001679 return 0;
1680}
1681
1682static void domain_exit(struct dmar_domain *domain)
1683{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001684 struct dmar_drhd_unit *drhd;
1685 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00001686 struct page *freelist = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001687
1688	/* Domain 0 is reserved, so don't process it */
1689 if (!domain)
1690 return;
1691
Alex Williamson7b668352011-05-24 12:02:41 +01001692 /* Flush any lazy unmaps that may reference this domain */
1693 if (!intel_iommu_strict)
1694 flush_unmaps_timeout(0);
1695
Jiang Liu92d03cc2014-02-19 14:07:28 +08001696 /* remove associated devices */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001697 domain_remove_dev_info(domain);
Jiang Liu92d03cc2014-02-19 14:07:28 +08001698
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001699 /* destroy iovas */
1700 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001701
David Woodhouseea8ea462014-03-05 17:09:32 +00001702 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001703
Jiang Liu92d03cc2014-02-19 14:07:28 +08001704 /* clear attached or cached domains */
Jiang Liu0e242612014-02-19 14:07:34 +08001705 rcu_read_lock();
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001706 for_each_active_iommu(iommu, drhd)
Jiang Liu92d03cc2014-02-19 14:07:28 +08001707 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1708 test_bit(iommu->seq_id, domain->iommu_bmp))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001709 iommu_detach_domain(domain, iommu);
Jiang Liu0e242612014-02-19 14:07:34 +08001710 rcu_read_unlock();
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001711
David Woodhouseea8ea462014-03-05 17:09:32 +00001712 dma_free_pagelist(freelist);
1713
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001714 free_domain_mem(domain);
1715}
1716
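/*
 * Program one context entry (bus, devfn) for this domain on the given
 * IOMMU: pick or reuse a domain id, skip upper page-table levels when
 * the IOMMU's AGAW is smaller than the domain's, and set the address
 * width and translation type.  Pass-through entries use the maximum
 * AGAW and leave the address root unused.  Caching-mode hardware needs
 * a context-cache and DSI IOTLB flush afterwards; otherwise flushing
 * the write buffer is enough.
 */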
David Woodhouse64ae8922014-03-09 12:52:30 -07001717static int domain_context_mapping_one(struct dmar_domain *domain,
1718 struct intel_iommu *iommu,
1719 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001720{
1721 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001722 unsigned long flags;
Weidong Hanea6606b2008-12-08 23:08:15 +08001723 struct dma_pte *pgd;
1724 unsigned long num;
1725 unsigned long ndomains;
1726 int id;
1727 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001728 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001729
1730 pr_debug("Set context mapping for %02x:%02x.%d\n",
1731 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001732
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001733 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001734 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1735 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001736
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001737 context = device_to_context_entry(iommu, bus, devfn);
1738 if (!context)
1739 return -ENOMEM;
1740 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001741 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001742 spin_unlock_irqrestore(&iommu->lock, flags);
1743 return 0;
1744 }
1745
Weidong Hanea6606b2008-12-08 23:08:15 +08001746 id = domain->id;
1747 pgd = domain->pgd;
1748
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001749 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1750 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001751 int found = 0;
1752
1753 /* find an available domain id for this device in iommu */
1754 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001755 for_each_set_bit(num, iommu->domain_ids, ndomains) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001756 if (iommu->domains[num] == domain) {
1757 id = num;
1758 found = 1;
1759 break;
1760 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001761 }
1762
1763 if (found == 0) {
1764 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1765 if (num >= ndomains) {
1766 spin_unlock_irqrestore(&iommu->lock, flags);
1767 printk(KERN_ERR "IOMMU: no free domain ids\n");
1768 return -EFAULT;
1769 }
1770
1771 set_bit(num, iommu->domain_ids);
1772 iommu->domains[num] = domain;
1773 id = num;
1774 }
1775
1776		/* Skip the top levels of the page tables when the
1777		 * iommu supports a smaller agaw than the domain's.
Chris Wright1672af12009-12-02 12:06:34 -08001778 * Unnecessary for PT mode.
Weidong Hanea6606b2008-12-08 23:08:15 +08001779 */
Chris Wright1672af12009-12-02 12:06:34 -08001780 if (translation != CONTEXT_TT_PASS_THROUGH) {
1781 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1782 pgd = phys_to_virt(dma_pte_addr(pgd));
1783 if (!dma_pte_present(pgd)) {
1784 spin_unlock_irqrestore(&iommu->lock, flags);
1785 return -ENOMEM;
1786 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001787 }
1788 }
1789 }
1790
1791 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001792
Yu Zhao93a23a72009-05-18 13:51:37 +08001793 if (translation != CONTEXT_TT_PASS_THROUGH) {
David Woodhouse64ae8922014-03-09 12:52:30 -07001794 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
Yu Zhao93a23a72009-05-18 13:51:37 +08001795 translation = info ? CONTEXT_TT_DEV_IOTLB :
1796 CONTEXT_TT_MULTI_LEVEL;
1797 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001798 /*
1799	 * In pass-through mode, AW must be programmed with the largest
1800	 * AGAW value supported by the hardware, and ASR is ignored.
1801 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001802 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001803 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001804 else {
1805 context_set_address_root(context, virt_to_phys(pgd));
1806 context_set_address_width(context, iommu->agaw);
1807 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001808
1809 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001810 context_set_fault_enable(context);
1811 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001812 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001813
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001814 /*
1815	 * It's a non-present to present mapping. If the hardware doesn't cache
1816	 * non-present entries, we only need to flush the write buffer. If it
1817	 * _does_ cache non-present entries, then it does so in the special
1818 * domain #0, which we have to flush:
1819 */
1820 if (cap_caching_mode(iommu->cap)) {
1821 iommu->flush.flush_context(iommu, 0,
1822 (((u16)bus) << 8) | devfn,
1823 DMA_CCMD_MASK_NOBIT,
1824 DMA_CCMD_DEVICE_INVL);
Nadav Amit82653632010-04-01 13:24:40 +03001825 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001826 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001827 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001828 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001829 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001830 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001831
1832 spin_lock_irqsave(&domain->iommu_lock, flags);
Mike Travis1b198bb2012-03-05 15:05:16 -08001833 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
Weidong Hanc7151a82008-12-08 22:51:37 +08001834 domain->iommu_count++;
Suresh Siddha4c923d42009-10-02 11:01:24 -07001835 if (domain->iommu_count == 1)
1836 domain->nid = iommu->node;
Sheng Yang58c610b2009-03-18 15:33:05 +08001837 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08001838 }
1839 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001840 return 0;
1841}
1842
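/*
 * domain_context_mapping() below must cover every DMA alias of a PCI
 * device (bridges, devfn quirks, etc.), so the per-alias work is
 * packaged in a callback driven by pci_for_each_dma_alias().
 */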
Alex Williamson579305f2014-07-03 09:51:43 -06001843struct domain_context_mapping_data {
1844 struct dmar_domain *domain;
1845 struct intel_iommu *iommu;
1846 int translation;
1847};
1848
1849static int domain_context_mapping_cb(struct pci_dev *pdev,
1850 u16 alias, void *opaque)
1851{
1852 struct domain_context_mapping_data *data = opaque;
1853
1854 return domain_context_mapping_one(data->domain, data->iommu,
1855 PCI_BUS_NUM(alias), alias & 0xff,
1856 data->translation);
1857}
1858
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001859static int
David Woodhousee1f167f2014-03-09 15:24:46 -07001860domain_context_mapping(struct dmar_domain *domain, struct device *dev,
1861 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001862{
David Woodhouse64ae8922014-03-09 12:52:30 -07001863 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07001864 u8 bus, devfn;
Alex Williamson579305f2014-07-03 09:51:43 -06001865 struct domain_context_mapping_data data;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001866
David Woodhousee1f167f2014-03-09 15:24:46 -07001867 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse64ae8922014-03-09 12:52:30 -07001868 if (!iommu)
1869 return -ENODEV;
1870
Alex Williamson579305f2014-07-03 09:51:43 -06001871 if (!dev_is_pci(dev))
1872 return domain_context_mapping_one(domain, iommu, bus, devfn,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001873 translation);
Alex Williamson579305f2014-07-03 09:51:43 -06001874
1875 data.domain = domain;
1876 data.iommu = iommu;
1877 data.translation = translation;
1878
1879 return pci_for_each_dma_alias(to_pci_dev(dev),
1880 &domain_context_mapping_cb, &data);
1881}
1882
1883static int domain_context_mapped_cb(struct pci_dev *pdev,
1884 u16 alias, void *opaque)
1885{
1886 struct intel_iommu *iommu = opaque;
1887
1888 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001889}
1890
David Woodhousee1f167f2014-03-09 15:24:46 -07001891static int domain_context_mapped(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001892{
Weidong Han5331fe62008-12-08 23:00:00 +08001893 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07001894 u8 bus, devfn;
Weidong Han5331fe62008-12-08 23:00:00 +08001895
David Woodhousee1f167f2014-03-09 15:24:46 -07001896 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001897 if (!iommu)
1898 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001899
Alex Williamson579305f2014-07-03 09:51:43 -06001900 if (!dev_is_pci(dev))
1901 return device_context_mapped(iommu, bus, devfn);
David Woodhousee1f167f2014-03-09 15:24:46 -07001902
Alex Williamson579305f2014-07-03 09:51:43 -06001903 return !pci_for_each_dma_alias(to_pci_dev(dev),
1904 domain_context_mapped_cb, iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001905}
1906
Fenghua Yuf5329592009-08-04 15:09:37 -07001907/* Returns the number of VT-d pages, but aligned up to the MM page size */
1908static inline unsigned long aligned_nrpages(unsigned long host_addr,
1909 size_t size)
1910{
1911 host_addr &= ~PAGE_MASK;
1912 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1913}
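/*
 * Example for aligned_nrpages(), assuming 4KiB pages on both sides
 * (PAGE_SHIFT == VTD_PAGE_SHIFT == 12): host_addr = 0x1234 and
 * size = 0x2000 keep the 0x234 offset, align 0x2234 up to 0x3000,
 * and therefore need three VT-d pages.
 */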
1914
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001915/* Return largest possible superpage level for a given mapping */
1916static inline int hardware_largepage_caps(struct dmar_domain *domain,
1917 unsigned long iov_pfn,
1918 unsigned long phy_pfn,
1919 unsigned long pages)
1920{
1921 int support, level = 1;
1922 unsigned long pfnmerge;
1923
1924 support = domain->iommu_superpage;
1925
1926 /* To use a large page, the virtual *and* physical addresses
1927 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1928 of them will mean we have to use smaller pages. So just
1929 merge them and check both at once. */
1930 pfnmerge = iov_pfn | phy_pfn;
1931
1932 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1933 pages >>= VTD_STRIDE_SHIFT;
1934 if (!pages)
1935 break;
1936 pfnmerge >>= VTD_STRIDE_SHIFT;
1937 level++;
1938 support--;
1939 }
1940 return level;
1941}
1942
David Woodhouse9051aa02009-06-29 12:30:54 +01001943static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1944 struct scatterlist *sg, unsigned long phys_pfn,
1945 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01001946{
1947 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01001948 phys_addr_t uninitialized_var(pteval);
David Woodhousee1605492009-06-29 11:17:38 +01001949 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
David Woodhouse9051aa02009-06-29 12:30:54 +01001950 unsigned long sg_res;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001951 unsigned int largepage_lvl = 0;
1952 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01001953
1954 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1955
1956 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1957 return -EINVAL;
1958
1959 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1960
David Woodhouse9051aa02009-06-29 12:30:54 +01001961 if (sg)
1962 sg_res = 0;
1963 else {
1964 sg_res = nr_pages + 1;
1965 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1966 }
1967
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001968 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01001969 uint64_t tmp;
1970
David Woodhousee1605492009-06-29 11:17:38 +01001971 if (!sg_res) {
Fenghua Yuf5329592009-08-04 15:09:37 -07001972 sg_res = aligned_nrpages(sg->offset, sg->length);
David Woodhousee1605492009-06-29 11:17:38 +01001973 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1974 sg->dma_length = sg->length;
1975 pteval = page_to_phys(sg_page(sg)) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001976 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01001977 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001978
David Woodhousee1605492009-06-29 11:17:38 +01001979 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001980 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
1981
David Woodhouse5cf0a762014-03-19 16:07:49 +00001982 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01001983 if (!pte)
1984 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001985			/* It is a large page */
Woodhouse, David6491d4d2012-12-19 13:25:35 +00001986 if (largepage_lvl > 1) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001987 pteval |= DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00001988				/* Ensure that any old small page tables are removed
1989				   to make room for the superpage, if they exist. */
1990 dma_pte_clear_range(domain, iov_pfn,
1991 iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
1992 dma_pte_free_pagetable(domain, iov_pfn,
1993 iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
1994 } else {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001995 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
Woodhouse, David6491d4d2012-12-19 13:25:35 +00001996 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001997
David Woodhousee1605492009-06-29 11:17:38 +01001998 }
1999		/* We don't need a lock here; nobody else
2000		 * touches this iova range.
2001 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01002002 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01002003 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01002004 static int dumps = 5;
David Woodhousec85994e2009-07-01 19:21:24 +01002005 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2006 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01002007 if (dumps) {
2008 dumps--;
2009 debug_dma_dump_mappings(NULL);
2010 }
2011 WARN_ON(1);
2012 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002013
2014 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2015
2016 BUG_ON(nr_pages < lvl_pages);
2017 BUG_ON(sg_res < lvl_pages);
2018
2019 nr_pages -= lvl_pages;
2020 iov_pfn += lvl_pages;
2021 phys_pfn += lvl_pages;
2022 pteval += lvl_pages * VTD_PAGE_SIZE;
2023 sg_res -= lvl_pages;
2024
2025 /* If the next PTE would be the first in a new page, then we
2026 need to flush the cache on the entries we've just written.
2027 And then we'll need to recalculate 'pte', so clear it and
2028 let it get set again in the if (!pte) block above.
2029
2030 If we're done (!nr_pages) we need to flush the cache too.
2031
2032 Also if we've been setting superpages, we may need to
2033 recalculate 'pte' and switch back to smaller pages for the
2034 end of the mapping, if the trailing size is not enough to
2035 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01002036 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002037 if (!nr_pages || first_pte_in_page(pte) ||
2038 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01002039 domain_flush_cache(domain, first_pte,
2040 (void *)pte - (void *)first_pte);
2041 pte = NULL;
2042 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01002043
2044 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01002045 sg = sg_next(sg);
2046 }
2047 return 0;
2048}
2049
David Woodhouse9051aa02009-06-29 12:30:54 +01002050static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2051 struct scatterlist *sg, unsigned long nr_pages,
2052 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002053{
David Woodhouse9051aa02009-06-29 12:30:54 +01002054 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2055}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002056
David Woodhouse9051aa02009-06-29 12:30:54 +01002057static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2058 unsigned long phys_pfn, unsigned long nr_pages,
2059 int prot)
2060{
2061 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002062}
2063
Weidong Hanc7151a82008-12-08 22:51:37 +08002064static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002065{
Weidong Hanc7151a82008-12-08 22:51:37 +08002066 if (!iommu)
2067 return;
Weidong Han8c11e792008-12-08 15:29:22 +08002068
2069 clear_context_table(iommu, bus, devfn);
2070 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002071 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002072 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002073}
2074
David Woodhouse109b9b02012-05-25 17:43:02 +01002075static inline void unlink_domain_info(struct device_domain_info *info)
2076{
2077 assert_spin_locked(&device_domain_lock);
2078 list_del(&info->link);
2079 list_del(&info->global);
2080 if (info->dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002081 info->dev->archdata.iommu = NULL;
David Woodhouse109b9b02012-05-25 17:43:02 +01002082}
2083
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002084static void domain_remove_dev_info(struct dmar_domain *domain)
2085{
Yijing Wang3a74ca02014-05-20 20:37:47 +08002086 struct device_domain_info *info, *tmp;
Jiang Liu92d03cc2014-02-19 14:07:28 +08002087 unsigned long flags, flags2;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002088
2089 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wang3a74ca02014-05-20 20:37:47 +08002090 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
David Woodhouse109b9b02012-05-25 17:43:02 +01002091 unlink_domain_info(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002092 spin_unlock_irqrestore(&device_domain_lock, flags);
2093
Yu Zhao93a23a72009-05-18 13:51:37 +08002094 iommu_disable_dev_iotlb(info);
David Woodhouse7c7faa12014-03-09 13:33:06 -07002095 iommu_detach_dev(info->iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002096
Jiang Liu92d03cc2014-02-19 14:07:28 +08002097 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
David Woodhouse7c7faa12014-03-09 13:33:06 -07002098 iommu_detach_dependent_devices(info->iommu, info->dev);
Jiang Liu92d03cc2014-02-19 14:07:28 +08002099 /* clear this iommu in iommu_bmp, update iommu count
2100 * and capabilities
2101 */
2102 spin_lock_irqsave(&domain->iommu_lock, flags2);
David Woodhouse7c7faa12014-03-09 13:33:06 -07002103 if (test_and_clear_bit(info->iommu->seq_id,
Jiang Liu92d03cc2014-02-19 14:07:28 +08002104 domain->iommu_bmp)) {
2105 domain->iommu_count--;
2106 domain_update_iommu_cap(domain);
2107 }
2108 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
2109 }
2110
2111 free_devinfo_mem(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002112 spin_lock_irqsave(&device_domain_lock, flags);
2113 }
2114 spin_unlock_irqrestore(&device_domain_lock, flags);
2115}
2116
2117/*
2118 * find_domain
David Woodhouse1525a292014-03-06 16:19:30 +00002119 * Note: the device_domain_info is stored in struct device->archdata.iommu
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002120 */
David Woodhouse1525a292014-03-06 16:19:30 +00002121static struct dmar_domain *find_domain(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002122{
2123 struct device_domain_info *info;
2124
2125	/* No locking here; we assume no domain exits in the normal case */
David Woodhouse1525a292014-03-06 16:19:30 +00002126 info = dev->archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002127 if (info)
2128 return info->domain;
2129 return NULL;
2130}
2131
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002132static inline struct device_domain_info *
Jiang Liu745f2582014-02-19 14:07:26 +08002133dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2134{
2135 struct device_domain_info *info;
2136
2137 list_for_each_entry(info, &device_domain_list, global)
David Woodhouse41e80dca2014-03-09 13:55:54 -07002138 if (info->iommu->segment == segment && info->bus == bus &&
Jiang Liu745f2582014-02-19 14:07:26 +08002139 info->devfn == devfn)
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002140 return info;
Jiang Liu745f2582014-02-19 14:07:26 +08002141
2142 return NULL;
2143}
2144
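/*
 * Allocate and register a device_domain_info linking (bus, devfn) --
 * and optionally a struct device -- to a domain.  If another caller or
 * a DMA alias registered the device first, the existing domain is
 * returned instead and the caller is expected to free the one it
 * passed in.
 */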
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002145static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
David Woodhouse41e80dca2014-03-09 13:55:54 -07002146 int bus, int devfn,
David Woodhouseb718cd32014-03-09 13:11:33 -07002147 struct device *dev,
2148 struct dmar_domain *domain)
Jiang Liu745f2582014-02-19 14:07:26 +08002149{
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002150 struct dmar_domain *found = NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002151 struct device_domain_info *info;
2152 unsigned long flags;
2153
2154 info = alloc_devinfo_mem();
2155 if (!info)
David Woodhouseb718cd32014-03-09 13:11:33 -07002156 return NULL;
Jiang Liu745f2582014-02-19 14:07:26 +08002157
Jiang Liu745f2582014-02-19 14:07:26 +08002158 info->bus = bus;
2159 info->devfn = devfn;
2160 info->dev = dev;
2161 info->domain = domain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002162 info->iommu = iommu;
Jiang Liu745f2582014-02-19 14:07:26 +08002163 if (!dev)
2164 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
2165
2166 spin_lock_irqsave(&device_domain_lock, flags);
2167 if (dev)
David Woodhouse0bcb3e22014-03-06 17:12:03 +00002168 found = find_domain(dev);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002169 else {
2170 struct device_domain_info *info2;
David Woodhouse41e80dca2014-03-09 13:55:54 -07002171 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002172 if (info2)
2173 found = info2->domain;
2174 }
Jiang Liu745f2582014-02-19 14:07:26 +08002175 if (found) {
2176 spin_unlock_irqrestore(&device_domain_lock, flags);
2177 free_devinfo_mem(info);
David Woodhouseb718cd32014-03-09 13:11:33 -07002178 /* Caller must free the original domain */
2179 return found;
Jiang Liu745f2582014-02-19 14:07:26 +08002180 }
2181
David Woodhouseb718cd32014-03-09 13:11:33 -07002182 list_add(&info->link, &domain->devices);
2183 list_add(&info->global, &device_domain_list);
2184 if (dev)
2185 dev->archdata.iommu = info;
2186 spin_unlock_irqrestore(&device_domain_lock, flags);
2187
2188 return domain;
Jiang Liu745f2582014-02-19 14:07:26 +08002189}
2190
Alex Williamson579305f2014-07-03 09:51:43 -06002191static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2192{
2193 *(u16 *)opaque = alias;
2194 return 0;
2195}
2196
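/*
 * Find the domain a device should use, creating one if necessary.  A
 * PCI device first looks up its topmost DMA alias so that all aliases
 * share a single domain; a freshly allocated domain is dropped again
 * if the alias or a racing caller already registered one.
 */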
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002197/* domain is initialized */
David Woodhouse146922e2014-03-09 15:44:17 -07002198static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002199{
Alex Williamson579305f2014-07-03 09:51:43 -06002200 struct dmar_domain *domain, *tmp;
2201 struct intel_iommu *iommu;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002202 struct device_domain_info *info;
Alex Williamson579305f2014-07-03 09:51:43 -06002203 u16 dma_alias;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002204 unsigned long flags;
Yijing Wangaa4d0662014-05-26 20:14:06 +08002205 u8 bus, devfn;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002206
David Woodhouse146922e2014-03-09 15:44:17 -07002207 domain = find_domain(dev);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002208 if (domain)
2209 return domain;
2210
David Woodhouse146922e2014-03-09 15:44:17 -07002211 iommu = device_to_iommu(dev, &bus, &devfn);
2212 if (!iommu)
Alex Williamson579305f2014-07-03 09:51:43 -06002213 return NULL;
2214
2215 if (dev_is_pci(dev)) {
2216 struct pci_dev *pdev = to_pci_dev(dev);
2217
2218 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2219
2220 spin_lock_irqsave(&device_domain_lock, flags);
2221 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2222 PCI_BUS_NUM(dma_alias),
2223 dma_alias & 0xff);
2224 if (info) {
2225 iommu = info->iommu;
2226 domain = info->domain;
2227 }
2228 spin_unlock_irqrestore(&device_domain_lock, flags);
2229
2230		/* The DMA alias already has a domain; use it */
2231 if (info)
2232 goto found_domain;
2233 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002234
David Woodhouse146922e2014-03-09 15:44:17 -07002235	/* Allocate and initialize a new domain for the device */
Jiang Liu92d03cc2014-02-19 14:07:28 +08002236 domain = alloc_domain(false);
Jiang Liu745f2582014-02-19 14:07:26 +08002237 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002238 return NULL;
2239
Jiang Liu745f2582014-02-19 14:07:26 +08002240 if (iommu_attach_domain(domain, iommu)) {
Alex Williamson2fe9723d2011-03-04 14:52:30 -07002241 free_domain_mem(domain);
Alex Williamson579305f2014-07-03 09:51:43 -06002242 return NULL;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002243 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002244
Alex Williamson579305f2014-07-03 09:51:43 -06002245 if (domain_init(domain, gaw)) {
2246 domain_exit(domain);
2247 return NULL;
2248 }
2249
2250 /* register PCI DMA alias device */
2251 if (dev_is_pci(dev)) {
2252 tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2253 dma_alias & 0xff, NULL, domain);
2254
2255 if (!tmp || tmp != domain) {
2256 domain_exit(domain);
2257 domain = tmp;
2258 }
2259
David Woodhouseb718cd32014-03-09 13:11:33 -07002260 if (!domain)
Alex Williamson579305f2014-07-03 09:51:43 -06002261 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002262 }
2263
2264found_domain:
Alex Williamson579305f2014-07-03 09:51:43 -06002265 tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2266
2267 if (!tmp || tmp != domain) {
2268 domain_exit(domain);
2269 domain = tmp;
2270 }
David Woodhouseb718cd32014-03-09 13:11:33 -07002271
2272 return domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002273}
2274
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002275static int iommu_identity_mapping;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002276#define IDENTMAP_ALL 1
2277#define IDENTMAP_GFX 2
2278#define IDENTMAP_AZALIA 4
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002279
David Woodhouseb2132032009-06-26 18:50:28 +01002280static int iommu_domain_identity_map(struct dmar_domain *domain,
2281 unsigned long long start,
2282 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002283{
David Woodhousec5395d52009-06-28 16:35:56 +01002284 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2285 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002286
David Woodhousec5395d52009-06-28 16:35:56 +01002287 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2288 dma_to_mm_pfn(last_vpfn))) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002289 printk(KERN_ERR "IOMMU: reserve iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002290 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002291 }
2292
David Woodhousec5395d52009-06-28 16:35:56 +01002293 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2294 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002295 /*
2296	 * The RMRR range might overlap with a physical memory range,
2297	 * so clear it first.
2298 */
David Woodhousec5395d52009-06-28 16:35:56 +01002299 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002300
David Woodhousec5395d52009-06-28 16:35:56 +01002301 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2302 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002303 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002304}
2305
David Woodhouse0b9d9752014-03-09 15:48:15 -07002306static int iommu_prepare_identity_map(struct device *dev,
David Woodhouseb2132032009-06-26 18:50:28 +01002307 unsigned long long start,
2308 unsigned long long end)
2309{
2310 struct dmar_domain *domain;
2311 int ret;
2312
David Woodhouse0b9d9752014-03-09 15:48:15 -07002313 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01002314 if (!domain)
2315 return -ENOMEM;
2316
David Woodhouse19943b02009-08-04 16:19:20 +01002317	/* For _hardware_ passthrough, don't bother. But for software
2318	   passthrough we do it anyway -- it may indicate a memory
2319	   range which is reserved in E820 and therefore didn't get set
2320	   up in si_domain to start with. */
2321 if (domain == si_domain && hw_pass_through) {
2322		printk(KERN_INFO "Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
David Woodhouse0b9d9752014-03-09 15:48:15 -07002323 dev_name(dev), start, end);
David Woodhouse19943b02009-08-04 16:19:20 +01002324 return 0;
2325 }
2326
2327 printk(KERN_INFO
2328 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
David Woodhouse0b9d9752014-03-09 15:48:15 -07002329 dev_name(dev), start, end);
David Woodhouse2ff729f2009-08-26 14:25:41 +01002330
David Woodhouse5595b522009-12-02 09:21:55 +00002331 if (end < start) {
2332 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2333 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2334 dmi_get_system_info(DMI_BIOS_VENDOR),
2335 dmi_get_system_info(DMI_BIOS_VERSION),
2336 dmi_get_system_info(DMI_PRODUCT_VERSION));
2337 ret = -EIO;
2338 goto error;
2339 }
2340
David Woodhouse2ff729f2009-08-26 14:25:41 +01002341 if (end >> agaw_to_width(domain->agaw)) {
2342 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2343 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2344 agaw_to_width(domain->agaw),
2345 dmi_get_system_info(DMI_BIOS_VENDOR),
2346 dmi_get_system_info(DMI_BIOS_VERSION),
2347 dmi_get_system_info(DMI_PRODUCT_VERSION));
2348 ret = -EIO;
2349 goto error;
2350 }
David Woodhouse19943b02009-08-04 16:19:20 +01002351
David Woodhouseb2132032009-06-26 18:50:28 +01002352 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002353 if (ret)
2354 goto error;
2355
2356 /* context entry init */
David Woodhouse0b9d9752014-03-09 15:48:15 -07002357 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01002358 if (ret)
2359 goto error;
2360
2361 return 0;
2362
2363 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002364 domain_exit(domain);
2365 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002366}
2367
2368static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
David Woodhouse0b9d9752014-03-09 15:48:15 -07002369 struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002370{
David Woodhouse0b9d9752014-03-09 15:48:15 -07002371 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002372 return 0;
David Woodhouse0b9d9752014-03-09 15:48:15 -07002373 return iommu_prepare_identity_map(dev, rmrr->base_address,
2374 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002375}
2376
Suresh Siddhad3f13812011-08-23 17:05:25 -07002377#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002378static inline void iommu_prepare_isa(void)
2379{
2380 struct pci_dev *pdev;
2381 int ret;
2382
2383 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2384 if (!pdev)
2385 return;
2386
David Woodhousec7ab48d2009-06-26 19:10:36 +01002387 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse0b9d9752014-03-09 15:48:15 -07002388 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002389
2390 if (ret)
David Woodhousec7ab48d2009-06-26 19:10:36 +01002391 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2392 "floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002393
Yijing Wang9b27e822014-05-20 20:37:52 +08002394 pci_dev_put(pdev);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002395}
2396#else
2397static inline void iommu_prepare_isa(void)
2398{
2399 return;
2400}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002401#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002402
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002403static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002404
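/*
 * Build the static identity (si) domain used for passthrough: attach
 * it to every active IOMMU and, unless hardware passthrough is in use,
 * add 1:1 mappings for every usable memory range of every online node.
 */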
Matt Kraai071e1372009-08-23 22:30:22 -07002405static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002406{
2407 struct dmar_drhd_unit *drhd;
2408 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002409 int nid, ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002410
Jiang Liu92d03cc2014-02-19 14:07:28 +08002411 si_domain = alloc_domain(false);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002412 if (!si_domain)
2413 return -EFAULT;
2414
Jiang Liu92d03cc2014-02-19 14:07:28 +08002415 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2416
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002417 for_each_active_iommu(iommu, drhd) {
2418 ret = iommu_attach_domain(si_domain, iommu);
2419 if (ret) {
2420 domain_exit(si_domain);
2421 return -EFAULT;
2422 }
2423 }
2424
2425 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2426 domain_exit(si_domain);
2427 return -EFAULT;
2428 }
2429
Jiang Liu9544c002014-01-06 14:18:13 +08002430 pr_debug("IOMMU: identity mapping domain is domain %d\n",
2431 si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002432
David Woodhouse19943b02009-08-04 16:19:20 +01002433 if (hw)
2434 return 0;
2435
David Woodhousec7ab48d2009-06-26 19:10:36 +01002436 for_each_online_node(nid) {
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002437 unsigned long start_pfn, end_pfn;
2438 int i;
2439
2440 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2441 ret = iommu_domain_identity_map(si_domain,
2442 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2443 if (ret)
2444 return ret;
2445 }
David Woodhousec7ab48d2009-06-26 19:10:36 +01002446 }
2447
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002448 return 0;
2449}
2450
David Woodhouse9b226622014-03-09 14:03:28 -07002451static int identity_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002452{
2453 struct device_domain_info *info;
2454
2455 if (likely(!iommu_identity_mapping))
2456 return 0;
2457
David Woodhouse9b226622014-03-09 14:03:28 -07002458 info = dev->archdata.iommu;
Mike Traviscb452a42011-05-28 13:15:03 -05002459 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2460 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002461
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002462 return 0;
2463}
2464
2465static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5913c9b2014-03-09 16:27:31 -07002466 struct device *dev, int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002467{
David Woodhouse0ac72662014-03-09 13:19:22 -07002468 struct dmar_domain *ndomain;
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002469 struct intel_iommu *iommu;
David Woodhouse156baca2014-03-09 14:00:57 -07002470 u8 bus, devfn;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002471 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002472
David Woodhouse5913c9b2014-03-09 16:27:31 -07002473 iommu = device_to_iommu(dev, &bus, &devfn);
David Woodhouse5a8f40e2014-03-09 13:31:18 -07002474 if (!iommu)
2475 return -ENODEV;
2476
David Woodhouse5913c9b2014-03-09 16:27:31 -07002477 ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
David Woodhouse0ac72662014-03-09 13:19:22 -07002478 if (ndomain != domain)
2479 return -EBUSY;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002480
David Woodhouse5913c9b2014-03-09 16:27:31 -07002481 ret = domain_context_mapping(domain, dev, translation);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002482 if (ret) {
David Woodhouse5913c9b2014-03-09 16:27:31 -07002483 domain_remove_one_dev_info(domain, dev);
David Woodhousee2ad23d2012-05-25 17:42:54 +01002484 return ret;
2485 }
2486
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002487 return 0;
2488}
2489
David Woodhouse0b9d9752014-03-09 15:48:15 -07002490static bool device_has_rmrr(struct device *dev)
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002491{
2492 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002493 struct device *tmp;
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002494 int i;
2495
Jiang Liu0e242612014-02-19 14:07:34 +08002496 rcu_read_lock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002497 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002498 /*
2499 * Return TRUE if this RMRR contains the device that
2500 * is passed in.
2501 */
2502 for_each_active_dev_scope(rmrr->devices,
2503 rmrr->devices_cnt, i, tmp)
David Woodhouse0b9d9752014-03-09 15:48:15 -07002504 if (tmp == dev) {
Jiang Liu0e242612014-02-19 14:07:34 +08002505 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002506 return true;
Jiang Liub683b232014-02-19 14:07:32 +08002507 }
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002508 }
Jiang Liu0e242612014-02-19 14:07:34 +08002509 rcu_read_unlock();
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002510 return false;
2511}
2512
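/*
 * Policy for placing a device in the identity-mapped (si) domain:
 * devices covered by an RMRR are kept out (USB excepted), graphics and
 * Azalia devices follow the IDENTMAP_GFX/IDENTMAP_AZALIA options,
 * conventional PCI devices behind bridges are kept out because they
 * share a source-id, and once the system is up a device must also have
 * a DMA mask large enough to reach all of memory.
 */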
David Woodhouse3bdb2592014-03-09 16:03:08 -07002513static int iommu_should_identity_map(struct device *dev, int startup)
David Woodhouse6941af22009-07-04 18:24:27 +01002514{
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002515
David Woodhouse3bdb2592014-03-09 16:03:08 -07002516 if (dev_is_pci(dev)) {
2517 struct pci_dev *pdev = to_pci_dev(dev);
Tom Mingarelliea2447f2012-11-20 19:43:17 +00002518
David Woodhouse3bdb2592014-03-09 16:03:08 -07002519 /*
2520 * We want to prevent any device associated with an RMRR from
2521 * getting placed into the SI Domain. This is done because
2522 * problems exist when devices are moved in and out of domains
2523 * and their respective RMRR info is lost. We exempt USB devices
2524 * from this process due to their usage of RMRRs that are known
2525 * to not be needed after BIOS hand-off to OS.
2526 */
2527 if (device_has_rmrr(dev) &&
2528 (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
2529 return 0;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002530
David Woodhouse3bdb2592014-03-09 16:03:08 -07002531 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2532 return 1;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002533
David Woodhouse3bdb2592014-03-09 16:03:08 -07002534 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2535 return 1;
2536
2537 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2538 return 0;
2539
2540 /*
2541 * We want to start off with all devices in the 1:1 domain, and
2542 * take them out later if we find they can't access all of memory.
2543 *
2544 * However, we can't do this for PCI devices behind bridges,
2545 * because all PCI devices behind the same bridge will end up
2546 * with the same source-id on their transactions.
2547 *
2548 * Practically speaking, we can't change things around for these
2549 * devices at run-time, because we can't be sure there'll be no
2550 * DMA transactions in flight for any of their siblings.
2551 *
2552 * So PCI devices (unless they're on the root bus) as well as
2553 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2554 * the 1:1 domain, just in _case_ one of their siblings turns out
2555 * not to be able to map all of memory.
2556 */
2557 if (!pci_is_pcie(pdev)) {
2558 if (!pci_is_root_bus(pdev->bus))
2559 return 0;
2560 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2561 return 0;
2562 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2563 return 0;
2564 } else {
2565 if (device_has_rmrr(dev))
2566 return 0;
2567 }
David Woodhouse6941af22009-07-04 18:24:27 +01002568
David Woodhouse3dfc8132009-07-04 19:11:08 +01002569 /*
David Woodhouse3dfc8132009-07-04 19:11:08 +01002570 * At boot time, we don't yet know if devices will be 64-bit capable.
David Woodhouse3bdb2592014-03-09 16:03:08 -07002571 * Assume that they will — if they turn out not to be, then we can
David Woodhouse3dfc8132009-07-04 19:11:08 +01002572 * take them out of the 1:1 domain later.
2573 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002574 if (!startup) {
2575 /*
2576 * If the device's dma_mask is less than the system's memory
2577 * size then this is not a candidate for identity mapping.
2578 */
David Woodhouse3bdb2592014-03-09 16:03:08 -07002579 u64 dma_mask = *dev->dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002580
David Woodhouse3bdb2592014-03-09 16:03:08 -07002581 if (dev->coherent_dma_mask &&
2582 dev->coherent_dma_mask < dma_mask)
2583 dma_mask = dev->coherent_dma_mask;
Chris Wright8fcc5372011-05-28 13:15:02 -05002584
David Woodhouse3bdb2592014-03-09 16:03:08 -07002585 return dma_mask >= dma_get_required_mask(dev);
Chris Wright8fcc5372011-05-28 13:15:02 -05002586 }
David Woodhouse6941af22009-07-04 18:24:27 +01002587
2588 return 1;
2589}
2590
David Woodhousecf04eee2014-03-21 16:49:04 +00002591static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2592{
2593 int ret;
2594
2595 if (!iommu_should_identity_map(dev, 1))
2596 return 0;
2597
2598 ret = domain_add_dev_info(si_domain, dev,
2599 hw ? CONTEXT_TT_PASS_THROUGH :
2600 CONTEXT_TT_MULTI_LEVEL);
2601 if (!ret)
2602 pr_info("IOMMU: %s identity mapping for device %s\n",
2603 hw ? "hardware" : "software", dev_name(dev));
2604 else if (ret == -ENODEV)
2605 /* device not associated with an iommu */
2606 ret = 0;
2607
2608 return ret;
2609}
2610
2611
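/*
 * Set up si_domain and pre-map every eligible device into it: all PCI
 * devices, plus the ACPI namespace devices listed in the DRHD scopes
 * (via their physical node lists).  Pass-through context entries are
 * used when hardware pass-through is available (hw != 0).
 */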
Matt Kraai071e1372009-08-23 22:30:22 -07002612static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002613{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002614 struct pci_dev *pdev = NULL;
David Woodhousecf04eee2014-03-21 16:49:04 +00002615 struct dmar_drhd_unit *drhd;
2616 struct intel_iommu *iommu;
2617 struct device *dev;
2618 int i;
2619 int ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002620
David Woodhouse19943b02009-08-04 16:19:20 +01002621 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002622 if (ret)
2623 return -EFAULT;
2624
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002625 for_each_pci_dev(pdev) {
David Woodhousecf04eee2014-03-21 16:49:04 +00002626 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2627 if (ret)
2628 return ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002629 }
2630
David Woodhousecf04eee2014-03-21 16:49:04 +00002631 for_each_active_iommu(iommu, drhd)
2632 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2633 struct acpi_device_physical_node *pn;
2634 struct acpi_device *adev;
2635
2636 if (dev->bus != &acpi_bus_type)
2637 continue;
2638
2639 adev = to_acpi_device(dev);
2640 mutex_lock(&adev->physical_node_lock);
2641 list_for_each_entry(pn, &adev->physical_node_list, node) {
2642 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2643 if (ret)
2644 break;
2645 }
2646 mutex_unlock(&adev->physical_node_lock);
2647 if (ret)
2648 return ret;
2649 }
2650
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002651 return 0;
2652}
2653
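/*
 * One-time DMAR initialization: count the IOMMUs and allocate the global
 * tracking arrays, set up per-IOMMU domains and root entries, bring the
 * invalidation machinery into a known state (queued invalidation where
 * available, register-based otherwise), apply the identity-mapping
 * policy, map the RMRR and ISA ranges, and finally enable fault
 * reporting and DMA translation on each unit.
 */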
Joseph Cihulab7792602011-05-03 00:08:37 -07002654static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002655{
2656 struct dmar_drhd_unit *drhd;
2657 struct dmar_rmrr_unit *rmrr;
David Woodhouse832bd852014-03-07 15:08:36 +00002658 struct device *dev;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002659 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002660 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002661
2662 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002663 * for each drhd
2664 * allocate root
2665 * initialize and program root entry to not present
2666 * endfor
2667 */
2668 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002669 /*
2670 * lock not needed as this is only incremented in the single-
2671 * threaded kernel __init code path; all other accesses are
2672 * read only
2673 */
Mike Travis1b198bb2012-03-05 15:05:16 -08002674 if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
2675 g_num_of_iommus++;
2676 continue;
2677 }
2678 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
2679 IOMMU_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08002680 }
2681
Weidong Hand9630fe2008-12-08 11:06:32 +08002682 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2683 GFP_KERNEL);
2684 if (!g_iommus) {
2685 printk(KERN_ERR "Allocating global iommu array failed\n");
2686 ret = -ENOMEM;
2687 goto error;
2688 }
2689
mark gross80b20dd2008-04-18 13:53:58 -07002690 deferred_flush = kzalloc(g_num_of_iommus *
2691 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2692 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002693 ret = -ENOMEM;
Jiang Liu989d51f2014-02-19 14:07:21 +08002694 goto free_g_iommus;
mark gross5e0d2a62008-03-04 15:22:08 -08002695 }
2696
Jiang Liu7c919772014-01-06 14:18:18 +08002697 for_each_active_iommu(iommu, drhd) {
Weidong Hand9630fe2008-12-08 11:06:32 +08002698 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002699
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002700 ret = iommu_init_domains(iommu);
2701 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002702 goto free_iommu;
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002703
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002704 /*
2705 * TBD:
2706 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002707 * among all IOMMUs. Need to split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002708 */
2709 ret = iommu_alloc_root_entry(iommu);
2710 if (ret) {
2711 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08002712 goto free_iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002713 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002714 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002715 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002716 }
2717
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002718 /*
2719 * Start from a sane IOMMU hardware state.
2720 */
Jiang Liu7c919772014-01-06 14:18:18 +08002721 for_each_active_iommu(iommu, drhd) {
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002722 /*
2723 * If the queued invalidation is already initialized by us
2724 * (for example, while enabling interrupt-remapping) then
2725 * we got the things already rolling from a sane state.
2726 */
2727 if (iommu->qi)
2728 continue;
2729
2730 /*
2731 * Clear any previous faults.
2732 */
2733 dmar_fault(-1, iommu);
2734 /*
2735 * Disable queued invalidation if supported and already enabled
2736 * before OS handover.
2737 */
2738 dmar_disable_qi(iommu);
2739 }
2740
Jiang Liu7c919772014-01-06 14:18:18 +08002741 for_each_active_iommu(iommu, drhd) {
Youquan Songa77b67d2008-10-16 16:31:56 -07002742 if (dmar_enable_qi(iommu)) {
2743 /*
2744 * Queued Invalidate not enabled, use Register Based
2745 * Invalidate
2746 */
2747 iommu->flush.flush_context = __iommu_flush_context;
2748 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002749 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002750 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002751 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002752 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002753 } else {
2754 iommu->flush.flush_context = qi_flush_context;
2755 iommu->flush.flush_iotlb = qi_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002756 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002757 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002758 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002759 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002760 }
2761 }
2762
David Woodhouse19943b02009-08-04 16:19:20 +01002763 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07002764 iommu_identity_mapping |= IDENTMAP_ALL;
2765
Suresh Siddhad3f13812011-08-23 17:05:25 -07002766#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07002767 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002768#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07002769
2770 check_tylersburg_isoch();
2771
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002772 /*
2773 * If pass through is not set or not enabled, setup context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002774 * identity mappings for rmrr, gfx, and isa and may fall back to static
2775 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002776 */
David Woodhouse19943b02009-08-04 16:19:20 +01002777 if (iommu_identity_mapping) {
2778 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2779 if (ret) {
2780 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
Jiang Liu989d51f2014-02-19 14:07:21 +08002781 goto free_iommu;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002782 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002783 }
David Woodhouse19943b02009-08-04 16:19:20 +01002784 /*
2785 * For each rmrr
2786 * for each dev attached to rmrr
2787 * do
2788 * locate drhd for dev, alloc domain for dev
2789 * allocate free domain
2790 * allocate page table entries for rmrr
2791 * if context not allocated for bus
2792 * allocate and init context
2793 * set present in root table for this bus
2794 * init context with domain, translation etc
2795 * endfor
2796 * endfor
2797 */
2798 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2799 for_each_rmrr_units(rmrr) {
Jiang Liub683b232014-02-19 14:07:32 +08002800 /* some BIOSes list non-existent devices in the DMAR table. */
2801 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
David Woodhouse832bd852014-03-07 15:08:36 +00002802 i, dev) {
David Woodhouse0b9d9752014-03-09 15:48:15 -07002803 ret = iommu_prepare_rmrr_dev(rmrr, dev);
David Woodhouse19943b02009-08-04 16:19:20 +01002804 if (ret)
2805 printk(KERN_ERR
2806 "IOMMU: mapping reserved region failed\n");
2807 }
2808 }
2809
2810 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002811
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002812 /*
2813 * for each drhd
2814 * enable fault log
2815 * global invalidate context cache
2816 * global invalidate iotlb
2817 * enable translation
2818 */
Jiang Liu7c919772014-01-06 14:18:18 +08002819 for_each_iommu(iommu, drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07002820 if (drhd->ignored) {
2821 /*
2822 * we always have to disable PMRs or DMA may fail on
2823 * this device
2824 */
2825 if (force_on)
Jiang Liu7c919772014-01-06 14:18:18 +08002826 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002827 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07002828 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002829
2830 iommu_flush_write_buffer(iommu);
2831
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002832 ret = dmar_set_interrupt(iommu);
2833 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002834 goto free_iommu;
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002835
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002836 iommu_set_root_entry(iommu);
2837
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002838 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002839 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
mark grossf8bab732008-02-08 04:18:38 -08002840
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002841 ret = iommu_enable_translation(iommu);
2842 if (ret)
Jiang Liu989d51f2014-02-19 14:07:21 +08002843 goto free_iommu;
David Woodhouseb94996c2009-09-19 15:28:12 -07002844
2845 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002846 }
2847
2848 return 0;
Jiang Liu989d51f2014-02-19 14:07:21 +08002849
2850free_iommu:
Jiang Liu7c919772014-01-06 14:18:18 +08002851 for_each_active_iommu(iommu, drhd)
Jiang Liua868e6b2014-01-06 14:18:20 +08002852 free_dmar_iommu(iommu);
Jiang Liu9bdc5312014-01-06 14:18:27 +08002853 kfree(deferred_flush);
Jiang Liu989d51f2014-02-19 14:07:21 +08002854free_g_iommus:
Weidong Hand9630fe2008-12-08 11:06:32 +08002855 kfree(g_iommus);
Jiang Liu989d51f2014-02-19 14:07:21 +08002856error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002857 return ret;
2858}
2859
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002860/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002861static struct iova *intel_alloc_iova(struct device *dev,
2862 struct dmar_domain *domain,
2863 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002864{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002865 struct iova *iova = NULL;
2866
David Woodhouse875764d2009-06-28 21:20:51 +01002867 /* Restrict dma_mask to the width that the iommu can handle */
2868 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2869
2870 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002871 /*
2872 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002873 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002874 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002875 */
David Woodhouse875764d2009-06-28 21:20:51 +01002876 iova = alloc_iova(&domain->iovad, nrpages,
2877 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2878 if (iova)
2879 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002880 }
David Woodhouse875764d2009-06-28 21:20:51 +01002881 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2882 if (unlikely(!iova)) {
2883 printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
David Woodhouse207e3592014-03-09 16:12:32 -07002884 nrpages, dev_name(dev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002885 return NULL;
2886 }
2887
2888 return iova;
2889}
2890
David Woodhoused4b709f2014-03-09 16:07:40 -07002891static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002892{
2893 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002894 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002895
David Woodhoused4b709f2014-03-09 16:07:40 -07002896 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002897 if (!domain) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002898 printk(KERN_ERR "Allocating domain for %s failed\n",
2899 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002900 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002901 }
2902
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002903 /* make sure context mapping is ok */
David Woodhoused4b709f2014-03-09 16:07:40 -07002904 if (unlikely(!domain_context_mapped(dev))) {
2905 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002906 if (ret) {
David Woodhoused4b709f2014-03-09 16:07:40 -07002907 printk(KERN_ERR "Domain context map for %s failed\n",
2908 dev_name(dev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002909 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002910 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002911 }
2912
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002913 return domain;
2914}
2915
David Woodhoused4b709f2014-03-09 16:07:40 -07002916static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
David Woodhouse147202a2009-07-07 19:43:20 +01002917{
2918 struct device_domain_info *info;
2919
2920 /* No lock here, assumes no domain exit in normal case */
David Woodhoused4b709f2014-03-09 16:07:40 -07002921 info = dev->archdata.iommu;
David Woodhouse147202a2009-07-07 19:43:20 +01002922 if (likely(info))
2923 return info->domain;
2924
2925 return __get_valid_domain_for_dev(dev);
2926}
2927
David Woodhouse3d891942014-03-06 15:59:26 +00002928static int iommu_dummy(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002929{
David Woodhouse3d891942014-03-06 15:59:26 +00002930 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002931}
2932
David Woodhouseecb509e2014-03-09 16:29:55 -07002933/* Check if the dev needs to go through the non-identity map and unmap process. */
David Woodhouse73676832009-07-04 14:08:36 +01002934static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002935{
2936 int found;
2937
David Woodhouse3d891942014-03-06 15:59:26 +00002938 if (iommu_dummy(dev))
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002939 return 1;
2940
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002941 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002942 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002943
David Woodhouse9b226622014-03-09 14:03:28 -07002944 found = identity_mapping(dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002945 if (found) {
David Woodhouseecb509e2014-03-09 16:29:55 -07002946 if (iommu_should_identity_map(dev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002947 return 1;
2948 else {
2949 /*
2950 * The device can only do 32 bit DMA: remove it from si_domain
2951 * and fall back to non-identity mapping.
2952 */
David Woodhousebf9c9ed2014-03-09 16:19:13 -07002953 domain_remove_one_dev_info(si_domain, dev);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002954 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
David Woodhouseecb509e2014-03-09 16:29:55 -07002955 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002956 return 0;
2957 }
2958 } else {
2959 /*
2960 * A 64 bit DMA device that was detached from a VM is put back
2961 * into si_domain for identity mapping.
2962 */
David Woodhouseecb509e2014-03-09 16:29:55 -07002963 if (iommu_should_identity_map(dev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002964 int ret;
David Woodhouse5913c9b2014-03-09 16:27:31 -07002965 ret = domain_add_dev_info(si_domain, dev,
David Woodhouse5fe60f42009-08-09 10:53:41 +01002966 hw_pass_through ?
2967 CONTEXT_TT_PASS_THROUGH :
2968 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002969 if (!ret) {
2970 printk(KERN_INFO "64bit %s uses identity mapping\n",
David Woodhouseecb509e2014-03-09 16:29:55 -07002971 dev_name(dev));
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002972 return 1;
2973 }
2974 }
2975 }
2976
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002977 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002978}
2979
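/*
 * Core single-range mapping: allocate an IOVA below the device's DMA
 * mask, install page-table entries with read/write permission derived
 * from the DMA direction, and flush the IOTLB only in caching mode
 * (the mapping is a non-present to present transition, so real hardware
 * only needs a write-buffer flush).
 */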
David Woodhouse5040a912014-03-09 16:14:00 -07002980static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002981 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002982{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002983 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002984 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002985 struct iova *iova;
2986 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002987 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08002988 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07002989 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002990
2991 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002992
David Woodhouse5040a912014-03-09 16:14:00 -07002993 if (iommu_no_mapping(dev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002994 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002995
David Woodhouse5040a912014-03-09 16:14:00 -07002996 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002997 if (!domain)
2998 return 0;
2999
Weidong Han8c11e792008-12-08 15:29:22 +08003000 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01003001 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003002
David Woodhouse5040a912014-03-09 16:14:00 -07003003 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003004 if (!iova)
3005 goto error;
3006
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003007 /*
3008 * Check if DMAR supports zero-length reads on write only
3009 * mappings.
3010 */
3011 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003012 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003013 prot |= DMA_PTE_READ;
3014 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3015 prot |= DMA_PTE_WRITE;
3016 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003017 * paddr - (paddr + size) might be a partial page; we should map the whole
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003018 * page. Note: if two parts of one page are mapped separately, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02003019 * might have two guest_addr mappings to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003020 * is not a big problem
3021 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01003022 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07003023 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003024 if (ret)
3025 goto error;
3026
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003027 /* it's a non-present to present mapping. Only flush if caching mode */
3028 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003029 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003030 else
Weidong Han8c11e792008-12-08 15:29:22 +08003031 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003032
David Woodhouse03d6a242009-06-28 15:33:46 +01003033 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3034 start_paddr += paddr & ~PAGE_MASK;
3035 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003036
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003037error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003038 if (iova)
3039 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00003040 printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
David Woodhouse5040a912014-03-09 16:14:00 -07003041 dev_name(dev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003042 return 0;
3043}
3044
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003045static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3046 unsigned long offset, size_t size,
3047 enum dma_data_direction dir,
3048 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003049{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003050 return __intel_map_single(dev, page_to_phys(page) + offset, size,
David Woodhouse46333e32014-03-10 20:01:21 -07003051 dir, *dev->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003052}
3053
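/*
 * Drain the deferred-unmap queues of every IOMMU.  In caching mode each
 * queued range gets a page-selective IOTLB invalidation; otherwise one
 * global IOTLB flush is issued and only the device IOTLBs are flushed
 * per range.  The queued IOVAs and page-table freelists are then freed.
 */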
mark gross5e0d2a62008-03-04 15:22:08 -08003054static void flush_unmaps(void)
3055{
mark gross80b20dd2008-04-18 13:53:58 -07003056 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08003057
mark gross5e0d2a62008-03-04 15:22:08 -08003058 timer_on = 0;
3059
3060 /* just flush them all */
3061 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08003062 struct intel_iommu *iommu = g_iommus[i];
3063 if (!iommu)
3064 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003065
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003066 if (!deferred_flush[i].next)
3067 continue;
3068
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003069 /* In caching mode, global flushes turn emulation expensive */
3070 if (!cap_caching_mode(iommu->cap))
3071 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08003072 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003073 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08003074 unsigned long mask;
3075 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003076 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08003077
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003078 /* On real hardware multiple invalidations are expensive */
3079 if (cap_caching_mode(iommu->cap))
3080 iommu_flush_iotlb_psi(iommu, domain->id,
David Woodhouseea8ea462014-03-05 17:09:32 +00003081 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1,
3082 !deferred_flush[i].freelist[j], 0);
Nadav Amit78d5f0f2010-04-08 23:00:41 +03003083 else {
3084 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
3085 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3086 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3087 }
Yu Zhao93a23a72009-05-18 13:51:37 +08003088 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003089 if (deferred_flush[i].freelist[j])
3090 dma_free_pagelist(deferred_flush[i].freelist[j]);
mark gross80b20dd2008-04-18 13:53:58 -07003091 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08003092 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003093 }
3094
mark gross5e0d2a62008-03-04 15:22:08 -08003095 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08003096}
3097
3098static void flush_unmaps_timeout(unsigned long data)
3099{
mark gross80b20dd2008-04-18 13:53:58 -07003100 unsigned long flags;
3101
3102 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003103 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07003104 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08003105}
3106
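/*
 * Queue an IOVA (and the page tables freed with it) for a later batched
 * IOTLB flush.  The queue drains when it reaches HIGH_WATER_MARK entries
 * or when the 10ms unmap timer fires.
 */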
David Woodhouseea8ea462014-03-05 17:09:32 +00003107static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
mark gross5e0d2a62008-03-04 15:22:08 -08003108{
3109 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07003110 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08003111 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08003112
3113 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07003114 if (list_size == HIGH_WATER_MARK)
3115 flush_unmaps();
3116
Weidong Han8c11e792008-12-08 15:29:22 +08003117 iommu = domain_get_iommu(dom);
3118 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07003119
mark gross80b20dd2008-04-18 13:53:58 -07003120 next = deferred_flush[iommu_id].next;
3121 deferred_flush[iommu_id].domain[next] = dom;
3122 deferred_flush[iommu_id].iova[next] = iova;
David Woodhouseea8ea462014-03-05 17:09:32 +00003123 deferred_flush[iommu_id].freelist[next] = freelist;
mark gross80b20dd2008-04-18 13:53:58 -07003124 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08003125
3126 if (!timer_on) {
3127 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3128 timer_on = 1;
3129 }
3130 list_size++;
3131 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3132}
3133
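/*
 * Tear down a streaming mapping: unmap the page-table range covered by
 * the IOVA and either flush the IOTLB immediately (intel_iommu_strict)
 * or hand the IOVA and freelist to the deferred-flush machinery.
 */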
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003134static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3135 size_t size, enum dma_data_direction dir,
3136 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003137{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003138 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003139 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003140 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003141 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003142 struct page *freelist;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003143
David Woodhouse73676832009-07-04 14:08:36 +01003144 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003145 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003146
David Woodhouse1525a292014-03-06 16:19:30 +00003147 domain = find_domain(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003148 BUG_ON(!domain);
3149
Weidong Han8c11e792008-12-08 15:29:22 +08003150 iommu = domain_get_iommu(domain);
3151
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003152 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01003153 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3154 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003155 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003156
David Woodhoused794dc92009-06-28 00:27:49 +01003157 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3158 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003159
David Woodhoused794dc92009-06-28 00:27:49 +01003160 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
David Woodhouse207e3592014-03-09 16:12:32 -07003161 dev_name(dev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003162
David Woodhouseea8ea462014-03-05 17:09:32 +00003163 freelist = domain_unmap(domain, start_pfn, last_pfn);
David Woodhoused794dc92009-06-28 00:27:49 +01003164
mark gross5e0d2a62008-03-04 15:22:08 -08003165 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01003166 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
David Woodhouseea8ea462014-03-05 17:09:32 +00003167 last_pfn - start_pfn + 1, !freelist, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08003168 /* free iova */
3169 __free_iova(&domain->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003170 dma_free_pagelist(freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003171 } else {
David Woodhouseea8ea462014-03-05 17:09:32 +00003172 add_unmap(domain, iova, freelist);
mark gross5e0d2a62008-03-04 15:22:08 -08003173 /*
3174 * queue up the release of the unmap to save the 1/6th of the
3175 * cpu used up by the iotlb flush operation...
3176 */
mark gross5e0d2a62008-03-04 15:22:08 -08003177 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003178}
3179
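/*
 * Coherent allocations come from CMA when the caller can sleep
 * (__GFP_WAIT), otherwise from alloc_pages(), and are then mapped
 * bidirectionally via __intel_map_single() against the device's
 * coherent DMA mask.
 */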
David Woodhouse5040a912014-03-09 16:14:00 -07003180static void *intel_alloc_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003181 dma_addr_t *dma_handle, gfp_t flags,
3182 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003183{
Akinobu Mita36746432014-06-04 16:06:51 -07003184 struct page *page = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003185 int order;
3186
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003187 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003188 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003189
David Woodhouse5040a912014-03-09 16:14:00 -07003190 if (!iommu_no_mapping(dev))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003191 flags &= ~(GFP_DMA | GFP_DMA32);
David Woodhouse5040a912014-03-09 16:14:00 -07003192 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3193 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
Alex Williamsone8bb9102009-11-04 15:59:34 -07003194 flags |= GFP_DMA;
3195 else
3196 flags |= GFP_DMA32;
3197 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003198
Akinobu Mita36746432014-06-04 16:06:51 -07003199 if (flags & __GFP_WAIT) {
3200 unsigned int count = size >> PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003201
Akinobu Mita36746432014-06-04 16:06:51 -07003202 page = dma_alloc_from_contiguous(dev, count, order);
3203 if (page && iommu_no_mapping(dev) &&
3204 page_to_phys(page) + size > dev->coherent_dma_mask) {
3205 dma_release_from_contiguous(dev, page, count);
3206 page = NULL;
3207 }
3208 }
3209
3210 if (!page)
3211 page = alloc_pages(flags, order);
3212 if (!page)
3213 return NULL;
3214 memset(page_address(page), 0, size);
3215
3216 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003217 DMA_BIDIRECTIONAL,
David Woodhouse5040a912014-03-09 16:14:00 -07003218 dev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003219 if (*dma_handle)
Akinobu Mita36746432014-06-04 16:06:51 -07003220 return page_address(page);
3221 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3222 __free_pages(page, order);
3223
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003224 return NULL;
3225}
3226
David Woodhouse5040a912014-03-09 16:14:00 -07003227static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003228 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003229{
3230 int order;
Akinobu Mita36746432014-06-04 16:06:51 -07003231 struct page *page = virt_to_page(vaddr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003232
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003233 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003234 order = get_order(size);
3235
David Woodhouse5040a912014-03-09 16:14:00 -07003236 intel_unmap_page(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
Akinobu Mita36746432014-06-04 16:06:51 -07003237 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3238 __free_pages(page, order);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003239}
3240
David Woodhouse5040a912014-03-09 16:14:00 -07003241static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003242 int nelems, enum dma_data_direction dir,
3243 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003244{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003245 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003246 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003247 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003248 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003249 struct page *freelist;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003250
David Woodhouse5040a912014-03-09 16:14:00 -07003251 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003252 return;
3253
David Woodhouse5040a912014-03-09 16:14:00 -07003254 domain = find_domain(dev);
Weidong Han8c11e792008-12-08 15:29:22 +08003255 BUG_ON(!domain);
3256
3257 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003258
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003259 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
David Woodhouse85b98272009-07-01 19:27:53 +01003260 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
3261 (unsigned long long)sglist[0].dma_address))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003262 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003263
David Woodhoused794dc92009-06-28 00:27:49 +01003264 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3265 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003266
David Woodhouseea8ea462014-03-05 17:09:32 +00003267 freelist = domain_unmap(domain, start_pfn, last_pfn);
David Woodhoused794dc92009-06-28 00:27:49 +01003268
David Woodhouseacea0012009-07-14 01:55:11 +01003269 if (intel_iommu_strict) {
3270 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
David Woodhouseea8ea462014-03-05 17:09:32 +00003271 last_pfn - start_pfn + 1, !freelist, 0);
David Woodhouseacea0012009-07-14 01:55:11 +01003272 /* free iova */
3273 __free_iova(&domain->iovad, iova);
David Woodhouseea8ea462014-03-05 17:09:32 +00003274 dma_free_pagelist(freelist);
David Woodhouseacea0012009-07-14 01:55:11 +01003275 } else {
David Woodhouseea8ea462014-03-05 17:09:32 +00003276 add_unmap(domain, iova, freelist);
David Woodhouseacea0012009-07-14 01:55:11 +01003277 /*
3278 * queue up the release of the unmap to save the 1/6th of the
3279 * cpu used up by the iotlb flush operation...
3280 */
3281 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003282}
3283
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003284static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003285 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003286{
3287 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003288 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003289
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003290 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003291 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003292 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003293 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003294 }
3295 return nelems;
3296}
3297
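/*
 * Map a whole scatterlist into one contiguous IOVA range sized by the
 * sum of the page-aligned segment lengths.  On failure the partially
 * built page tables and the IOVA are released and 0 is returned.
 */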
David Woodhouse5040a912014-03-09 16:14:00 -07003298static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003299 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003300{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003301 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003302 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003303 size_t size = 0;
3304 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003305 struct iova *iova = NULL;
3306 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003307 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003308 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003309 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003310
3311 BUG_ON(dir == DMA_NONE);
David Woodhouse5040a912014-03-09 16:14:00 -07003312 if (iommu_no_mapping(dev))
3313 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003314
David Woodhouse5040a912014-03-09 16:14:00 -07003315 domain = get_valid_domain_for_dev(dev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003316 if (!domain)
3317 return 0;
3318
Weidong Han8c11e792008-12-08 15:29:22 +08003319 iommu = domain_get_iommu(domain);
3320
David Woodhouseb536d242009-06-28 14:49:31 +01003321 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003322 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003323
David Woodhouse5040a912014-03-09 16:14:00 -07003324 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3325 *dev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003326 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003327 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003328 return 0;
3329 }
3330
3331 /*
3332 * Check if DMAR supports zero-length reads on write only
3333 * mappings.
3334 */
3335 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003336 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003337 prot |= DMA_PTE_READ;
3338 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3339 prot |= DMA_PTE_WRITE;
3340
David Woodhouseb536d242009-06-28 14:49:31 +01003341 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003342
Fenghua Yuf5329592009-08-04 15:09:37 -07003343 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003344 if (unlikely(ret)) {
3345 /* clear the page */
3346 dma_pte_clear_range(domain, start_vpfn,
3347 start_vpfn + size - 1);
3348 /* free page tables */
3349 dma_pte_free_pagetable(domain, start_vpfn,
3350 start_vpfn + size - 1);
3351 /* free iova */
3352 __free_iova(&domain->iovad, iova);
3353 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003354 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003355
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003356 /* it's a non-present to present mapping. Only flush if caching mode */
3357 if (cap_caching_mode(iommu->cap))
David Woodhouseea8ea462014-03-05 17:09:32 +00003358 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003359 else
Weidong Han8c11e792008-12-08 15:29:22 +08003360 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003361
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003362 return nelems;
3363}
3364
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003365static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3366{
3367 return !dma_addr;
3368}
3369
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003370struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003371 .alloc = intel_alloc_coherent,
3372 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003373 .map_sg = intel_map_sg,
3374 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003375 .map_page = intel_map_page,
3376 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003377 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003378};
3379
3380static inline int iommu_domain_cache_init(void)
3381{
3382 int ret = 0;
3383
3384 iommu_domain_cache = kmem_cache_create("iommu_domain",
3385 sizeof(struct dmar_domain),
3386 0,
3387 SLAB_HWCACHE_ALIGN,
3389 NULL);
3390 if (!iommu_domain_cache) {
3391 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3392 ret = -ENOMEM;
3393 }
3394
3395 return ret;
3396}
3397
3398static inline int iommu_devinfo_cache_init(void)
3399{
3400 int ret = 0;
3401
3402 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3403 sizeof(struct device_domain_info),
3404 0,
3405 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003406 NULL);
3407 if (!iommu_devinfo_cache) {
3408 printk(KERN_ERR "Couldn't create devinfo cache\n");
3409 ret = -ENOMEM;
3410 }
3411
3412 return ret;
3413}
3414
3415static inline int iommu_iova_cache_init(void)
3416{
3417 int ret = 0;
3418
3419 iommu_iova_cache = kmem_cache_create("iommu_iova",
3420 sizeof(struct iova),
3421 0,
3422 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003423 NULL);
3424 if (!iommu_iova_cache) {
3425 printk(KERN_ERR "Couldn't create iova cache\n");
3426 ret = -ENOMEM;
3427 }
3428
3429 return ret;
3430}
3431
3432static int __init iommu_init_mempool(void)
3433{
3434 int ret;
3435 ret = iommu_iova_cache_init();
3436 if (ret)
3437 return ret;
3438
3439 ret = iommu_domain_cache_init();
3440 if (ret)
3441 goto domain_error;
3442
3443 ret = iommu_devinfo_cache_init();
3444 if (!ret)
3445 return ret;
3446
3447 kmem_cache_destroy(iommu_domain_cache);
3448domain_error:
3449 kmem_cache_destroy(iommu_iova_cache);
3450
3451 return -ENOMEM;
3452}
3453
3454static void __init iommu_exit_mempool(void)
3455{
3456 kmem_cache_destroy(iommu_devinfo_cache);
3457 kmem_cache_destroy(iommu_domain_cache);
3458 kmem_cache_destroy(iommu_iova_cache);
3460}
3461
Dan Williams556ab452010-07-23 15:47:56 -07003462static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3463{
3464 struct dmar_drhd_unit *drhd;
3465 u32 vtbar;
3466 int rc;
3467
3468 /* We know that this device on this chipset has its own IOMMU.
3469 * If we find it under a different IOMMU, then the BIOS is lying
3470 * to us. Hope that the IOMMU for this device is actually
3471 * disabled, and it needs no translation...
3472 */
3473 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3474 if (rc) {
3475 /* "can't" happen */
3476 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3477 return;
3478 }
3479 vtbar &= 0xffff0000;
3480
3481 /* we know that this iommu should be at offset 0xa000 from vtbar */
3482 drhd = dmar_find_matched_drhd_unit(pdev);
3483 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3484 TAINT_FIRMWARE_WORKAROUND,
3485 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3486 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3487}
3488DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3489
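/*
 * Mark DMAR units that should be ignored: units (other than INCLUDE_ALL
 * ones) whose device scope turns out to be empty, and graphics-only
 * units when dmar_map_gfx is clear.  Devices under an ignored graphics
 * unit get DUMMY_DEVICE_DOMAIN_INFO so the DMA API bypasses translation
 * for them.
 */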
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003490static void __init init_no_remapping_devices(void)
3491{
3492 struct dmar_drhd_unit *drhd;
David Woodhouse832bd852014-03-07 15:08:36 +00003493 struct device *dev;
Jiang Liub683b232014-02-19 14:07:32 +08003494 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003495
3496 for_each_drhd_unit(drhd) {
3497 if (!drhd->include_all) {
Jiang Liub683b232014-02-19 14:07:32 +08003498 for_each_active_dev_scope(drhd->devices,
3499 drhd->devices_cnt, i, dev)
3500 break;
David Woodhouse832bd852014-03-07 15:08:36 +00003501 /* ignore DMAR unit if no devices exist */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003502 if (i == drhd->devices_cnt)
3503 drhd->ignored = 1;
3504 }
3505 }
3506
Jiang Liu7c919772014-01-06 14:18:18 +08003507 for_each_active_drhd_unit(drhd) {
Jiang Liu7c919772014-01-06 14:18:18 +08003508 if (drhd->include_all)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003509 continue;
3510
Jiang Liub683b232014-02-19 14:07:32 +08003511 for_each_active_dev_scope(drhd->devices,
3512 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003513 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003514 break;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003515 if (i < drhd->devices_cnt)
3516 continue;
3517
David Woodhousec0771df2011-10-14 20:59:46 +01003518 /* This IOMMU has *only* gfx devices. Either bypass it or
3519 set the gfx_mapped flag, as appropriate */
3520 if (dmar_map_gfx) {
3521 intel_iommu_gfx_mapped = 1;
3522 } else {
3523 drhd->ignored = 1;
Jiang Liub683b232014-02-19 14:07:32 +08003524 for_each_active_dev_scope(drhd->devices,
3525 drhd->devices_cnt, i, dev)
David Woodhouse832bd852014-03-07 15:08:36 +00003526 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003527 }
3528 }
3529}
3530
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003531#ifdef CONFIG_SUSPEND
3532static int init_iommu_hw(void)
3533{
3534 struct dmar_drhd_unit *drhd;
3535 struct intel_iommu *iommu = NULL;
3536
3537 for_each_active_iommu(iommu, drhd)
3538 if (iommu->qi)
3539 dmar_reenable_qi(iommu);
3540
Joseph Cihulab7792602011-05-03 00:08:37 -07003541 for_each_iommu(iommu, drhd) {
3542 if (drhd->ignored) {
3543 /*
3544 * we always have to disable PMRs or DMA may fail on
3545 * this device
3546 */
3547 if (force_on)
3548 iommu_disable_protect_mem_regions(iommu);
3549 continue;
3550 }
3551
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003552 iommu_flush_write_buffer(iommu);
3553
3554 iommu_set_root_entry(iommu);
3555
3556 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003557 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003558 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003559 DMA_TLB_GLOBAL_FLUSH);
Joseph Cihulab7792602011-05-03 00:08:37 -07003560 if (iommu_enable_translation(iommu))
3561 return 1;
David Woodhouseb94996c2009-09-19 15:28:12 -07003562 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003563 }
3564
3565 return 0;
3566}
3567
3568static void iommu_flush_all(void)
3569{
3570 struct dmar_drhd_unit *drhd;
3571 struct intel_iommu *iommu;
3572
3573 for_each_active_iommu(iommu, drhd) {
3574 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003575 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003576 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003577 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003578 }
3579}
3580
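/*
 * On suspend, save the fault-event registers of every active IOMMU and
 * disable translation; iommu_resume() re-runs init_iommu_hw() and then
 * restores those registers.
 */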
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003581static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003582{
3583 struct dmar_drhd_unit *drhd;
3584 struct intel_iommu *iommu = NULL;
3585 unsigned long flag;
3586
3587 for_each_active_iommu(iommu, drhd) {
3588 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3589 GFP_ATOMIC);
3590 if (!iommu->iommu_state)
3591 goto nomem;
3592 }
3593
3594 iommu_flush_all();
3595
3596 for_each_active_iommu(iommu, drhd) {
3597 iommu_disable_translation(iommu);
3598
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003599 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003600
3601 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3602 readl(iommu->reg + DMAR_FECTL_REG);
3603 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3604 readl(iommu->reg + DMAR_FEDATA_REG);
3605 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3606 readl(iommu->reg + DMAR_FEADDR_REG);
3607 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3608 readl(iommu->reg + DMAR_FEUADDR_REG);
3609
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003610 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003611 }
3612 return 0;
3613
3614nomem:
3615 for_each_active_iommu(iommu, drhd)
3616 kfree(iommu->iommu_state);
3617
3618 return -ENOMEM;
3619}
3620
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003621static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003622{
3623 struct dmar_drhd_unit *drhd;
3624 struct intel_iommu *iommu = NULL;
3625 unsigned long flag;
3626
3627 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003628 if (force_on)
3629 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3630 else
3631 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003632 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003633 }
3634
3635 for_each_active_iommu(iommu, drhd) {
3636
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003637 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003638
3639 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3640 iommu->reg + DMAR_FECTL_REG);
3641 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3642 iommu->reg + DMAR_FEDATA_REG);
3643 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3644 iommu->reg + DMAR_FEADDR_REG);
3645 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3646 iommu->reg + DMAR_FEUADDR_REG);
3647
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003648 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003649 }
3650
3651 for_each_active_iommu(iommu, drhd)
3652 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003653}
3654
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003655static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003656 .resume = iommu_resume,
3657 .suspend = iommu_suspend,
3658};
3659
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003660static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003661{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003662 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003663}
3664
3665#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003666static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003667#endif /* CONFIG_PM */
3668
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003669
3670int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3671{
3672 struct acpi_dmar_reserved_memory *rmrr;
3673 struct dmar_rmrr_unit *rmrru;
3674
3675 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3676 if (!rmrru)
3677 return -ENOMEM;
3678
3679 rmrru->hdr = header;
3680 rmrr = (struct acpi_dmar_reserved_memory *)header;
3681 rmrru->base_address = rmrr->base_address;
3682 rmrru->end_address = rmrr->end_address;
Jiang Liu2e455282014-02-19 14:07:36 +08003683 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3684 ((void *)rmrr) + rmrr->header.length,
3685 &rmrru->devices_cnt);
3686 if (rmrru->devices_cnt && rmrru->devices == NULL) {
3687 kfree(rmrru);
3688 return -ENOMEM;
3689 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003690
Jiang Liu2e455282014-02-19 14:07:36 +08003691 list_add(&rmrru->list, &dmar_rmrr_units);
3692
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003693 return 0;
3694}
3695
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003696int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3697{
3698 struct acpi_dmar_atsr *atsr;
3699 struct dmar_atsr_unit *atsru;
3700
3701 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3702 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3703 if (!atsru)
3704 return -ENOMEM;
3705
3706 atsru->hdr = hdr;
3707 atsru->include_all = atsr->flags & 0x1;
Jiang Liu2e455282014-02-19 14:07:36 +08003708 if (!atsru->include_all) {
3709 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
3710 (void *)atsr + atsr->header.length,
3711 &atsru->devices_cnt);
3712 if (atsru->devices_cnt && atsru->devices == NULL) {
3713 kfree(atsru);
3714 return -ENOMEM;
3715 }
3716 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003717
Jiang Liu0e242612014-02-19 14:07:34 +08003718 list_add_rcu(&atsru->list, &dmar_atsr_units);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003719
3720 return 0;
3721}
3722
Jiang Liu9bdc5312014-01-06 14:18:27 +08003723static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
3724{
3725 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
3726 kfree(atsru);
3727}
3728
3729static void intel_iommu_free_dmars(void)
3730{
3731 struct dmar_rmrr_unit *rmrru, *rmrr_n;
3732 struct dmar_atsr_unit *atsru, *atsr_n;
3733
3734 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
3735 list_del(&rmrru->list);
3736 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
3737 kfree(rmrru);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003738 }
3739
Jiang Liu9bdc5312014-01-06 14:18:27 +08003740 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
3741 list_del(&atsru->list);
3742 intel_iommu_free_atsr(atsru);
3743 }
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003744}
3745
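/*
 * Returns non-zero when the PCIe root port above @dev is covered by an
 * ATSR unit for its PCI segment (or an ATSR with INCLUDE_ALL exists),
 * which is how the ACPI tables report ATS support on the path to the
 * device.
 */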
3746int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3747{
Jiang Liub683b232014-02-19 14:07:32 +08003748 int i, ret = 1;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003749 struct pci_bus *bus;
David Woodhouse832bd852014-03-07 15:08:36 +00003750 struct pci_dev *bridge = NULL;
3751 struct device *tmp;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003752 struct acpi_dmar_atsr *atsr;
3753 struct dmar_atsr_unit *atsru;
3754
3755 dev = pci_physfn(dev);
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003756 for (bus = dev->bus; bus; bus = bus->parent) {
Jiang Liub5f82dd2014-02-19 14:07:31 +08003757 bridge = bus->self;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003758 if (!bridge || !pci_is_pcie(bridge) ||
Yijing Wang62f87c02012-07-24 17:20:03 +08003759 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003760 return 0;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003761 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003762 break;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003763 }
Jiang Liub5f82dd2014-02-19 14:07:31 +08003764 if (!bridge)
3765 return 0;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003766
Jiang Liu0e242612014-02-19 14:07:34 +08003767 rcu_read_lock();
Jiang Liub5f82dd2014-02-19 14:07:31 +08003768 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3769 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3770 if (atsr->segment != pci_domain_nr(dev->bus))
3771 continue;
3772
Jiang Liub683b232014-02-19 14:07:32 +08003773 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
David Woodhouse832bd852014-03-07 15:08:36 +00003774 if (tmp == &bridge->dev)
Jiang Liub683b232014-02-19 14:07:32 +08003775 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003776
3777 if (atsru->include_all)
Jiang Liub683b232014-02-19 14:07:32 +08003778 goto out;
Jiang Liub5f82dd2014-02-19 14:07:31 +08003779 }
Jiang Liub683b232014-02-19 14:07:32 +08003780 ret = 0;
3781out:
Jiang Liu0e242612014-02-19 14:07:34 +08003782 rcu_read_unlock();
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003783
Jiang Liub683b232014-02-19 14:07:32 +08003784 return ret;
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003785}
3786
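/*
 * PCI hotplug callback: keep the RMRR and ATSR device-scope arrays in sync
 * as devices are added to or removed from the bus.
 */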
Jiang Liu59ce0512014-02-19 14:07:35 +08003787int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
3788{
3789 int ret = 0;
3790 struct dmar_rmrr_unit *rmrru;
3791 struct dmar_atsr_unit *atsru;
3792 struct acpi_dmar_atsr *atsr;
3793 struct acpi_dmar_reserved_memory *rmrr;
3794
3795 if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
3796 return 0;
3797
3798 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
3799 rmrr = container_of(rmrru->hdr,
3800 struct acpi_dmar_reserved_memory, header);
3801 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3802 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
3803 ((void *)rmrr) + rmrr->header.length,
3804 rmrr->segment, rmrru->devices,
3805 rmrru->devices_cnt);
Jiang Liu27e24952014-06-20 15:08:06 +08003806			if (ret < 0)

Jiang Liu59ce0512014-02-19 14:07:35 +08003807 return ret;
3808 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
Jiang Liu27e24952014-06-20 15:08:06 +08003809 dmar_remove_dev_scope(info, rmrr->segment,
3810 rmrru->devices, rmrru->devices_cnt);
Jiang Liu59ce0512014-02-19 14:07:35 +08003811 }
3812 }
3813
3814 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3815 if (atsru->include_all)
3816 continue;
3817
3818 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3819 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3820 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
3821 (void *)atsr + atsr->header.length,
3822 atsr->segment, atsru->devices,
3823 atsru->devices_cnt);
3824 if (ret > 0)
3825 break;
 3827			else if (ret < 0)
3827 return ret;
3828 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3829 if (dmar_remove_dev_scope(info, atsr->segment,
3830 atsru->devices, atsru->devices_cnt))
3831 break;
3832 }
3833 }
3834
3835 return 0;
3836}
3837
Fenghua Yu99dcade2009-11-11 07:23:06 -08003838/*
 3839 * Here we only respond to a device being unbound from its driver.
 3840 *
 3841 * A newly added device is not attached to its DMAR domain here yet; that
 3842 * happens when the device is mapped to an iova.
 3843 */
3844static int device_notifier(struct notifier_block *nb,
3845 unsigned long action, void *data)
3846{
3847 struct device *dev = data;
Fenghua Yu99dcade2009-11-11 07:23:06 -08003848 struct dmar_domain *domain;
3849
David Woodhouse3d891942014-03-06 15:59:26 +00003850 if (iommu_dummy(dev))
David Woodhouse44cd6132009-12-02 10:18:30 +00003851 return 0;
3852
Jiang Liu7e7dfab2014-02-19 14:07:23 +08003853 if (action != BUS_NOTIFY_UNBOUND_DRIVER &&
3854 action != BUS_NOTIFY_DEL_DEVICE)
3855 return 0;
3856
David Woodhouse1525a292014-03-06 16:19:30 +00003857 domain = find_domain(dev);
Fenghua Yu99dcade2009-11-11 07:23:06 -08003858 if (!domain)
3859 return 0;
3860
Jiang Liu3a5670e2014-02-19 14:07:33 +08003861 down_read(&dmar_global_lock);
David Woodhousebf9c9ed2014-03-09 16:19:13 -07003862 domain_remove_one_dev_info(domain, dev);
Jiang Liu7e7dfab2014-02-19 14:07:23 +08003863 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3864 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3865 list_empty(&domain->devices))
3866 domain_exit(domain);
Jiang Liu3a5670e2014-02-19 14:07:33 +08003867 up_read(&dmar_global_lock);
Alex Williamsona97590e2011-03-04 14:52:16 -07003868
Fenghua Yu99dcade2009-11-11 07:23:06 -08003869 return 0;
3870}
3871
3872static struct notifier_block device_nb = {
3873 .notifier_call = device_notifier,
3874};
3875
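/*
 * Memory hotplug callback for the static identity (si) domain: identity-map
 * memory that is coming online, and unmap, flush and free the page tables
 * and IOVA for memory that goes offline.
 */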
Jiang Liu75f05562014-02-19 14:07:37 +08003876static int intel_iommu_memory_notifier(struct notifier_block *nb,
3877 unsigned long val, void *v)
3878{
3879 struct memory_notify *mhp = v;
3880 unsigned long long start, end;
3881 unsigned long start_vpfn, last_vpfn;
3882
3883 switch (val) {
3884 case MEM_GOING_ONLINE:
3885 start = mhp->start_pfn << PAGE_SHIFT;
3886 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
3887 if (iommu_domain_identity_map(si_domain, start, end)) {
3888 pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
3889 start, end);
3890 return NOTIFY_BAD;
3891 }
3892 break;
3893
3894 case MEM_OFFLINE:
3895 case MEM_CANCEL_ONLINE:
3896 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
3897 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
3898 while (start_vpfn <= last_vpfn) {
3899 struct iova *iova;
3900 struct dmar_drhd_unit *drhd;
3901 struct intel_iommu *iommu;
David Woodhouseea8ea462014-03-05 17:09:32 +00003902 struct page *freelist;
Jiang Liu75f05562014-02-19 14:07:37 +08003903
3904 iova = find_iova(&si_domain->iovad, start_vpfn);
3905 if (iova == NULL) {
 3906				pr_debug("dmar: failed to get IOVA for PFN %lx\n",
3907 start_vpfn);
3908 break;
3909 }
3910
3911 iova = split_and_remove_iova(&si_domain->iovad, iova,
3912 start_vpfn, last_vpfn);
3913 if (iova == NULL) {
3914 pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
3915 start_vpfn, last_vpfn);
3916 return NOTIFY_BAD;
3917 }
3918
David Woodhouseea8ea462014-03-05 17:09:32 +00003919 freelist = domain_unmap(si_domain, iova->pfn_lo,
3920 iova->pfn_hi);
3921
Jiang Liu75f05562014-02-19 14:07:37 +08003922 rcu_read_lock();
3923 for_each_active_iommu(iommu, drhd)
3924 iommu_flush_iotlb_psi(iommu, si_domain->id,
3925 iova->pfn_lo,
David Woodhouseea8ea462014-03-05 17:09:32 +00003926 iova->pfn_hi - iova->pfn_lo + 1,
3927 !freelist, 0);
Jiang Liu75f05562014-02-19 14:07:37 +08003928 rcu_read_unlock();
David Woodhouseea8ea462014-03-05 17:09:32 +00003929 dma_free_pagelist(freelist);
Jiang Liu75f05562014-02-19 14:07:37 +08003930
3931 start_vpfn = iova->pfn_hi + 1;
3932 free_iova_mem(iova);
3933 }
3934 break;
3935 }
3936
3937 return NOTIFY_OK;
3938}
3939
3940static struct notifier_block intel_iommu_memory_nb = {
3941 .notifier_call = intel_iommu_memory_notifier,
3942 .priority = 0
3943};
3944
Alex Williamsona5459cf2014-06-12 16:12:31 -06003945
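/*
 * Read-only sysfs attributes exposing each IOMMU's version register,
 * register base physical address, and (extended) capability registers.
 * They are published via the iommu class device created in
 * intel_iommu_init(), typically e.g.
 * /sys/class/iommu/dmar0/intel-iommu/version.
 */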
3946static ssize_t intel_iommu_show_version(struct device *dev,
3947 struct device_attribute *attr,
3948 char *buf)
3949{
3950 struct intel_iommu *iommu = dev_get_drvdata(dev);
3951 u32 ver = readl(iommu->reg + DMAR_VER_REG);
3952 return sprintf(buf, "%d:%d\n",
3953 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
3954}
3955static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
3956
3957static ssize_t intel_iommu_show_address(struct device *dev,
3958 struct device_attribute *attr,
3959 char *buf)
3960{
3961 struct intel_iommu *iommu = dev_get_drvdata(dev);
3962 return sprintf(buf, "%llx\n", iommu->reg_phys);
3963}
3964static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
3965
3966static ssize_t intel_iommu_show_cap(struct device *dev,
3967 struct device_attribute *attr,
3968 char *buf)
3969{
3970 struct intel_iommu *iommu = dev_get_drvdata(dev);
3971 return sprintf(buf, "%llx\n", iommu->cap);
3972}
3973static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
3974
3975static ssize_t intel_iommu_show_ecap(struct device *dev,
3976 struct device_attribute *attr,
3977 char *buf)
3978{
3979 struct intel_iommu *iommu = dev_get_drvdata(dev);
3980 return sprintf(buf, "%llx\n", iommu->ecap);
3981}
3982static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
3983
3984static struct attribute *intel_iommu_attrs[] = {
3985 &dev_attr_version.attr,
3986 &dev_attr_address.attr,
3987 &dev_attr_cap.attr,
3988 &dev_attr_ecap.attr,
3989 NULL,
3990};
3991
3992static struct attribute_group intel_iommu_group = {
3993 .name = "intel-iommu",
3994 .attrs = intel_iommu_attrs,
3995};
3996
3997const struct attribute_group *intel_iommu_groups[] = {
3998 &intel_iommu_group,
3999 NULL,
4000};
4001
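/*
 * Main initialization entry point: parse the DMAR table and device scopes,
 * disable any translation left enabled by firmware, reserve IOVA ranges,
 * set up the DMAR units, install the Intel DMA ops and IOMMU API ops, and
 * register the device, memory-hotplug and sysfs hooks.
 */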
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004002int __init intel_iommu_init(void)
4003{
Jiang Liu9bdc5312014-01-06 14:18:27 +08004004 int ret = -ENODEV;
Takao Indoh3a93c842013-04-23 17:35:03 +09004005 struct dmar_drhd_unit *drhd;
Jiang Liu7c919772014-01-06 14:18:18 +08004006 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004007
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004008 /* VT-d is required for a TXT/tboot launch, so enforce that */
4009 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004010
Jiang Liu3a5670e2014-02-19 14:07:33 +08004011 if (iommu_init_mempool()) {
4012 if (force_on)
4013 panic("tboot: Failed to initialize iommu memory\n");
4014 return -ENOMEM;
4015 }
4016
4017 down_write(&dmar_global_lock);
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004018 if (dmar_table_init()) {
4019 if (force_on)
4020 panic("tboot: Failed to initialize DMAR table\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004021 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004022 }
4023
Takao Indoh3a93c842013-04-23 17:35:03 +09004024 /*
4025 * Disable translation if already enabled prior to OS handover.
4026 */
Jiang Liu7c919772014-01-06 14:18:18 +08004027 for_each_active_iommu(iommu, drhd)
Takao Indoh3a93c842013-04-23 17:35:03 +09004028 if (iommu->gcmd & DMA_GCMD_TE)
4029 iommu_disable_translation(iommu);
Takao Indoh3a93c842013-04-23 17:35:03 +09004030
Suresh Siddhac2c72862011-08-23 17:05:19 -07004031 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004032 if (force_on)
4033 panic("tboot: Failed to initialize DMAR device scope\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004034 goto out_free_dmar;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004035 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07004036
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004037 if (no_iommu || dmar_disabled)
Jiang Liu9bdc5312014-01-06 14:18:27 +08004038 goto out_free_dmar;
Suresh Siddha2ae21012008-07-10 11:16:43 -07004039
Suresh Siddha318fe7d2011-08-23 17:05:20 -07004040 if (list_empty(&dmar_rmrr_units))
4041 printk(KERN_INFO "DMAR: No RMRR found\n");
4042
4043 if (list_empty(&dmar_atsr_units))
4044 printk(KERN_INFO "DMAR: No ATSR found\n");
4045
Joseph Cihula51a63e62011-03-21 11:04:24 -07004046 if (dmar_init_reserved_ranges()) {
4047 if (force_on)
4048 panic("tboot: Failed to reserve iommu ranges\n");
Jiang Liu3a5670e2014-02-19 14:07:33 +08004049 goto out_free_reserved_range;
Joseph Cihula51a63e62011-03-21 11:04:24 -07004050 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004051
4052 init_no_remapping_devices();
4053
Joseph Cihulab7792602011-05-03 00:08:37 -07004054 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004055 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07004056 if (force_on)
4057 panic("tboot: Failed to initialize DMARs\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004058 printk(KERN_ERR "IOMMU: dmar init failed\n");
Jiang Liu9bdc5312014-01-06 14:18:27 +08004059 goto out_free_reserved_range;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004060 }
Jiang Liu3a5670e2014-02-19 14:07:33 +08004061 up_write(&dmar_global_lock);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004062 printk(KERN_INFO
4063 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
4064
mark gross5e0d2a62008-03-04 15:22:08 -08004065 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09004066#ifdef CONFIG_SWIOTLB
4067 swiotlb = 0;
4068#endif
David Woodhouse19943b02009-08-04 16:19:20 +01004069 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07004070
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01004071 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004072
Alex Williamsona5459cf2014-06-12 16:12:31 -06004073 for_each_active_iommu(iommu, drhd)
4074 iommu->iommu_dev = iommu_device_create(NULL, iommu,
4075 intel_iommu_groups,
4076 iommu->name);
4077
Joerg Roedel4236d97d2011-09-06 17:56:07 +02004078 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004079 bus_register_notifier(&pci_bus_type, &device_nb);
Jiang Liu75f05562014-02-19 14:07:37 +08004080 if (si_domain && !hw_pass_through)
4081 register_memory_notifier(&intel_iommu_memory_nb);
Fenghua Yu99dcade2009-11-11 07:23:06 -08004082
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02004083 intel_iommu_enabled = 1;
4084
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004085 return 0;
Jiang Liu9bdc5312014-01-06 14:18:27 +08004086
4087out_free_reserved_range:
4088 put_iova_domain(&reserved_iova_list);
Jiang Liu9bdc5312014-01-06 14:18:27 +08004089out_free_dmar:
4090 intel_iommu_free_dmars();
Jiang Liu3a5670e2014-02-19 14:07:33 +08004091 up_write(&dmar_global_lock);
4092 iommu_exit_mempool();
Jiang Liu9bdc5312014-01-06 14:18:27 +08004093 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07004094}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07004095
Alex Williamson579305f2014-07-03 09:51:43 -06004096static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4097{
4098 struct intel_iommu *iommu = opaque;
4099
4100 iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4101 return 0;
4102}
4103
4104/*
4105 * NB - intel-iommu lacks any sort of reference counting for the users of
4106 * dependent devices. If multiple endpoints have intersecting dependent
 4107 * devices, unbinding the driver from any one of them may leave
 4108 * the others unable to operate.
4109 */
Han, Weidong3199aa62009-02-26 17:31:12 +08004110static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004111 struct device *dev)
Han, Weidong3199aa62009-02-26 17:31:12 +08004112{
David Woodhouse0bcb3e22014-03-06 17:12:03 +00004113 if (!iommu || !dev || !dev_is_pci(dev))
Han, Weidong3199aa62009-02-26 17:31:12 +08004114 return;
4115
Alex Williamson579305f2014-07-03 09:51:43 -06004116 pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
Han, Weidong3199aa62009-02-26 17:31:12 +08004117}
4118
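/*
 * Detach one device from @domain: tear down its context mapping, disable
 * its device IOTLB, and if it was the last device behind this IOMMU drop
 * the IOMMU from the domain and update the domain's capabilities.
 */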
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004119static void domain_remove_one_dev_info(struct dmar_domain *domain,
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004120 struct device *dev)
Weidong Hanc7151a82008-12-08 22:51:37 +08004121{
Yijing Wangbca2b912013-10-31 17:26:04 +08004122 struct device_domain_info *info, *tmp;
Weidong Hanc7151a82008-12-08 22:51:37 +08004123 struct intel_iommu *iommu;
4124 unsigned long flags;
4125 int found = 0;
David Woodhouse156baca2014-03-09 14:00:57 -07004126 u8 bus, devfn;
Weidong Hanc7151a82008-12-08 22:51:37 +08004127
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004128 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08004129 if (!iommu)
4130 return;
4131
4132 spin_lock_irqsave(&device_domain_lock, flags);
Yijing Wangbca2b912013-10-31 17:26:04 +08004133 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004134 if (info->iommu == iommu && info->bus == bus &&
4135 info->devfn == devfn) {
David Woodhouse109b9b02012-05-25 17:43:02 +01004136 unlink_domain_info(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004137 spin_unlock_irqrestore(&device_domain_lock, flags);
4138
Yu Zhao93a23a72009-05-18 13:51:37 +08004139 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08004140 iommu_detach_dev(iommu, info->bus, info->devfn);
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004141 iommu_detach_dependent_devices(iommu, dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08004142 free_devinfo_mem(info);
4143
4144 spin_lock_irqsave(&device_domain_lock, flags);
4145
4146 if (found)
4147 break;
4148 else
4149 continue;
4150 }
4151
 4152		/* If there are no other devices under the same iommu
 4153		 * owned by this domain, clear this iommu in iommu_bmp and
 4154		 * update the iommu count and coherency.
 4155		 */
David Woodhouse8bbc4412014-03-09 13:52:37 -07004156 if (info->iommu == iommu)
Weidong Hanc7151a82008-12-08 22:51:37 +08004157 found = 1;
4158 }
4159
Roland Dreier3e7abe22011-07-20 06:22:21 -07004160 spin_unlock_irqrestore(&device_domain_lock, flags);
4161
Weidong Hanc7151a82008-12-08 22:51:37 +08004162 if (found == 0) {
4163 unsigned long tmp_flags;
4164 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
Mike Travis1b198bb2012-03-05 15:05:16 -08004165 clear_bit(iommu->seq_id, domain->iommu_bmp);
Weidong Hanc7151a82008-12-08 22:51:37 +08004166 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08004167 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08004168 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
Alex Williamsona97590e2011-03-04 14:52:16 -07004169
Alex Williamson9b4554b2011-05-24 12:19:04 -04004170 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
4171 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
4172 spin_lock_irqsave(&iommu->lock, tmp_flags);
4173 clear_bit(domain->id, iommu->domain_ids);
4174 iommu->domains[domain->id] = NULL;
4175 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
4176 }
Weidong Hanc7151a82008-12-08 22:51:37 +08004177 }
Weidong Hanc7151a82008-12-08 22:51:37 +08004178}
4179
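/*
 * Initialize a domain created through the IOMMU API: reserve the special
 * IOVA ranges, derive the agaw from the requested guest address width and
 * allocate the top-level page directory.
 */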
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004180static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08004181{
4182 int adjust_width;
4183
4184 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004185 domain_reserve_special_ranges(domain);
4186
4187 /* calculate AGAW */
4188 domain->gaw = guest_width;
4189 adjust_width = guestwidth_to_adjustwidth(guest_width);
4190 domain->agaw = width_to_agaw(adjust_width);
4191
Weidong Han5e98c4b2008-12-08 23:03:27 +08004192 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08004193 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01004194 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004195 domain->max_addr = 0;
Weidong Han5e98c4b2008-12-08 23:03:27 +08004196
4197 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07004198 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08004199 if (!domain->pgd)
4200 return -ENOMEM;
4201 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4202 return 0;
4203}
4204
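/* Allocate and set up the dmar_domain backing a newly created IOMMU API
 * domain, and report the domain's aperture geometry. */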
Joerg Roedel5d450802008-12-03 14:52:32 +01004205static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004206{
Joerg Roedel5d450802008-12-03 14:52:32 +01004207 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03004208
Jiang Liu92d03cc2014-02-19 14:07:28 +08004209 dmar_domain = alloc_domain(true);
Joerg Roedel5d450802008-12-03 14:52:32 +01004210 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03004211 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01004212 "intel_iommu_domain_init: dmar_domain == NULL\n");
4213 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03004214 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004215 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03004216 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01004217 "intel_iommu_domain_init() failed\n");
Jiang Liu92d03cc2014-02-19 14:07:28 +08004218 domain_exit(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01004219 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03004220 }
Allen Kay8140a952011-10-14 12:32:17 -07004221 domain_update_iommu_cap(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01004222 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004223
Joerg Roedel8a0e7152012-01-26 19:40:54 +01004224 domain->geometry.aperture_start = 0;
4225 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4226 domain->geometry.force_aperture = true;
4227
Joerg Roedel5d450802008-12-03 14:52:32 +01004228 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004229}
Kay, Allen M38717942008-09-09 18:37:29 +03004230
Joerg Roedel5d450802008-12-03 14:52:32 +01004231static void intel_iommu_domain_destroy(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03004232{
Joerg Roedel5d450802008-12-03 14:52:32 +01004233 struct dmar_domain *dmar_domain = domain->priv;
4234
4235 domain->priv = NULL;
Jiang Liu92d03cc2014-02-19 14:07:28 +08004236 domain_exit(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03004237}
Kay, Allen M38717942008-09-09 18:37:29 +03004238
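/*
 * Attach @dev to an IOMMU API domain.  If the device is already mapped in
 * another domain it is detached first; the attach fails if the domain's
 * address width exceeds what this IOMMU supports, and surplus page-table
 * levels are knocked out otherwise.
 */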
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004239static int intel_iommu_attach_device(struct iommu_domain *domain,
4240 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004241{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004242 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004243 struct intel_iommu *iommu;
4244 int addr_width;
David Woodhouse156baca2014-03-09 14:00:57 -07004245 u8 bus, devfn;
Kay, Allen M38717942008-09-09 18:37:29 +03004246
David Woodhouse7207d8f2014-03-09 16:31:06 -07004247 /* normally dev is not mapped */
4248 if (unlikely(domain_context_mapped(dev))) {
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004249 struct dmar_domain *old_domain;
4250
David Woodhouse1525a292014-03-06 16:19:30 +00004251 old_domain = find_domain(dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004252 if (old_domain) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004253 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
4254 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004255 domain_remove_one_dev_info(old_domain, dev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004256 else
4257 domain_remove_dev_info(old_domain);
4258 }
4259 }
4260
David Woodhouse156baca2014-03-09 14:00:57 -07004261 iommu = device_to_iommu(dev, &bus, &devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004262 if (!iommu)
4263 return -ENODEV;
4264
4265 /* check if this iommu agaw is sufficient for max mapped address */
4266 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01004267 if (addr_width > cap_mgaw(iommu->cap))
4268 addr_width = cap_mgaw(iommu->cap);
4269
4270 if (dmar_domain->max_addr > (1LL << addr_width)) {
4271 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004272 "sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01004273 __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004274 return -EFAULT;
4275 }
Tom Lyona99c47a2010-05-17 08:20:45 +01004276 dmar_domain->gaw = addr_width;
4277
4278 /*
4279 * Knock out extra levels of page tables if necessary
4280 */
4281 while (iommu->agaw < dmar_domain->agaw) {
4282 struct dma_pte *pte;
4283
4284 pte = dmar_domain->pgd;
4285 if (dma_pte_present(pte)) {
Sheng Yang25cbff12010-06-12 19:21:42 +08004286 dmar_domain->pgd = (struct dma_pte *)
4287 phys_to_virt(dma_pte_addr(pte));
Jan Kiszka7a661012010-11-02 08:05:51 +01004288 free_pgtable_page(pte);
Tom Lyona99c47a2010-05-17 08:20:45 +01004289 }
4290 dmar_domain->agaw--;
4291 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004292
David Woodhouse5913c9b2014-03-09 16:27:31 -07004293 return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004294}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004295
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004296static void intel_iommu_detach_device(struct iommu_domain *domain,
4297 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004298{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004299 struct dmar_domain *dmar_domain = domain->priv;
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004300
David Woodhousebf9c9ed2014-03-09 16:19:13 -07004301 domain_remove_one_dev_info(dmar_domain, dev);
Kay, Allen M38717942008-09-09 18:37:29 +03004302}
Kay, Allen M38717942008-09-09 18:37:29 +03004303
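/*
 * Map [iova, iova + size) to @hpa for an IOMMU API domain, translating the
 * IOMMU_* protection flags into DMA PTE bits and checking that the range
 * fits within the domain's address width.
 */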
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004304static int intel_iommu_map(struct iommu_domain *domain,
4305 unsigned long iova, phys_addr_t hpa,
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004306 size_t size, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03004307{
Joerg Roedeldde57a22008-12-03 15:04:09 +01004308 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004309 u64 max_addr;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004310 int prot = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004311 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004312
Joerg Roedeldde57a22008-12-03 15:04:09 +01004313 if (iommu_prot & IOMMU_READ)
4314 prot |= DMA_PTE_READ;
4315 if (iommu_prot & IOMMU_WRITE)
4316 prot |= DMA_PTE_WRITE;
Sheng Yang9cf066972009-03-18 15:33:07 +08004317 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4318 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004319
David Woodhouse163cc522009-06-28 00:51:17 +01004320 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004321 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004322 u64 end;
4323
4324 /* check if minimum agaw is sufficient for mapped address */
Tom Lyon8954da12010-05-17 08:19:52 +01004325 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004326 if (end < max_addr) {
Tom Lyon8954da12010-05-17 08:19:52 +01004327 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004328 "sufficient for the mapped address (%llx)\n",
Tom Lyon8954da12010-05-17 08:19:52 +01004329 __func__, dmar_domain->gaw, max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004330 return -EFAULT;
4331 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01004332 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004333 }
David Woodhousead051222009-06-28 14:22:28 +01004334	/* Round up size to the next multiple of PAGE_SIZE if it and
 4335	   the low bits of hpa would take us onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01004336 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01004337 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4338 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004339 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03004340}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004341
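/*
 * Unmap at least @size bytes at @iova (rounded up to the covering large
 * page), flush the IOTLB on every IOMMU this domain is attached to, and
 * free the page tables that backed the mapping.
 */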
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004342static size_t intel_iommu_unmap(struct iommu_domain *domain,
David Woodhouseea8ea462014-03-05 17:09:32 +00004343 unsigned long iova, size_t size)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004344{
Joerg Roedeldde57a22008-12-03 15:04:09 +01004345 struct dmar_domain *dmar_domain = domain->priv;
David Woodhouseea8ea462014-03-05 17:09:32 +00004346 struct page *freelist = NULL;
4347 struct intel_iommu *iommu;
4348 unsigned long start_pfn, last_pfn;
4349 unsigned int npages;
4350 int iommu_id, num, ndomains, level = 0;
Sheng Yang4b99d352009-07-08 11:52:52 +01004351
David Woodhouse5cf0a762014-03-19 16:07:49 +00004352 /* Cope with horrid API which requires us to unmap more than the
4353 size argument if it happens to be a large-page mapping. */
4354 if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
4355 BUG();
4356
4357 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
4358 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
4359
David Woodhouseea8ea462014-03-05 17:09:32 +00004360 start_pfn = iova >> VTD_PAGE_SHIFT;
4361 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
4362
4363 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
4364
4365 npages = last_pfn - start_pfn + 1;
4366
4367 for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
4368 iommu = g_iommus[iommu_id];
4369
4370 /*
4371 * find bit position of dmar_domain
4372 */
4373 ndomains = cap_ndoms(iommu->cap);
4374 for_each_set_bit(num, iommu->domain_ids, ndomains) {
4375 if (iommu->domains[num] == dmar_domain)
4376 iommu_flush_iotlb_psi(iommu, num, start_pfn,
4377 npages, !freelist, 0);
4378 }
4379
4380 }
4381
4382 dma_free_pagelist(freelist);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004383
David Woodhouse163cc522009-06-28 00:51:17 +01004384 if (dmar_domain->max_addr == iova + size)
4385 dmar_domain->max_addr = iova;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004386
David Woodhouse5cf0a762014-03-19 16:07:49 +00004387 return size;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004388}
Kay, Allen M38717942008-09-09 18:37:29 +03004389
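/* Walk the domain's page tables and return the physical address backing
 * @iova, or 0 if it is not mapped. */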
Joerg Roedeld14d6572008-12-03 15:06:57 +01004390static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
Varun Sethibb5547ac2013-03-29 01:23:58 +05304391 dma_addr_t iova)
Kay, Allen M38717942008-09-09 18:37:29 +03004392{
Joerg Roedeld14d6572008-12-03 15:06:57 +01004393 struct dmar_domain *dmar_domain = domain->priv;
Kay, Allen M38717942008-09-09 18:37:29 +03004394 struct dma_pte *pte;
David Woodhouse5cf0a762014-03-19 16:07:49 +00004395 int level = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004396 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004397
David Woodhouse5cf0a762014-03-19 16:07:49 +00004398 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
Kay, Allen M38717942008-09-09 18:37:29 +03004399 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004400 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03004401
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004402 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03004403}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004404
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004405static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
4406 unsigned long cap)
4407{
4408 struct dmar_domain *dmar_domain = domain->priv;
4409
4410 if (cap == IOMMU_CAP_CACHE_COHERENCY)
4411 return dmar_domain->iommu_snooping;
Tom Lyon323f99c2010-07-02 16:56:14 -04004412 if (cap == IOMMU_CAP_INTR_REMAP)
Suresh Siddha95a02e92012-03-30 11:47:07 -07004413 return irq_remapping_enabled;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004414
4415 return 0;
4416}
4417
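/*
 * Bus add/remove callbacks: link the device to its IOMMU's sysfs node and
 * place it in (or remove it from) an IOMMU group.
 */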
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004418static int intel_iommu_add_device(struct device *dev)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004419{
Alex Williamsona5459cf2014-06-12 16:12:31 -06004420 struct intel_iommu *iommu;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004421 struct iommu_group *group;
David Woodhouse156baca2014-03-09 14:00:57 -07004422 u8 bus, devfn;
Alex Williamson70ae6f02011-10-21 15:56:11 -04004423
Alex Williamsona5459cf2014-06-12 16:12:31 -06004424 iommu = device_to_iommu(dev, &bus, &devfn);
4425 if (!iommu)
Alex Williamson70ae6f02011-10-21 15:56:11 -04004426 return -ENODEV;
4427
Alex Williamsona5459cf2014-06-12 16:12:31 -06004428 iommu_device_link(iommu->iommu_dev, dev);
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004429
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004430 group = iommu_group_get_for_dev(dev);
Alex Williamson783f1572012-05-30 14:19:43 -06004431
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004432 if (IS_ERR(group))
4433 return PTR_ERR(group);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004434
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004435 iommu_group_put(group);
Alex Williamsone17f9ff2014-07-03 09:51:37 -06004436 return 0;
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004437}
4438
4439static void intel_iommu_remove_device(struct device *dev)
4440{
Alex Williamsona5459cf2014-06-12 16:12:31 -06004441 struct intel_iommu *iommu;
4442 u8 bus, devfn;
4443
4444 iommu = device_to_iommu(dev, &bus, &devfn);
4445 if (!iommu)
4446 return;
4447
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004448 iommu_group_remove_device(dev);
Alex Williamsona5459cf2014-06-12 16:12:31 -06004449
4450 iommu_device_unlink(iommu->iommu_dev, dev);
Alex Williamson70ae6f02011-10-21 15:56:11 -04004451}
4452
Thierry Redingb22f6432014-06-27 09:03:12 +02004453static const struct iommu_ops intel_iommu_ops = {
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004454 .domain_init = intel_iommu_domain_init,
4455 .domain_destroy = intel_iommu_domain_destroy,
4456 .attach_dev = intel_iommu_attach_device,
4457 .detach_dev = intel_iommu_detach_device,
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004458 .map = intel_iommu_map,
4459 .unmap = intel_iommu_unmap,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004460 .iova_to_phys = intel_iommu_iova_to_phys,
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004461 .domain_has_cap = intel_iommu_domain_has_cap,
Alex Williamsonabdfdde2012-05-30 14:19:19 -06004462 .add_device = intel_iommu_add_device,
4463 .remove_device = intel_iommu_remove_device,
Ohad Ben-Cohen6d1c56a2011-11-10 11:32:30 +02004464 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004465};
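/*
 * Illustrative sketch (not part of this driver): how a caller might drive
 * these ops through the generic IOMMU API once intel_iommu_ops has been
 * registered via bus_set_iommu().  Error handling is trimmed and the iova,
 * phys and dev values are assumed to come from the caller.
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&pci_bus_type);
 *
 *	if (domain && !iommu_attach_device(domain, dev)) {
 *		iommu_map(domain, iova, phys, SZ_4K,
 *			  IOMMU_READ | IOMMU_WRITE);	// -> intel_iommu_map()
 *		...
 *		iommu_unmap(domain, iova, SZ_4K);	// -> intel_iommu_unmap()
 *		iommu_detach_device(domain, dev);
 *	}
 *	if (domain)
 *		iommu_domain_free(domain);
 */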
David Woodhouse9af88142009-02-13 23:18:03 +00004466
Daniel Vetter94526182013-01-20 23:50:13 +01004467static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4468{
4469 /* G4x/GM45 integrated gfx dmar support is totally busted. */
4470 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4471 dmar_map_gfx = 0;
4472}
4473
4474DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4475DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4476DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4477DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4478DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4479DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4480DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4481
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004482static void quirk_iommu_rwbf(struct pci_dev *dev)
David Woodhouse9af88142009-02-13 23:18:03 +00004483{
4484 /*
4485 * Mobile 4 Series Chipset neglects to set RWBF capability,
Daniel Vetter210561f2013-01-21 19:48:59 +01004486 * but needs it. Same seems to hold for the desktop versions.
David Woodhouse9af88142009-02-13 23:18:03 +00004487 */
4488 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4489 rwbf_quirk = 1;
4490}
4491
4492DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
Daniel Vetter210561f2013-01-21 19:48:59 +01004493DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4494DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4495DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4496DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4497DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4498DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
David Woodhousee0fc7e02009-09-30 09:12:17 -07004499
Adam Jacksoneecfd572010-08-25 21:17:34 +01004500#define GGC 0x52
4501#define GGC_MEMORY_SIZE_MASK (0xf << 8)
4502#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4503#define GGC_MEMORY_SIZE_1M (0x1 << 8)
4504#define GGC_MEMORY_SIZE_2M (0x3 << 8)
4505#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4506#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4507#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4508#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
4509
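/*
 * On Calpella/Ironlake platforms the GGC register reports whether the BIOS
 * allocated stolen memory for the graphics translation table.  If it did
 * not, graphics DMA cannot be remapped, so drop gfx from the IOMMU; if it
 * did and gfx mapping stays enabled, force strict (unbatched) IOTLB
 * flushing instead.
 */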
Greg Kroah-Hartmand34d6512012-12-21 15:05:21 -08004510static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
David Woodhouse9eecabc2010-09-21 22:28:23 +01004511{
4512 unsigned short ggc;
4513
Adam Jacksoneecfd572010-08-25 21:17:34 +01004514 if (pci_read_config_word(dev, GGC, &ggc))
David Woodhouse9eecabc2010-09-21 22:28:23 +01004515 return;
4516
Adam Jacksoneecfd572010-08-25 21:17:34 +01004517 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
David Woodhouse9eecabc2010-09-21 22:28:23 +01004518 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4519 dmar_map_gfx = 0;
David Woodhouse6fbcfb32011-09-25 19:11:14 -07004520 } else if (dmar_map_gfx) {
4521 /* we have to ensure the gfx device is idle before we flush */
4522 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
4523 intel_iommu_strict = 1;
4524 }
David Woodhouse9eecabc2010-09-21 22:28:23 +01004525}
4526DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4527DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4528DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4529DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4530
David Woodhousee0fc7e02009-09-30 09:12:17 -07004531/* On Tylersburg chipsets, some BIOSes have been known to enable the
4532 ISOCH DMAR unit for the Azalia sound device, but not give it any
4533 TLB entries, which causes it to deadlock. Check for that. We do
4534 this in a function called from init_dmars(), instead of in a PCI
4535 quirk, because we don't want to print the obnoxious "BIOS broken"
4536 message if VT-d is actually disabled.
4537*/
4538static void __init check_tylersburg_isoch(void)
4539{
4540 struct pci_dev *pdev;
4541 uint32_t vtisochctrl;
4542
4543 /* If there's no Azalia in the system anyway, forget it. */
4544 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4545 if (!pdev)
4546 return;
4547 pci_dev_put(pdev);
4548
4549 /* System Management Registers. Might be hidden, in which case
4550 we can't do the sanity check. But that's OK, because the
4551 known-broken BIOSes _don't_ actually hide it, so far. */
4552 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4553 if (!pdev)
4554 return;
4555
4556 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4557 pci_dev_put(pdev);
4558 return;
4559 }
4560
4561 pci_dev_put(pdev);
4562
4563 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4564 if (vtisochctrl & 1)
4565 return;
4566
4567 /* Drop all bits other than the number of TLB entries */
4568 vtisochctrl &= 0x1c;
4569
4570 /* If we have the recommended number of TLB entries (16), fine. */
4571 if (vtisochctrl == 0x10)
4572 return;
4573
4574 /* Zero TLB entries? You get to ride the short bus to school. */
4575 if (!vtisochctrl) {
4576 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4577 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4578 dmi_get_system_info(DMI_BIOS_VENDOR),
4579 dmi_get_system_info(DMI_BIOS_VERSION),
4580 dmi_get_system_info(DMI_PRODUCT_VERSION));
4581 iommu_identity_mapping |= IDENTMAP_AZALIA;
4582 return;
4583 }
4584
4585 printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
4586 vtisochctrl);
4587}