#ifndef _DMA_REMAPPING_H
#define _DMA_REMAPPING_H

/*
 * VT-d hardware uses 4KiB page size regardless of host page size.
 */
#define VTD_PAGE_SHIFT		(12)
#define VTD_PAGE_SIZE		(1UL << VTD_PAGE_SHIFT)
#define VTD_PAGE_MASK		(((u64)-1) << VTD_PAGE_SHIFT)
#define VTD_PAGE_ALIGN(addr)	(((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)

/* Each page-table level covers 9 bits of the address (512 entries/table). */
#define VTD_STRIDE_SHIFT        (9)
#define VTD_STRIDE_MASK         (((u64)-1) << VTD_STRIDE_SHIFT)

/* DMA page-table entry permission/attribute bits. */
#define DMA_PTE_READ (1)
#define DMA_PTE_WRITE (2)
#define DMA_PTE_LARGE_PAGE (1 << 7)
#define DMA_PTE_SNP (1 << 11)

/* Context-entry translation types. */
#define CONTEXT_TT_MULTI_LEVEL	0
#define CONTEXT_TT_DEV_IOTLB	1
#define CONTEXT_TT_PASS_THROUGH 2

struct intel_iommu;
struct dmar_domain;
struct root_entry;


#ifdef CONFIG_INTEL_IOMMU
extern void free_dmar_iommu(struct intel_iommu *iommu);
extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
extern int dmar_disabled;
extern int intel_iommu_enabled;
#else
/*
 * Stubs for !CONFIG_INTEL_IOMMU: callers see the same interface, the
 * "disabled" state is hard-coded via the macros below.
 */
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return 0;
}
static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return 0;
}
static inline void free_dmar_iommu(struct intel_iommu *iommu)
{
}
#define dmar_disabled	(1)
#define intel_iommu_enabled (0)
#endif


#endif