Will Deacon | fdb1d7b | 2014-11-14 17:16:49 +0000 | [diff] [blame] | 1 | #ifndef __IO_PGTABLE_H |
| 2 | #define __IO_PGTABLE_H |
Robin Murphy | e5fc975 | 2016-01-26 17:13:13 +0000 | [diff] [blame] | 3 | #include <linux/bitops.h> |
Will Deacon | fdb1d7b | 2014-11-14 17:16:49 +0000 | [diff] [blame] | 4 | |
Mitchel Humpherys | cd6ff13 | 2015-04-23 16:16:56 -0700 | [diff] [blame] | 5 | #include <linux/scatterlist.h> |
| 6 | |
/*
 * Public API for use by IOMMU drivers
 */

/* Supported page table formats; used to select an allocator implementation. */
enum io_pgtable_fmt {
	ARM_32_LPAE_S1,		/* ARM 32-bit LPAE, stage-1 translation */
	ARM_32_LPAE_S2,		/* ARM 32-bit LPAE, stage-2 translation */
	ARM_64_LPAE_S1,		/* ARM 64-bit LPAE, stage-1 translation */
	ARM_64_LPAE_S2,		/* ARM 64-bit LPAE, stage-2 translation */
	ARM_V7S,		/* ARMv7 short-descriptor format */
	IO_PGTABLE_NUM_FMTS,	/* sentinel: number of formats, keep last */
};
| 18 | |
/**
 * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
 *
 * @tlb_flush_all: Synchronously invalidate the entire TLB context.
 * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
 *                 @granule is the invalidation granule; @leaf indicates
 *                 whether only leaf (last-level) entries were changed.
 * @tlb_sync:      Ensure any queued TLB invalidation has taken effect, and
 *                 any corresponding page table updates are visible to the
 *                 IOMMU.
 *
 * Note that these can all be called in atomic context and must therefore
 * not block.
 */
struct iommu_gather_ops {
	void (*tlb_flush_all)(void *cookie);
	void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
			      bool leaf, void *cookie);
	void (*tlb_sync)(void *cookie);
};
| 37 | |
/**
 * struct io_pgtable_cfg - Configuration data for a set of page tables.
 *
 * @quirks:        A bitmap of hardware quirks that require some special
 *                 action by the low-level page table allocator.
 * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
 *                 tables.
 * @ias:           Input address (iova) size, in bits.
 * @oas:           Output address (paddr) size, in bits.
 * @tlb:           TLB management callbacks for this set of tables.
 * @iommu_dev:     The device representing the DMA configuration for the
 *                 page table walker.
 */
struct io_pgtable_cfg {
	/*
	 * IO_PGTABLE_QUIRK_ARM_NS: (ARM formats) Set NS and NSTABLE bits in
	 * stage 1 PTEs, for hardware which insists on validating them
	 * even in non-secure state where they should normally be ignored.
	 *
	 * IO_PGTABLE_QUIRK_NO_PERMS: Ignore the IOMMU_READ, IOMMU_WRITE and
	 * IOMMU_NOEXEC flags and map everything with full access, for
	 * hardware which does not implement the permissions of a given
	 * format, and/or requires some format-specific default value.
	 *
	 * IO_PGTABLE_QUIRK_TLBI_ON_MAP: If the format forbids caching invalid
	 * (unmapped) entries but the hardware might do so anyway, perform
	 * TLB maintenance when mapping as well as when unmapping.
	 *
	 * IO_PGTABLE_QUIRK_ARM_MTK_4GB: (ARM v7s format) Set bit 9 in all
	 * PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit
	 * when the SoC is in "4GB mode" and they can only access the high
	 * remap of DRAM (0x1_00000000 to 0x1_ffffffff).
	 */
	#define IO_PGTABLE_QUIRK_ARM_NS		BIT(0)
	#define IO_PGTABLE_QUIRK_NO_PERMS	BIT(1)
	#define IO_PGTABLE_QUIRK_TLBI_ON_MAP	BIT(2)
	#define IO_PGTABLE_QUIRK_ARM_MTK_4GB	BIT(3)
	unsigned long			quirks;
	unsigned long			pgsize_bitmap;
	unsigned int			ias;
	unsigned int			oas;
	const struct iommu_gather_ops	*tlb;
	struct device			*iommu_dev;

	/* Low-level data specific to the table format */
	union {
		/* Hardware register values for an ARM LPAE stage-1 context */
		struct {
			u64	ttbr[2];
			u64	tcr;
			u64	mair[2];
		} arm_lpae_s1_cfg;

		/* Hardware register values for an ARM LPAE stage-2 context */
		struct {
			u64	vttbr;
			u64	vtcr;
		} arm_lpae_s2_cfg;

		/* Hardware register values for an ARMv7 short-descriptor context */
		struct {
			u32	ttbr[2];
			u32	tcr;
			u32	nmrr;
			u32	prrr;
		} arm_v7s_cfg;
	};
};
| 103 | |
/**
 * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
 *
 * @map:          Map a physically contiguous memory region.
 * @map_sg:       Map a scatterlist. Returns the number of bytes mapped,
 *                or 0 on failure. The size parameter contains the size
 *                of the partial mapping in case of failure.
 * @unmap:        Unmap a physically contiguous memory region. Returns the
 *                number of bytes unmapped.
 * @iova_to_phys: Translate iova to physical address.
 *
 * These functions map directly onto the iommu_ops member functions with
 * the same names.
 */
struct io_pgtable_ops {
	int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot);
	int (*map_sg)(struct io_pgtable_ops *ops, unsigned long iova,
		      struct scatterlist *sg, unsigned int nents,
		      int prot, size_t *size);
	size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
			size_t size);
	phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
				    unsigned long iova);
};
| 128 | |
/**
 * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
 *
 * @fmt:    The page table format.
 * @cfg:    The page table configuration. This will be modified to represent
 *          the configuration actually provided by the allocator (e.g. the
 *          pgsize_bitmap may be restricted).
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          the callback routines in cfg->tlb.
 *
 * Return: the operations table for the new set of page tables; presumably
 * NULL on failure — confirm against the allocator implementation.
 */
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
					    struct io_pgtable_cfg *cfg,
					    void *cookie);
| 142 | |
/**
 * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
 *                         *must* ensure that the page table is no longer
 *                         live, but the TLB can be dirty.
 *
 * @ops: The ops returned from alloc_io_pgtable_ops.
 */
void free_io_pgtable_ops(struct io_pgtable_ops *ops);
| 151 | |
| 152 | |
/*
 * Internal structures for page table allocator implementations.
 */

/**
 * struct io_pgtable - Internal structure describing a set of page tables.
 *
 * @fmt:              The page table format.
 * @cookie:           An opaque token provided by the IOMMU driver and passed
 *                    back to any callback routines.
 * @tlb_sync_pending: Private flag for optimising out redundant syncs; set by
 *                    the flush helpers below and cleared by
 *                    io_pgtable_tlb_sync().
 * @cfg:              A copy of the page table configuration.
 * @ops:              The page table operations in use for this set of page
 *                    tables.
 */
struct io_pgtable {
	enum io_pgtable_fmt	fmt;
	void			*cookie;
	bool			tlb_sync_pending;
	struct io_pgtable_cfg	cfg;
	struct io_pgtable_ops	ops;
};
| 174 | |
Robin Murphy | fdc3896 | 2015-12-04 17:53:01 +0000 | [diff] [blame] | 175 | #define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops) |
| 176 | |
Robin Murphy | 507e4c9 | 2016-01-26 17:13:14 +0000 | [diff] [blame] | 177 | static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop) |
| 178 | { |
| 179 | iop->cfg.tlb->tlb_flush_all(iop->cookie); |
Robin Murphy | 88492a4 | 2016-01-26 17:13:15 +0000 | [diff] [blame] | 180 | iop->tlb_sync_pending = true; |
Robin Murphy | 507e4c9 | 2016-01-26 17:13:14 +0000 | [diff] [blame] | 181 | } |
| 182 | |
| 183 | static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop, |
| 184 | unsigned long iova, size_t size, size_t granule, bool leaf) |
| 185 | { |
| 186 | iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie); |
Robin Murphy | 88492a4 | 2016-01-26 17:13:15 +0000 | [diff] [blame] | 187 | iop->tlb_sync_pending = true; |
Robin Murphy | 507e4c9 | 2016-01-26 17:13:14 +0000 | [diff] [blame] | 188 | } |
| 189 | |
| 190 | static inline void io_pgtable_tlb_sync(struct io_pgtable *iop) |
| 191 | { |
Robin Murphy | 88492a4 | 2016-01-26 17:13:15 +0000 | [diff] [blame] | 192 | if (iop->tlb_sync_pending) { |
| 193 | iop->cfg.tlb->tlb_sync(iop->cookie); |
| 194 | iop->tlb_sync_pending = false; |
| 195 | } |
Robin Murphy | 507e4c9 | 2016-01-26 17:13:14 +0000 | [diff] [blame] | 196 | } |
| 197 | |
/**
 * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
 *                              particular format.
 *
 * @alloc: Allocate a set of page tables described by cfg.
 * @free:  Free the page tables associated with iop.
 */
struct io_pgtable_init_fns {
	struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
	void (*free)(struct io_pgtable *iop);
};
| 209 | |
/*
 * Per-format allocator hooks, one for each io_pgtable_fmt value above;
 * defined by the respective format implementations.
 */
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
Joerg Roedel | 2e169bb | 2015-08-13 12:01:10 +0200 | [diff] [blame] | 215 | |
/**
 * io_pgtable_alloc_pages_exact:
 *	allocate an exact number of physically-contiguous pages.
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * Like alloc_pages_exact(), but with some additional accounting for debug
 * purposes.
 *
 * Return value semantics presumably follow alloc_pages_exact() (virtual
 * address of the allocation, or NULL) — confirm against the implementation.
 */
void *io_pgtable_alloc_pages_exact(size_t size, gfp_t gfp_mask);

/**
 * io_pgtable_free_pages_exact:
 *	release memory allocated via io_pgtable_alloc_pages_exact()
 * @virt: the value returned by alloc_pages_exact.
 * @size: size of allocation, same value as passed to alloc_pages_exact().
 *
 * Like free_pages_exact(), but with some additional accounting for debug
 * purposes.
 */
void io_pgtable_free_pages_exact(void *virt, size_t size);
| 237 | |
Will Deacon | fdb1d7b | 2014-11-14 17:16:49 +0000 | [diff] [blame] | 238 | #endif /* __IO_PGTABLE_H */ |