#ifndef __IO_PGTABLE_H
#define __IO_PGTABLE_H
#include <linux/bitops.h>

#include <linux/scatterlist.h>

/*
 * Public API for use by IOMMU drivers
 */
enum io_pgtable_fmt {
	ARM_32_LPAE_S1,
	ARM_32_LPAE_S2,
	ARM_64_LPAE_S1,
	ARM_64_LPAE_S2,
	ARM_V7S,
	IO_PGTABLE_NUM_FMTS,
};

/**
 * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
 *
 * @tlb_flush_all: Synchronously invalidate the entire TLB context.
 * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
 * @tlb_sync:      Ensure any queued TLB invalidation has taken effect, and
 *                 any corresponding page table updates are visible to the
 *                 IOMMU.
 *
 * Note that these can all be called in atomic context and must therefore
 * not block.
 */
struct iommu_gather_ops {
	void (*tlb_flush_all)(void *cookie);
	void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
			      bool leaf, void *cookie);
	void (*tlb_sync)(void *cookie);
};
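
/*
 * Example (illustrative sketch, not part of this API): an IOMMU driver
 * might wire its hardware invalidation routines into iommu_gather_ops as
 * below. struct my_smmu_domain and the MY_SMMU_* registers are
 * hypothetical stand-ins for real hardware accessors.
 *
 *	static void my_smmu_tlb_flush_all(void *cookie)
 *	{
 *		struct my_smmu_domain *dom = cookie;
 *
 *		writel_relaxed(dom->asid, dom->base + MY_SMMU_TLBIASID);
 *	}
 *
 *	static void my_smmu_tlb_add_flush(unsigned long iova, size_t size,
 *					  size_t granule, bool leaf,
 *					  void *cookie)
 *	{
 *		struct my_smmu_domain *dom = cookie;
 *
 *		while (size) {
 *			writel_relaxed(iova, dom->base + MY_SMMU_TLBIVA);
 *			iova += granule;
 *			size -= granule;
 *		}
 *	}
 *
 *	static void my_smmu_tlb_sync(void *cookie)
 *	{
 *		struct my_smmu_domain *dom = cookie;
 *
 *		writel_relaxed(0, dom->base + MY_SMMU_SYNC);
 *	}
 *
 *	static const struct iommu_gather_ops my_smmu_gather_ops = {
 *		.tlb_flush_all	= my_smmu_tlb_flush_all,
 *		.tlb_add_flush	= my_smmu_tlb_add_flush,
 *		.tlb_sync	= my_smmu_tlb_sync,
 *	};
 *
 * None of these helpers sleep, which satisfies the atomic-context
 * requirement above.
 */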

/**
 * struct io_pgtable_cfg - Configuration data for a set of page tables.
 *
 * @quirks:        A bitmap of hardware quirks that require some special
 *                 action by the low-level page table allocator.
 * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
 *                 tables.
 * @ias:           Input address (iova) size, in bits.
 * @oas:           Output address (paddr) size, in bits.
 * @tlb:           TLB management callbacks for this set of tables.
 * @iommu_dev:     The device representing the DMA configuration for the
 *                 page table walker.
 */
struct io_pgtable_cfg {
	/*
	 * IO_PGTABLE_QUIRK_ARM_NS: (ARM formats) Set NS and NSTABLE bits in
	 *	stage 1 PTEs, for hardware which insists on validating them
	 *	even in non-secure state where they should normally be ignored.
	 *
	 * IO_PGTABLE_QUIRK_NO_PERMS: Ignore the IOMMU_READ, IOMMU_WRITE and
	 *	IOMMU_NOEXEC flags and map everything with full access, for
	 *	hardware which does not implement the permissions of a given
	 *	format, and/or requires some format-specific default value.
	 *
	 * IO_PGTABLE_QUIRK_TLBI_ON_MAP: If the format forbids caching invalid
	 *	(unmapped) entries but the hardware might do so anyway, perform
	 *	TLB maintenance when mapping as well as when unmapping.
	 *
	 * IO_PGTABLE_QUIRK_ARM_MTK_4GB: (ARM v7s format) Set bit 9 in all
	 *	PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit
	 *	when the SoC is in "4GB mode" and they can only access the high
	 *	remap of DRAM (0x1_00000000 to 0x1_ffffffff).
	 */
	#define IO_PGTABLE_QUIRK_ARM_NS		BIT(0)
	#define IO_PGTABLE_QUIRK_NO_PERMS	BIT(1)
	#define IO_PGTABLE_QUIRK_TLBI_ON_MAP	BIT(2)
	#define IO_PGTABLE_QUIRK_ARM_MTK_4GB	BIT(3)
	unsigned long			quirks;
	unsigned long			pgsize_bitmap;
	unsigned int			ias;
	unsigned int			oas;
	const struct iommu_gather_ops	*tlb;
	struct device			*iommu_dev;

	/* Low-level data specific to the table format */
	union {
		struct {
			u64	ttbr[2];
			u64	tcr;
			u64	mair[2];
		} arm_lpae_s1_cfg;

		struct {
			u64	vttbr;
			u64	vtcr;
		} arm_lpae_s2_cfg;

		struct {
			u32	ttbr[2];
			u32	tcr;
			u32	nmrr;
			u32	prrr;
		} arm_v7s_cfg;
	};
};
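
/*
 * Example (hedged sketch): a stage 1 LPAE configuration might be filled in
 * as below before being handed to alloc_io_pgtable_ops(). The sizes are
 * illustrative only, and my_smmu_gather_ops/dev refer to the hypothetical
 * driver sketch above.
 *
 *	struct io_pgtable_cfg cfg = {
 *		.quirks		= IO_PGTABLE_QUIRK_ARM_NS,
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *		.ias		= 48,
 *		.oas		= 48,
 *		.tlb		= &my_smmu_gather_ops,
 *		.iommu_dev	= dev,
 *	};
 */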

/**
 * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
 *
 * @map:          Map a physically contiguous memory region.
 * @map_sg:       Map a scatterlist. Returns the number of bytes mapped,
 *                or 0 on failure. The size parameter contains the size
 *                of the partial mapping in case of failure.
 * @unmap:        Unmap a physically contiguous memory region.
 * @iova_to_phys: Translate iova to physical address.
 *
 * These functions map directly onto the iommu_ops member functions with
 * the same names.
 */
struct io_pgtable_ops {
	int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot);
	int (*map_sg)(struct io_pgtable_ops *ops, unsigned long iova,
		      struct scatterlist *sg, unsigned int nents,
		      int prot, size_t *size);
	size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
			size_t size);
	phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
				    unsigned long iova);
};

/**
 * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
 *
 * @fmt:    The page table format.
 * @cfg:    The page table configuration. This will be modified to represent
 *          the configuration actually provided by the allocator (e.g. the
 *          pgsize_bitmap may be restricted).
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          the callback routines in cfg->tlb.
 */
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
					    struct io_pgtable_cfg *cfg,
					    void *cookie);
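
/*
 * Example (hedged sketch): typical driver usage, reusing the hypothetical
 * cfg and domain from the sketches above; error handling is trimmed.
 *
 *	struct io_pgtable_ops *ops;
 *
 *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, dom);
 *	if (!ops)
 *		return -ENOMEM;
 *
 * On return cfg reflects what the allocator actually provided (e.g. the
 * pgsize_bitmap may have been restricted), after which the tables can be
 * manipulated through the ops:
 *
 *	ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	phys = ops->iova_to_phys(ops, iova);
 *	ops->unmap(ops, iova, SZ_4K);
 *
 * and, once the hardware can no longer walk the tables, they are torn
 * down with free_io_pgtable_ops(ops).
 */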

/**
 * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
 *                         *must* ensure that the page table is no longer
 *                         live, but the TLB can be dirty.
 *
 * @ops: The ops returned from alloc_io_pgtable_ops.
 */
void free_io_pgtable_ops(struct io_pgtable_ops *ops);


/*
 * Internal structures for page table allocator implementations.
 */

/**
 * struct io_pgtable - Internal structure describing a set of page tables.
 *
 * @fmt:    The page table format.
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          any callback routines.
 * @tlb_sync_pending: Private flag for optimising out redundant syncs.
 * @cfg:    A copy of the page table configuration.
 * @ops:    The page table operations in use for this set of page tables.
 */
struct io_pgtable {
	enum io_pgtable_fmt	fmt;
	void			*cookie;
	bool			tlb_sync_pending;
	struct io_pgtable_cfg	cfg;
	struct io_pgtable_ops	ops;
};

#define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)
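
/*
 * Example (hedged sketch): a format back-end typically embeds a
 * struct io_pgtable in its own private structure and uses the macro above
 * to recover it from the ops pointer passed to its callbacks. struct
 * my_lpae_data and my_lpae_install_pte() are hypothetical.
 *
 *	struct my_lpae_data {
 *		struct io_pgtable	iop;
 *		int			levels;
 *		void			*pgd;
 *	};
 *
 *	static int my_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
 *			       phys_addr_t paddr, size_t size, int prot)
 *	{
 *		struct io_pgtable *iop = io_pgtable_ops_to_pgtable(ops);
 *		struct my_lpae_data *data;
 *
 *		data = container_of(iop, struct my_lpae_data, iop);
 *		return my_lpae_install_pte(data, iova, paddr, size, prot);
 *	}
 */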

static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
{
	iop->cfg.tlb->tlb_flush_all(iop->cookie);
	iop->tlb_sync_pending = true;
}

static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
		unsigned long iova, size_t size, size_t granule, bool leaf)
{
	iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
	iop->tlb_sync_pending = true;
}

static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
{
	if (iop->tlb_sync_pending) {
		iop->cfg.tlb->tlb_sync(iop->cookie);
		iop->tlb_sync_pending = false;
	}
}
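
/*
 * Example (hedged sketch): format back-ends should issue TLB maintenance
 * through the helpers above rather than calling cfg->tlb directly, so that
 * redundant syncs can be elided via tlb_sync_pending. my_lpae_clear_pte()
 * and the 4K granule are hypothetical.
 *
 *	static size_t my_lpae_unmap(struct io_pgtable_ops *ops,
 *				    unsigned long iova, size_t size)
 *	{
 *		struct io_pgtable *iop = io_pgtable_ops_to_pgtable(ops);
 *		size_t unmapped = my_lpae_clear_pte(iop, iova, size);
 *
 *		if (unmapped) {
 *			io_pgtable_tlb_add_flush(iop, iova, unmapped,
 *						 SZ_4K, true);
 *			io_pgtable_tlb_sync(iop);
 *		}
 *
 *		return unmapped;
 *	}
 */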

/**
 * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
 *                              particular format.
 *
 * @alloc: Allocate a set of page tables described by cfg.
 * @free:  Free the page tables associated with iop.
 */
struct io_pgtable_init_fns {
	struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
	void (*free)(struct io_pgtable *iop);
};
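
/*
 * Example (hedged sketch): each format provides one of these, e.g. a
 * hypothetical format would export
 *
 *	struct io_pgtable_init_fns io_pgtable_my_fmt_init_fns = {
 *		.alloc	= my_fmt_alloc_pgtable,
 *		.free	= my_fmt_free_pgtable,
 *	};
 *
 * which the core io-pgtable code selects by enum io_pgtable_fmt when
 * alloc_io_pgtable_ops() is called, as with the formats declared below.
 */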
209
Joerg Roedel2e169bb2015-08-13 12:01:10 +0200210extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
211extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
212extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
213extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
Robin Murphye5fc9752016-01-26 17:13:13 +0000214extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
Joerg Roedel2e169bb2015-08-13 12:01:10 +0200215
Mitchel Humpherysf01d6e32015-07-15 18:25:07 -0700216/**
217 * io_pgtable_alloc_pages_exact:
218 * allocate an exact number of physically-contiguous pages.
219 * @size: the number of bytes to allocate
220 * @gfp_mask: GFP flags for the allocation
221 *
222 * Like alloc_pages_exact(), but with some additional accounting for debug
223 * purposes.
224 */
225void *io_pgtable_alloc_pages_exact(size_t size, gfp_t gfp_mask);
226
/**
 * io_pgtable_free_pages_exact:
 *	release memory allocated via io_pgtable_alloc_pages_exact()
 * @virt: the value returned by io_pgtable_alloc_pages_exact().
 * @size: size of allocation, same value as passed to
 *	io_pgtable_alloc_pages_exact().
 *
 * Like free_pages_exact(), but with some additional accounting for debug
 * purposes.
 */
void io_pgtable_free_pages_exact(void *virt, size_t size);
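
/*
 * Example (hedged sketch): these are used as an accounted replacement for
 * the alloc_pages_exact()/free_pages_exact() pair, e.g. when allocating a
 * table level (PAGE_SIZE and the flags here are illustrative):
 *
 *	void *table;
 *
 *	table = io_pgtable_alloc_pages_exact(PAGE_SIZE,
 *					     GFP_ATOMIC | __GFP_ZERO);
 *	if (!table)
 *		return NULL;
 *
 * and the matching size must be passed back when releasing it:
 *
 *	io_pgtable_free_pages_exact(table, PAGE_SIZE);
 */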

#endif /* __IO_PGTABLE_H */