#ifndef __IO_PGTABLE_H
#define __IO_PGTABLE_H
#include <linux/bitops.h>

#include <linux/scatterlist.h>
#include <soc/qcom/msm_tz_smmu.h>

/*
 * Public API for use by IOMMU drivers
 */
enum io_pgtable_fmt {
	ARM_32_LPAE_S1,
	ARM_32_LPAE_S2,
	ARM_64_LPAE_S1,
	ARM_64_LPAE_S2,
	ARM_V7S,
	ARM_V8L_FAST,
	ARM_MSM_SECURE,
	IO_PGTABLE_NUM_FMTS,
};

/**
 * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
 *
 * @tlb_flush_all: Synchronously invalidate the entire TLB context.
 * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
 * @tlb_sync:      Ensure any queued TLB invalidation has taken effect, and
 *                 any corresponding page table updates are visible to the
 *                 IOMMU.
 * @alloc_pages_exact: Allocate page table memory (optional, defaults to
 *                     alloc_pages_exact).
 * @free_pages_exact:  Free page table memory (optional, defaults to
 *                     free_pages_exact).
 *
 * Note that these can all be called in atomic context and must therefore
 * not block.
 */
struct iommu_gather_ops {
	void (*tlb_flush_all)(void *cookie);
	void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
			      bool leaf, void *cookie);
	void (*tlb_sync)(void *cookie);
	void *(*alloc_pages_exact)(void *cookie, size_t size, gfp_t gfp_mask);
	void (*free_pages_exact)(void *cookie, void *virt, size_t size);
};
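
/*
 * Example: a minimal sketch of how a driver might populate these callbacks.
 * The my_smmu_* names are hypothetical stand-ins for real hardware accessors;
 * only the callback signatures come from this header. alloc_pages_exact and
 * free_pages_exact may be left NULL to get the default allocator.
 *
 *	static void my_smmu_tlb_flush_all(void *cookie)
 *	{
 *		my_smmu_write(cookie, MY_SMMU_TLBIALL, 0);
 *	}
 *
 *	static void my_smmu_tlb_add_flush(unsigned long iova, size_t size,
 *					  size_t granule, bool leaf,
 *					  void *cookie)
 *	{
 *		my_smmu_write(cookie, MY_SMMU_TLBIVA, iova);
 *	}
 *
 *	static void my_smmu_tlb_sync(void *cookie)
 *	{
 *		my_smmu_wait_for_tlbi(cookie);
 *	}
 *
 *	static const struct iommu_gather_ops my_smmu_gather_ops = {
 *		.tlb_flush_all	= my_smmu_tlb_flush_all,
 *		.tlb_add_flush	= my_smmu_tlb_add_flush,
 *		.tlb_sync	= my_smmu_tlb_sync,
 *	};
 */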

/**
 * struct io_pgtable_cfg - Configuration data for a set of page tables.
 *
 * @quirks:        A bitmap of hardware quirks that require some special
 *                 action by the low-level page table allocator.
 * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
 *                 tables.
 * @ias:           Input address (iova) size, in bits.
 * @oas:           Output address (paddr) size, in bits.
 * @tlb:           TLB management callbacks for this set of tables.
 * @iommu_dev:     The device representing the DMA configuration for the
 *                 page table walker.
 */
struct io_pgtable_cfg {
	/*
	 * IO_PGTABLE_QUIRK_ARM_NS: (ARM formats) Set NS and NSTABLE bits in
	 *	stage 1 PTEs, for hardware which insists on validating them
	 *	even in non-secure state where they should normally be ignored.
	 *
	 * IO_PGTABLE_QUIRK_NO_PERMS: Ignore the IOMMU_READ, IOMMU_WRITE and
	 *	IOMMU_NOEXEC flags and map everything with full access, for
	 *	hardware which does not implement the permissions of a given
	 *	format, and/or requires some format-specific default value.
	 *
	 * IO_PGTABLE_QUIRK_TLBI_ON_MAP: If the format forbids caching invalid
	 *	(unmapped) entries but the hardware might do so anyway, perform
	 *	TLB maintenance when mapping as well as when unmapping.
	 *
	 * IO_PGTABLE_QUIRK_ARM_MTK_4GB: (ARM v7s format) Set bit 9 in all
	 *	PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit
	 *	when the SoC is in "4GB mode" and they can only access the high
	 *	remap of DRAM (0x1_00000000 to 0x1_ffffffff).
	 *
	 * IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT: Override the attributes
	 *	set in TCR for the page table walker; use the attributes
	 *	specified by the upstream hardware instead.
	 *
	 * IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT: Set the page table as
	 *	coherent.
	 *
	 * IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE: Page tables which are
	 *	non-coherent, but cached in a system cache, require
	 *	SH=Non-Shareable. This applies to the qsmmuv500 model. For
	 *	data buffers SH=Non-Shareable is not required.
	 */
	#define IO_PGTABLE_QUIRK_ARM_NS			BIT(0)
	#define IO_PGTABLE_QUIRK_NO_PERMS		BIT(1)
	#define IO_PGTABLE_QUIRK_TLBI_ON_MAP		BIT(2)
	#define IO_PGTABLE_QUIRK_ARM_MTK_4GB		BIT(3)
	#define IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT	BIT(4)
	#define IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT	BIT(5)
	#define IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE BIT(6)
	unsigned long			quirks;
	unsigned long			pgsize_bitmap;
	unsigned int			ias;
	unsigned int			oas;
	const struct iommu_gather_ops	*tlb;
	struct device			*iommu_dev;

	/* Low-level data specific to the table format */
	union {
		struct {
			u64	ttbr[2];
			u64	tcr;
			u64	mair[2];
		} arm_lpae_s1_cfg;

		struct {
			u64	vttbr;
			u64	vtcr;
		} arm_lpae_s2_cfg;

		struct {
			u32	ttbr[2];
			u32	tcr;
			u32	nmrr;
			u32	prrr;
		} arm_v7s_cfg;

		struct {
			u64	ttbr[2];
			u64	tcr;
			u64	mair[2];
			void	*pmds;
		} av8l_fast_cfg;

		struct {
			enum tz_smmu_device_id	sec_id;
			int			cbndx;
		} arm_msm_secure_cfg;
	};
};
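
/*
 * Example: a hypothetical stage 1 LPAE configuration. The page sizes, address
 * widths and quirks below are illustrative values, not defaults; a real
 * driver derives them from its hardware ID registers. my_smmu_gather_ops
 * refers to the sketch above, and the SZ_* constants come from
 * <linux/sizes.h>.
 *
 *	struct io_pgtable_cfg cfg = {
 *		.quirks		= IO_PGTABLE_QUIRK_ARM_NS,
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *		.ias		= 48,
 *		.oas		= 48,
 *		.tlb		= &my_smmu_gather_ops,
 *		.iommu_dev	= dev,
 *	};
 *
 * On success, alloc_io_pgtable_ops() fills in the format-specific union
 * member (here arm_lpae_s1_cfg), whose ttbr/tcr/mair values the driver
 * programs into its translation context registers.
 */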

/**
 * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
 *
 * @map:              Map a physically contiguous memory region.
 * @map_sg:           Map a scatterlist. Returns the number of bytes mapped,
 *                    or 0 on failure. The size parameter contains the size
 *                    of the partial mapping in case of failure.
 * @unmap:            Unmap a physically contiguous memory region.
 * @iova_to_phys:     Translate iova to physical address.
 * @is_iova_coherent: Check whether the given iova is mapped as IO-coherent.
 * @iova_to_pte:      Return the raw page table entry mapping the given iova.
 *
 * These functions map directly onto the iommu_ops member functions with
 * the same names.
 */
struct io_pgtable_ops {
	int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot);
	int (*map_sg)(struct io_pgtable_ops *ops, unsigned long iova,
		      struct scatterlist *sg, unsigned int nents,
		      int prot, size_t *size);
	size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
			size_t size);
	phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
				    unsigned long iova);
	bool (*is_iova_coherent)(struct io_pgtable_ops *ops,
				 unsigned long iova);
	uint64_t (*iova_to_pte)(struct io_pgtable_ops *ops,
				unsigned long iova);
};
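
/*
 * Example: forwarding an iommu_ops map/unmap pair through this table
 * (illustrative sketch; error handling and locking elided, map() returns
 * 0 on success):
 *
 *	int ret = ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	if (!ret) {
 *		phys_addr_t phys = ops->iova_to_phys(ops, iova);
 *		size_t unmapped = ops->unmap(ops, iova, SZ_4K);
 *	}
 */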

/**
 * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
 *
 * @fmt:    The page table format.
 * @cfg:    The page table configuration. This will be modified to represent
 *          the configuration actually provided by the allocator (e.g. the
 *          pgsize_bitmap may be restricted).
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          the callback routines in cfg->tlb.
 */
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
					    struct io_pgtable_cfg *cfg,
					    void *cookie);

/**
 * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
 *                         *must* ensure that the page table is no longer
 *                         live, but the TLB can be dirty.
 *
 * @ops: The ops returned from alloc_io_pgtable_ops.
 */
void free_io_pgtable_ops(struct io_pgtable_ops *ops);

/*
 * Internal structures for page table allocator implementations.
 */

/**
 * struct io_pgtable - Internal structure describing a set of page tables.
 *
 * @fmt:    The page table format.
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          any callback routines.
 * @tlb_sync_pending: Private flag for optimising out redundant syncs.
 * @cfg:    A copy of the page table configuration.
 * @ops:    The page table operations in use for this set of page tables.
 */
struct io_pgtable {
	enum io_pgtable_fmt	fmt;
	void			*cookie;
	bool			tlb_sync_pending;
	struct io_pgtable_cfg	cfg;
	struct io_pgtable_ops	ops;
};

#define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)

static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
{
	if (!iop->cfg.tlb)
		return;
	iop->cfg.tlb->tlb_flush_all(iop->cookie);
	iop->tlb_sync_pending = true;
}

static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
		unsigned long iova, size_t size, size_t granule, bool leaf)
{
	if (!iop->cfg.tlb)
		return;
	iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
	iop->tlb_sync_pending = true;
}

static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
{
	if (!iop->cfg.tlb)
		return;
	if (iop->tlb_sync_pending) {
		iop->cfg.tlb->tlb_sync(iop->cookie);
		iop->tlb_sync_pending = false;
	}
}
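
/*
 * Example: the tlb_sync_pending flag coalesces redundant syncs, so a format
 * driver's unmap path can queue several invalidations and pay for a single
 * sync (illustrative sketch):
 *
 *	io_pgtable_tlb_add_flush(iop, iova, SZ_4K, SZ_4K, true);
 *	io_pgtable_tlb_add_flush(iop, iova + SZ_4K, SZ_4K, SZ_4K, true);
 *	io_pgtable_tlb_sync(iop);	// one sync covers both invalidations
 *
 * The NULL cfg.tlb checks let formats whose TLB is managed elsewhere omit
 * the callbacks entirely; they were introduced alongside ARM_MSM_SECURE,
 * whose page tables live in the secure world.
 */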

/**
 * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
 *                              particular format.
 *
 * @alloc: Allocate a set of page tables described by cfg.
 * @free:  Free the page tables associated with iop.
 */
struct io_pgtable_init_fns {
	struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
	void (*free)(struct io_pgtable *iop);
};

extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
extern struct io_pgtable_init_fns io_pgtable_av8l_fast_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_msm_secure_init_fns;
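
/*
 * Example: each format implementation defines one of the tables above, e.g.
 * (a sketch following the naming used by the arm-lpae implementation; the
 * exact function names are internal to that file):
 *
 *	struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
 *		.alloc	= arm_64_lpae_alloc_pgtable_s1,
 *		.free	= arm_lpae_free_pgtable,
 *	};
 */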

/**
 * io_pgtable_alloc_pages_exact:
 *	allocate an exact number of physically-contiguous pages.
 * @cfg:      the page table configuration (supplies the optional
 *            alloc_pages_exact callback)
 * @cookie:   opaque token passed through to the allocator callback
 * @size:     the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * Like alloc_pages_exact(), but with some additional accounting for debug
 * purposes.
 */
void *io_pgtable_alloc_pages_exact(struct io_pgtable_cfg *cfg, void *cookie,
				   size_t size, gfp_t gfp_mask);

/**
 * io_pgtable_free_pages_exact:
 *	release memory allocated via io_pgtable_alloc_pages_exact()
 * @cfg:    the page table configuration (supplies the optional
 *          free_pages_exact callback)
 * @cookie: opaque token passed through to the free callback
 * @virt:   the value returned by alloc_pages_exact.
 * @size:   size of allocation, same value as passed to alloc_pages_exact().
 *
 * Like free_pages_exact(), but with some additional accounting for debug
 * purposes.
 */
void io_pgtable_free_pages_exact(struct io_pgtable_cfg *cfg, void *cookie,
				 void *virt, size_t size);
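
/*
 * Example: a format implementation allocating and releasing one level of
 * table through these wrappers (sketch; data->iop.cfg and the surrounding
 * error handling are hypothetical):
 *
 *	void *table = io_pgtable_alloc_pages_exact(&data->iop.cfg, cookie,
 *						   PAGE_SIZE,
 *						   GFP_ATOMIC | __GFP_ZERO);
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	io_pgtable_free_pages_exact(&data->iop.cfg, cookie, table, PAGE_SIZE);
 */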

#endif /* __IO_PGTABLE_H */