blob: 8ec5ac45caabff8d069cc9250c3c5b677b0fcd8d [file] [log] [blame]
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +02001/*
Thierry Reding89184652014-04-16 09:24:44 +02002 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +02003 *
Thierry Reding89184652014-04-16 09:24:44 +02004 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +02007 */
8
Thierry Reding804cb542015-03-27 11:07:27 +01009#include <linux/bitops.h>
Thierry Redingd1313e72015-01-23 09:49:25 +010010#include <linux/debugfs.h>
Thierry Redingbc5e6de2013-01-21 11:09:06 +010011#include <linux/err.h>
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +020012#include <linux/iommu.h>
Thierry Reding89184652014-04-16 09:24:44 +020013#include <linux/kernel.h>
Hiroshi Doyu0760e8f2012-06-25 14:23:55 +030014#include <linux/of.h>
Thierry Reding89184652014-04-16 09:24:44 +020015#include <linux/of_device.h>
16#include <linux/platform_device.h>
17#include <linux/slab.h>
Thierry Reding306a7f92014-07-17 13:17:24 +020018
19#include <soc/tegra/ahb.h>
Thierry Reding89184652014-04-16 09:24:44 +020020#include <soc/tegra/mc.h>
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +020021
/* Per-instance state for one Tegra SMMU, embedded in the memory controller. */
struct tegra_smmu {
	void __iomem *regs;		/* SMMU register window (shared with the MC) */
	struct device *dev;

	struct tegra_mc *mc;		/* parent memory controller */
	const struct tegra_smmu_soc *soc;	/* SoC-specific tables (clients, swgroups, ASID count) */

	unsigned long pfn_mask;		/* mask of valid PFN bits within a PTE */

	unsigned long *asids;		/* bitmap of in-use ASIDs */
	struct mutex lock;		/* protects the asids bitmap */

	struct list_head list;

	struct dentry *debugfs;		/* root of this instance's debugfs directory */
};
38
/* An address space: one two-level page table plus the ASID it is bound to. */
struct tegra_smmu_as {
	struct iommu_domain domain;	/* embedded generic domain; see to_smmu_as() */
	struct tegra_smmu *smmu;	/* SMMU this AS is prepared on (set in as_prepare) */
	unsigned int use_count;		/* prepare/unprepare reference count */
	struct page *count;		/* page holding per-PDE PTE usage counters */
	struct page **pts;		/* SMMU_NUM_PDE page-table pages, NULL if absent */
	struct page *pd;		/* page directory page */
	unsigned id;			/* ASID allocated in tegra_smmu_as_prepare() */
	u32 attr;			/* PD attribute bits programmed into SMMU_PTB_DATA */
};
49
/* Convert a generic iommu_domain back to the containing tegra_smmu_as. */
static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
	return container_of(dom, struct tegra_smmu_as, domain);
}
54
/* Write a 32-bit SMMU register at @offset within the register window. */
static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}
60
/* Read a 32-bit SMMU register at @offset within the register window. */
static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}
65
/* Global configuration register. */
#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

/* TLB configuration: hit-under-miss, arbitration and active line count. */
#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f)

/* Page table cache (PTC) configuration. */
#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

/* Selects which ASID the SMMU_PTB_DATA access below refers to. */
#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

/* Page directory base + attributes for the ASID selected via SMMU_PTB_ASID. */
#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(page, attr) (page_to_phys(page) >> 12 | (attr))

/* Build a page directory entry pointing at a page-table page. */
#define SMMU_MK_PDE(page, attr) (page_to_phys(page) >> SMMU_PTE_SHIFT | (attr))

/* TLB invalidation: by everything, by 4 MiB section or by 16 KiB group. */
#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
#define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					 SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
				       SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH (1 << 31)

/* Page table cache invalidation (all entries or by physical address). */
#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

/* Upper bits of the flush address when more than 32 address bits exist. */
#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions: 1024 PDEs x 1024 PTEs x 4 KiB pages = 4 GiB */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)

#define SMMU_PDE_READABLE (1 << 31)
#define SMMU_PDE_WRITABLE (1 << 30)
#define SMMU_PDE_NONSECURE (1 << 29)
#define SMMU_PDE_NEXT (1 << 28)

#define SMMU_PTE_READABLE (1 << 31)
#define SMMU_PTE_WRITABLE (1 << 30)
#define SMMU_PTE_NONSECURE (1 << 29)

#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
		       SMMU_PDE_NONSECURE)
#define SMMU_PTE_ATTR (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
		       SMMU_PTE_NONSECURE)
137
Russell King34d35f82015-07-27 13:29:16 +0100138static unsigned int iova_pd_index(unsigned long iova)
139{
140 return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
141}
142
143static unsigned int iova_pt_index(unsigned long iova)
144{
145 return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
146}
147
/*
 * Invalidate the page table cache. With a @page, only the cache line holding
 * the entry at @offset within that page is flushed; with a NULL @page the
 * entire PTC is invalidated.
 */
static inline void smmu_flush_ptc(struct tegra_smmu *smmu, struct page *page,
				  unsigned long offset)
{
	phys_addr_t phys = page ? page_to_phys(page) : 0;
	u32 value;

	if (page) {
		/* The PTC is flushed at cache-atom granularity. */
		offset &= ~(smmu->mc->soc->atom_size - 1);

		if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_PHYS_ADDR_T_64BIT
			/* Upper address bits must be written before the flush. */
			value = (phys >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
			value = 0;
#endif
			smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
		}

		value = (phys + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	} else {
		value = SMMU_PTC_FLUSH_TYPE_ALL;
	}

	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}
173
/* Invalidate the entire TLB, all ASIDs and addresses. */
static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}
178
179static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
180 unsigned long asid)
181{
182 u32 value;
183
184 value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
185 SMMU_TLB_FLUSH_VA_MATCH_ALL;
186 smmu_writel(smmu, value, SMMU_TLB_FLUSH);
187}
188
189static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
190 unsigned long asid,
191 unsigned long iova)
192{
193 u32 value;
194
195 value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
196 SMMU_TLB_FLUSH_VA_SECTION(iova);
197 smmu_writel(smmu, value, SMMU_TLB_FLUSH);
198}
199
200static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
201 unsigned long asid,
202 unsigned long iova)
203{
204 u32 value;
205
206 value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
207 SMMU_TLB_FLUSH_VA_GROUP(iova);
208 smmu_writel(smmu, value, SMMU_TLB_FLUSH);
209}
210
/* Read back SMMU_CONFIG to ensure previous register writes have completed. */
static inline void smmu_flush(struct tegra_smmu *smmu)
{
	smmu_readl(smmu, SMMU_CONFIG);
}
215
216static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
217{
218 unsigned long id;
219
220 mutex_lock(&smmu->lock);
221
222 id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
223 if (id >= smmu->soc->num_asids) {
224 mutex_unlock(&smmu->lock);
225 return -ENOSPC;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200226 }
Hiroshi DOYU9e971a02012-07-02 14:26:38 +0300227
Thierry Reding89184652014-04-16 09:24:44 +0200228 set_bit(id, smmu->asids);
229 *idp = id;
Hiroshi DOYU9e971a02012-07-02 14:26:38 +0300230
Thierry Reding89184652014-04-16 09:24:44 +0200231 mutex_unlock(&smmu->lock);
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200232 return 0;
233}
234
/* Return @id to the ASID allocator; serialized with tegra_smmu_alloc_asid(). */
static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	mutex_lock(&smmu->lock);
	clear_bit(id, smmu->asids);
	mutex_unlock(&smmu->lock);
}
241
/* IOMMU capability query: no optional capabilities are advertised. */
static bool tegra_smmu_capable(enum iommu_cap cap)
{
	return false;
}
246
Joerg Roedeld5f1a812015-03-26 13:43:12 +0100247static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200248{
Thierry Reding89184652014-04-16 09:24:44 +0200249 struct tegra_smmu_as *as;
250 unsigned int i;
251 uint32_t *pd;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200252
Joerg Roedeld5f1a812015-03-26 13:43:12 +0100253 if (type != IOMMU_DOMAIN_UNMANAGED)
254 return NULL;
255
Thierry Reding89184652014-04-16 09:24:44 +0200256 as = kzalloc(sizeof(*as), GFP_KERNEL);
257 if (!as)
Joerg Roedeld5f1a812015-03-26 13:43:12 +0100258 return NULL;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200259
Thierry Reding89184652014-04-16 09:24:44 +0200260 as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200261
Thierry Reding89184652014-04-16 09:24:44 +0200262 as->pd = alloc_page(GFP_KERNEL | __GFP_DMA);
263 if (!as->pd) {
264 kfree(as);
Joerg Roedeld5f1a812015-03-26 13:43:12 +0100265 return NULL;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200266 }
267
Thierry Reding89184652014-04-16 09:24:44 +0200268 as->count = alloc_page(GFP_KERNEL);
269 if (!as->count) {
270 __free_page(as->pd);
271 kfree(as);
Joerg Roedeld5f1a812015-03-26 13:43:12 +0100272 return NULL;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200273 }
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200274
Russell King853520f2015-07-27 13:29:26 +0100275 as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
276 if (!as->pts) {
277 __free_page(as->count);
278 __free_page(as->pd);
279 kfree(as);
280 return NULL;
281 }
282
Thierry Reding89184652014-04-16 09:24:44 +0200283 /* clear PDEs */
284 pd = page_address(as->pd);
285 SetPageReserved(as->pd);
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200286
Thierry Reding89184652014-04-16 09:24:44 +0200287 for (i = 0; i < SMMU_NUM_PDE; i++)
288 pd[i] = 0;
Hiroshi Doyud2453b22012-07-30 08:39:18 +0300289
Thierry Reding89184652014-04-16 09:24:44 +0200290 /* clear PDE usage counters */
291 pd = page_address(as->count);
292 SetPageReserved(as->count);
Hiroshi Doyud2453b22012-07-30 08:39:18 +0300293
Thierry Reding89184652014-04-16 09:24:44 +0200294 for (i = 0; i < SMMU_NUM_PDE; i++)
295 pd[i] = 0;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200296
Thierry Reding471d9142015-03-27 11:07:25 +0100297 /* setup aperture */
Joerg Roedel7f65ef02015-04-02 13:33:19 +0200298 as->domain.geometry.aperture_start = 0;
299 as->domain.geometry.aperture_end = 0xffffffff;
300 as->domain.geometry.force_aperture = true;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200301
Joerg Roedeld5f1a812015-03-26 13:43:12 +0100302 return &as->domain;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200303}
304
Joerg Roedeld5f1a812015-03-26 13:43:12 +0100305static void tegra_smmu_domain_free(struct iommu_domain *domain)
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200306{
Joerg Roedeld5f1a812015-03-26 13:43:12 +0100307 struct tegra_smmu_as *as = to_smmu_as(domain);
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200308
Thierry Reding89184652014-04-16 09:24:44 +0200309 /* TODO: free page directory and page tables */
310 ClearPageReserved(as->pd);
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200311
Thierry Reding89184652014-04-16 09:24:44 +0200312 kfree(as);
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200313}
314
Thierry Reding89184652014-04-16 09:24:44 +0200315static const struct tegra_smmu_swgroup *
316tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
Hiroshi Doyu39abf8a2012-08-02 11:46:40 +0300317{
Thierry Reding89184652014-04-16 09:24:44 +0200318 const struct tegra_smmu_swgroup *group = NULL;
319 unsigned int i;
Hiroshi Doyu39abf8a2012-08-02 11:46:40 +0300320
Thierry Reding89184652014-04-16 09:24:44 +0200321 for (i = 0; i < smmu->soc->num_swgroups; i++) {
322 if (smmu->soc->swgroups[i].swgroup == swgroup) {
323 group = &smmu->soc->swgroups[i];
Hiroshi Doyu39abf8a2012-08-02 11:46:40 +0300324 break;
Hiroshi Doyu39abf8a2012-08-02 11:46:40 +0300325 }
326 }
327
Thierry Reding89184652014-04-16 09:24:44 +0200328 return group;
Hiroshi Doyu39abf8a2012-08-02 11:46:40 +0300329}
330
/*
 * Enable SMMU translation for all memory clients belonging to @swgroup and
 * bind the swgroup to @asid.
 */
static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	/* Set the per-client translation-enable bit for each member client. */
	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value |= BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}

	/* Program the swgroup's ASID register, if the swgroup is known. */
	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}
}
358
/*
 * Disable SMMU translation for @swgroup: clear the swgroup's ASID enable
 * first, then the per-client enable bits (reverse of tegra_smmu_enable()).
 */
static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value &= ~BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}
386
/*
 * Bind an address space to the hardware on first use: allocate an ASID and
 * program the page directory base. Subsequent calls only bump the reference
 * count. Returns 0 on success or a negative error code.
 */
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err;

	if (as->use_count > 0) {
		as->use_count++;
		return 0;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		return err;

	/* Make the page directory visible to the SMMU before programming it. */
	smmu->soc->ops->flush_dcache(as->pd, 0, SMMU_SIZE_PD);
	smmu_flush_ptc(smmu, as->pd, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	/* Select the ASID, then install the page directory for it. */
	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	return 0;
}
416
/*
 * Drop one reference on the address space; release its ASID when the last
 * reference goes away.
 */
static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	if (--as->use_count > 0)
		return;

	tegra_smmu_free_asid(smmu, as->id);
	as->smmu = NULL;
}
426
/*
 * Attach @dev to @domain: walk the device's "iommus" phandle list and, for
 * each entry referring to this SMMU, prepare the address space and enable
 * translation for the referenced swgroup. Returns -ENODEV when the device
 * has no "iommus" entries at all.
 */
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct tegra_smmu *smmu = dev->archdata.iommu;
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		/* Skip entries that point at a different IOMMU. */
		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		err = tegra_smmu_as_prepare(smmu, as);
		if (err < 0)
			return err;

		tegra_smmu_enable(smmu, swgroup, as->id);
		index++;
	}

	if (index == 0)
		return -ENODEV;

	return 0;
}
461
/*
 * Detach @dev from @domain: the inverse of tegra_smmu_attach_dev(). Disable
 * translation for each matching swgroup and drop the address-space reference.
 */
static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = as->smmu;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		/* Skip entries that point at a different IOMMU. */
		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		tegra_smmu_disable(smmu, swgroup, as->id);
		tegra_smmu_as_unprepare(smmu, as);
		index++;
	}
}
486
Russell King0b42c7c2015-07-27 13:29:21 +0100487static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
488{
489 u32 *pt = page_address(pt_page);
490
491 return pt + iova_pt_index(iova);
492}
493
494static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
495 struct page **pagep)
496{
497 unsigned int pd_index = iova_pd_index(iova);
498 struct page *pt_page;
Russell King0b42c7c2015-07-27 13:29:21 +0100499
Russell King853520f2015-07-27 13:29:26 +0100500 pt_page = as->pts[pd_index];
501 if (!pt_page)
Russell King0b42c7c2015-07-27 13:29:21 +0100502 return NULL;
503
Russell King0b42c7c2015-07-27 13:29:21 +0100504 *pagep = pt_page;
505
506 return tegra_smmu_pte_offset(pt_page, iova);
507}
508
/*
 * Return a pointer to the PTE for @iova, allocating and installing a new
 * page-table page if none exists yet. *@pagep receives the page-table page.
 * Also bumps the per-PDE usage counter when the PTE is currently empty.
 * Returns NULL on allocation failure.
 */
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       struct page **pagep)
{
	u32 *pd = page_address(as->pd), *pt, *count;
	unsigned int pde = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	struct page *page;
	unsigned int i;

	if (!as->pts[pde]) {
		/* Page tables must come from DMA-able memory. */
		page = alloc_page(GFP_KERNEL | __GFP_DMA);
		if (!page)
			return NULL;

		pt = page_address(page);
		SetPageReserved(page);

		for (i = 0; i < SMMU_NUM_PTE; i++)
			pt[i] = 0;

		as->pts[pde] = page;

		/* Flush the zeroed table before the SMMU can see the PDE. */
		smmu->soc->ops->flush_dcache(page, 0, SMMU_SIZE_PT);

		pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT);

		/* Publish the new PDE: dcache, then PTC, then TLB. */
		smmu->soc->ops->flush_dcache(as->pd, pde << 2, 4);
		smmu_flush_ptc(smmu, as->pd, pde << 2);
		smmu_flush_tlb_section(smmu, as->id, iova);
		smmu_flush(smmu);
	} else {
		page = as->pts[pde];
	}

	*pagep = page;

	pt = page_address(page);

	/* Keep track of entries in this page table. */
	count = page_address(as->count);
	if (pt[iova_pt_index(iova)] == 0)
		count[pde]++;

	return tegra_smmu_pte_offset(page, iova);
}
554
/*
 * Drop one use of the page table covering @iova. When its usage counter
 * reaches zero, tear down the PDE, flush it out of the hardware caches and
 * free the page-table page.
 */
static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
	struct tegra_smmu *smmu = as->smmu;
	unsigned int pde = iova_pd_index(iova);
	u32 *count = page_address(as->count);
	u32 *pd = page_address(as->pd);
	struct page *page = as->pts[pde];

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (--count[pde] == 0) {
		unsigned int offset = pde * sizeof(*pd);

		/* Clear the page directory entry first */
		pd[pde] = 0;

		/* Flush the page directory entry */
		smmu->soc->ops->flush_dcache(as->pd, offset, sizeof(*pd));
		smmu_flush_ptc(smmu, as->pd, offset);
		smmu_flush_tlb_section(smmu, as->id, iova);
		smmu_flush(smmu);

		/* Finally, free the page */
		ClearPageReserved(page);
		__free_page(page);
		as->pts[pde] = NULL;
	}
}
585
/*
 * Write @val into *@pte and propagate the update to the hardware: flush the
 * dcache line, invalidate the matching PTC entry and TLB group, then read
 * back to ensure completion.
 */
static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
			       u32 *pte, struct page *pte_page, u32 val)
{
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset = offset_in_page(pte);

	*pte = val;

	smmu->soc->ops->flush_dcache(pte_page, offset, 4);
	smmu_flush_ptc(smmu, pte_page, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);
}
599
/*
 * Map @paddr at @iova in the domain with read/write/non-secure attributes.
 * @size and @prot are currently unused: only 4 KiB pages are supported (see
 * tegra_smmu_ops.pgsize_bitmap) and the attribute bits are fixed.
 * Returns 0 on success or -ENOMEM if the page table cannot be allocated.
 */
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct page *page;
	u32 *pte;

	pte = as_get_pte(as, iova, &page);
	if (!pte)
		return -ENOMEM;

	tegra_smmu_set_pte(as, iova, pte, page,
			   __phys_to_pfn(paddr) | SMMU_PTE_ATTR);

	return 0;
}
616
/*
 * Unmap the page at @iova. Returns the number of bytes unmapped (@size) or
 * 0 when nothing was mapped there. Clearing the PTE may also free the whole
 * page table via tegra_smmu_pte_put_use().
 */
static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct page *pte_page;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_page);
	if (!pte || !*pte)
		return 0;

	tegra_smmu_set_pte(as, iova, pte, pte_page, 0);
	tegra_smmu_pte_put_use(as, iova);

	return size;
}
633
/*
 * Translate @iova to its mapped physical address, or return 0 if no valid
 * mapping exists. The PFN is extracted from the PTE using the per-SoC
 * pfn_mask computed at probe time.
 */
static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct page *pte_page;
	unsigned long pfn;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_page);
	if (!pte || !*pte)
		return 0;

	pfn = *pte & as->smmu->pfn_mask;

	return PFN_PHYS(pfn);
}
650
651static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
652{
653 struct platform_device *pdev;
654 struct tegra_mc *mc;
655
656 pdev = of_find_device_by_node(np);
657 if (!pdev)
658 return NULL;
659
660 mc = platform_get_drvdata(pdev);
661 if (!mc)
662 return NULL;
663
664 return mc->smmu;
665}
666
/*
 * Bus notifier callback: associate @dev with its SMMU by scanning the
 * device's "iommus" phandles. Always returns 0; devices without a matching
 * SMMU are simply left unassociated.
 */
static int tegra_smmu_add_device(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					  &args) == 0) {
		struct tegra_smmu *smmu;

		smmu = tegra_smmu_find(args.np);
		if (smmu) {
			/*
			 * Only a single IOMMU master interface is currently
			 * supported by the Linux kernel, so abort after the
			 * first match.
			 */
			dev->archdata.iommu = smmu;
			break;
		}

		index++;
	}

	return 0;
}
693
/* Undo tegra_smmu_add_device(): drop the device's SMMU association. */
static void tegra_smmu_remove_device(struct device *dev)
{
	dev->archdata.iommu = NULL;
}
698
/* IOMMU core callbacks; only 4 KiB pages are supported. */
static const struct iommu_ops tegra_smmu_ops = {
	.capable = tegra_smmu_capable,
	.domain_alloc = tegra_smmu_domain_alloc,
	.domain_free = tegra_smmu_domain_free,
	.attach_dev = tegra_smmu_attach_dev,
	.detach_dev = tegra_smmu_detach_dev,
	.add_device = tegra_smmu_add_device,
	.remove_device = tegra_smmu_remove_device,
	.map = tegra_smmu_map,
	.unmap = tegra_smmu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = tegra_smmu_iova_to_phys,

	.pgsize_bitmap = SZ_4K,
};
714
715static void tegra_smmu_ahb_enable(void)
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200716{
Thierry Reding89184652014-04-16 09:24:44 +0200717 static const struct of_device_id ahb_match[] = {
718 { .compatible = "nvidia,tegra30-ahb", },
719 { }
720 };
721 struct device_node *ahb;
722
723 ahb = of_find_matching_node(NULL, ahb_match);
724 if (ahb) {
725 tegra_ahb_enable_smmu(ahb);
726 of_node_put(ahb);
727 }
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200728}
729
Thierry Redingd1313e72015-01-23 09:49:25 +0100730static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
731{
732 struct tegra_smmu *smmu = s->private;
733 unsigned int i;
734 u32 value;
735
736 seq_printf(s, "swgroup enabled ASID\n");
737 seq_printf(s, "------------------------\n");
738
739 for (i = 0; i < smmu->soc->num_swgroups; i++) {
740 const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
741 const char *status;
742 unsigned int asid;
743
744 value = smmu_readl(smmu, group->reg);
745
746 if (value & SMMU_ASID_ENABLE)
747 status = "yes";
748 else
749 status = "no";
750
751 asid = value & SMMU_ASID_MASK;
752
753 seq_printf(s, "%-9s %-7s %#04x\n", group->name, status,
754 asid);
755 }
756
757 return 0;
758}
759
/* seq_file open hook for the "swgroups" debugfs file. */
static int tegra_smmu_swgroups_open(struct inode *inode, struct file *file)
{
	return single_open(file, tegra_smmu_swgroups_show, inode->i_private);
}
764
/* File operations for the read-only "swgroups" debugfs file. */
static const struct file_operations tegra_smmu_swgroups_fops = {
	.open = tegra_smmu_swgroups_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
771
772static int tegra_smmu_clients_show(struct seq_file *s, void *data)
773{
774 struct tegra_smmu *smmu = s->private;
775 unsigned int i;
776 u32 value;
777
778 seq_printf(s, "client enabled\n");
779 seq_printf(s, "--------------------\n");
780
781 for (i = 0; i < smmu->soc->num_clients; i++) {
782 const struct tegra_mc_client *client = &smmu->soc->clients[i];
783 const char *status;
784
785 value = smmu_readl(smmu, client->smmu.reg);
786
787 if (value & BIT(client->smmu.bit))
788 status = "yes";
789 else
790 status = "no";
791
792 seq_printf(s, "%-12s %s\n", client->name, status);
793 }
794
795 return 0;
796}
797
/* seq_file open hook for the "clients" debugfs file. */
static int tegra_smmu_clients_open(struct inode *inode, struct file *file)
{
	return single_open(file, tegra_smmu_clients_show, inode->i_private);
}
802
/* File operations for the read-only "clients" debugfs file. */
static const struct file_operations tegra_smmu_clients_fops = {
	.open = tegra_smmu_clients_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
809
/* Create the "smmu" debugfs directory with its two read-only status files. */
static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
	smmu->debugfs = debugfs_create_dir("smmu", NULL);
	if (!smmu->debugfs)
		return;

	debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_swgroups_fops);
	debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_clients_fops);
}
821
/* Tear down everything created by tegra_smmu_debugfs_init(). */
static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
	debugfs_remove_recursive(smmu->debugfs);
}
826
/*
 * Probe and enable the SMMU. Called by the memory controller driver with
 * its own device, the SoC description and the MC instance. Returns the new
 * SMMU, NULL when this SoC has none, or an ERR_PTR() on failure.
 */
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc)
{
	struct tegra_smmu *smmu;
	size_t size;
	u32 value;
	int err;

	/* This can happen on Tegra20 which doesn't have an SMMU */
	if (!soc)
		return NULL;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	/*
	 * This is a bit of a hack. Ideally we'd want to simply return this
	 * value. However the IOMMU registration process will attempt to add
	 * all devices to the IOMMU when bus_set_iommu() is called. In order
	 * not to rely on global variables to track the IOMMU instance, we
	 * set it here so that it can be looked up from the .add_device()
	 * callback via the IOMMU device's .drvdata field.
	 */
	mc->smmu = smmu;

	/* Bitmap of num_asids ASIDs, rounded up to whole longs. */
	size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

	smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!smmu->asids)
		return ERR_PTR(-ENOMEM);

	mutex_init(&smmu->lock);

	smmu->regs = mc->regs;
	smmu->soc = soc;
	smmu->dev = dev;
	smmu->mc = mc;

	/* PTE bits above the addressable PFN range are attribute bits. */
	smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
		mc->soc->num_address_bits, smmu->pfn_mask);

	/* Configure and enable the page table cache. */
	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

	if (soc->supports_request_limit)
		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

	smmu_writel(smmu, value, SMMU_PTC_CONFIG);

	/* Configure the TLB. */
	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(0x20);

	if (soc->supports_round_robin_arbitration)
		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

	smmu_writel(smmu, value, SMMU_TLB_CONFIG);

	/* Start from a clean slate, then enable translation. */
	smmu_flush_ptc(smmu, NULL, 0);
	smmu_flush_tlb(smmu);
	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	smmu_flush(smmu);

	tegra_smmu_ahb_enable();

	err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
	if (err < 0)
		return ERR_PTR(err);

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_init(smmu);

	return smmu;
}
Thierry Redingd1313e72015-01-23 09:49:25 +0100902
/* Remove hook: currently only tears down the debugfs entries. */
void tegra_smmu_remove(struct tegra_smmu *smmu)
{
	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_exit(smmu);
}