/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

struct tegra_smmu_group {
	struct list_head list;
	const struct tegra_smmu_group_soc *soc;
	struct iommu_group *group;
};

struct tegra_smmu {
	void __iomem *regs;
	struct device *dev;

	struct tegra_mc *mc;
	const struct tegra_smmu_soc *soc;

	struct list_head groups;

	unsigned long pfn_mask;
	unsigned long tlb_mask;

	unsigned long *asids;
	struct mutex lock;

	struct list_head list;

	struct dentry *debugfs;

	struct iommu_device iommu;	/* IOMMU Core code handle */
};

struct tegra_smmu_as {
	struct iommu_domain domain;
	struct tegra_smmu *smmu;
	unsigned int use_count;
	u32 *count;
	struct page **pts;
	struct page *pd;
	dma_addr_t pd_dma;
	unsigned id;
	u32 attr;
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
	return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}

#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
	((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))

#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					 SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
				       SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)

#define SMMU_PDE_READABLE (1 << 31)
#define SMMU_PDE_WRITABLE (1 << 30)
#define SMMU_PDE_NONSECURE (1 << 29)
#define SMMU_PDE_NEXT (1 << 28)

#define SMMU_PTE_READABLE (1 << 31)
#define SMMU_PTE_WRITABLE (1 << 30)
#define SMMU_PTE_NONSECURE (1 << 29)

#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
		       SMMU_PDE_NONSECURE)
#define SMMU_PTE_ATTR (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
		       SMMU_PTE_NONSECURE)

static unsigned int iova_pd_index(unsigned long iova)
{
	return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
	return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}

static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
	addr >>= 12;
	return (addr & smmu->pfn_mask) == addr;
}

static dma_addr_t smmu_pde_to_dma(u32 pde)
{
	return pde << 12;
}

static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
				  unsigned long offset)
{
	u32 value;

	offset &= ~(smmu->mc->soc->atom_size - 1);

	if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
		value = 0;
#endif
		smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
	}

	value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

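/*
 * The TLB flush register encodes the ASID in a 2-bit field at bit 29 on
 * SoCs that support only 4 ASIDs and in a 7-bit field at bit 24 otherwise.
 */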
static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
				       unsigned long asid)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

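/* Read back a register to make sure all preceding writes have been posted. */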
static inline void smmu_flush(struct tegra_smmu *smmu)
{
	smmu_readl(smmu, SMMU_CONFIG);
}

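/* Allocate the first free ASID from the bitmap, under the SMMU lock. */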
static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
	unsigned long id;

	mutex_lock(&smmu->lock);

	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
	if (id >= smmu->soc->num_asids) {
		mutex_unlock(&smmu->lock);
		return -ENOSPC;
	}

	set_bit(id, smmu->asids);
	*idp = id;

	mutex_unlock(&smmu->lock);
	return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	mutex_lock(&smmu->lock);
	clear_bit(id, smmu->asids);
	mutex_unlock(&smmu->lock);
}

static bool tegra_smmu_capable(enum iommu_cap cap)
{
	return false;
}

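/*
 * Allocate an address space: a zeroed page directory, a per-PDE use count
 * and an array of page-table pages that is filled lazily by as_get_pte().
 */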
static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
	struct tegra_smmu_as *as;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
	if (!as->pd) {
		kfree(as);
		return NULL;
	}

	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
	if (!as->count) {
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
	if (!as->pts) {
		kfree(as->count);
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	/* setup aperture */
	as->domain.geometry.aperture_start = 0;
	as->domain.geometry.aperture_end = 0xffffffff;
	as->domain.geometry.force_aperture = true;

	return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);

	/* TODO: free page directory and page tables */

	WARN_ON_ONCE(as->use_count);
	kfree(as->count);
	kfree(as->pts);
	kfree(as);
}

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
	const struct tegra_smmu_swgroup *group = NULL;
	unsigned int i;

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		if (smmu->soc->swgroups[i].swgroup == swgroup) {
			group = &smmu->soc->swgroups[i];
			break;
		}
	}

	return group;
}

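/*
 * Enable SMMU translation for every memory client in the given swgroup and
 * program the swgroup's ASID register.
 */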
static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value |= BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value &= ~BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}

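/*
 * On first use, map the page directory for DMA, allocate an ASID and point
 * the SMMU's page table base at the directory; later calls only take a
 * reference.
 */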
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err;

	if (as->use_count > 0) {
		as->use_count++;
		return 0;
	}

	as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(smmu->dev, as->pd_dma))
		return -ENOMEM;

	/* We can't handle 64-bit DMA addresses */
	if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
		err = -ENOMEM;
		goto err_unmap;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		goto err_unmap;

	smmu_flush_ptc(smmu, as->pd_dma, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	return 0;

err_unmap:
	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
	return err;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	if (--as->use_count > 0)
		return;

	tegra_smmu_free_asid(smmu, as->id);

	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

	as->smmu = NULL;
}

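/*
 * Walk the device's "iommus" phandles and enable, for the domain's ASID,
 * every swgroup that references this SMMU.
 */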
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct tegra_smmu *smmu = dev->archdata.iommu;
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		err = tegra_smmu_as_prepare(smmu, as);
		if (err < 0)
			return err;

		tegra_smmu_enable(smmu, swgroup, as->id);
		index++;
	}

	if (index == 0)
		return -ENODEV;

	return 0;
}

static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = as->smmu;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		tegra_smmu_disable(smmu, swgroup, as->id);
		tegra_smmu_as_unprepare(smmu, as);
		index++;
	}
}

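/*
 * Update a page directory entry and flush it out of the CPU cache, the
 * SMMU's page table cache and the TLB.
 */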
static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
			       u32 value)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	u32 *pd = page_address(as->pd);
	unsigned long offset = pd_index * sizeof(*pd);

	/* Set the page directory entry first */
	pd[pd_index] = value;

	/* Then flush the page directory entry from caches */
	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
					 sizeof(*pd), DMA_TO_DEVICE);

	/* And flush the iommu */
	smmu_flush_ptc(smmu, as->pd_dma, offset);
	smmu_flush_tlb_section(smmu, as->id, iova);
	smmu_flush(smmu);
}

static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
{
	u32 *pt = page_address(pt_page);

	return pt + iova_pt_index(iova);
}

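/*
 * Look up the PTE for an IOVA and return the DMA address of the page table
 * containing it via @dmap, or NULL if no page table exists for this PDE yet.
 */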
static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
				  dma_addr_t *dmap)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct page *pt_page;
	u32 *pd;

	pt_page = as->pts[pd_index];
	if (!pt_page)
		return NULL;

	pd = page_address(as->pd);
	*dmap = smmu_pde_to_dma(pd[pd_index]);

	return tegra_smmu_pte_offset(pt_page, iova);
}

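/*
 * Like tegra_smmu_pte_lookup(), but allocates, maps and installs a new page
 * table if none exists yet for the IOVA's directory entry.
 */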
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       dma_addr_t *dmap)
{
	unsigned int pde = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;

	if (!as->pts[pde]) {
		struct page *page;
		dma_addr_t dma;

		page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
		if (!page)
			return NULL;

		dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
				   DMA_TO_DEVICE);
		if (dma_mapping_error(smmu->dev, dma)) {
			__free_page(page);
			return NULL;
		}

		if (!smmu_dma_addr_valid(smmu, dma)) {
			dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
				       DMA_TO_DEVICE);
			__free_page(page);
			return NULL;
		}

		as->pts[pde] = page;

		tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
							 SMMU_PDE_NEXT));

		*dmap = dma;
	} else {
		u32 *pd = page_address(as->pd);

		*dmap = smmu_pde_to_dma(pd[pde]);
	}

	return tegra_smmu_pte_offset(as->pts[pde], iova);
}

static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pd_index = iova_pd_index(iova);

	as->count[pd_index]++;
}

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (--as->count[pde] == 0) {
		struct tegra_smmu *smmu = as->smmu;
		u32 *pd = page_address(as->pd);
		dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]);

		tegra_smmu_set_pde(as, iova, 0);

		dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
		__free_page(page);
		as->pts[pde] = NULL;
	}
}

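/* Write a PTE, then flush it from the CPU cache, the PTC and the TLB. */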
static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
			       u32 *pte, dma_addr_t pte_dma, u32 val)
{
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset = offset_in_page(pte);

	*pte = val;

	dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
					 4, DMA_TO_DEVICE);
	smmu_flush_ptc(smmu, pte_dma, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);
}

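/* Map a single page; per .pgsize_bitmap, only 4 KiB pages are supported. */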
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = as_get_pte(as, iova, &pte_dma);
	if (!pte)
		return -ENOMEM;

	/* If we aren't overwriting a pre-existing entry, increment use */
	if (*pte == 0)
		tegra_smmu_pte_get_use(as, iova);

	tegra_smmu_set_pte(as, iova, pte, pte_dma,
			   __phys_to_pfn(paddr) | SMMU_PTE_ATTR);

	return 0;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
	tegra_smmu_pte_put_use(as, iova);

	return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long pfn;
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	pfn = *pte & as->smmu->pfn_mask;

	return PFN_PHYS(pfn);
}

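/* Resolve a device tree node to the SMMU owned by its memory controller. */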
static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
	struct platform_device *pdev;
	struct tegra_mc *mc;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return NULL;

	mc = platform_get_drvdata(pdev);
	if (!mc)
		return NULL;

	return mc->smmu;
}

static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
				struct of_phandle_args *args)
{
	const struct iommu_ops *ops = smmu->iommu.ops;
	int err;

	err = iommu_fwspec_init(dev, &dev->of_node->fwnode, ops);
	if (err < 0) {
		dev_err(dev, "failed to initialize fwspec: %d\n", err);
		return err;
	}

	err = ops->of_xlate(dev, args);
	if (err < 0) {
		dev_err(dev, "failed to parse SW group ID: %d\n", err);
		iommu_fwspec_free(dev);
		return err;
	}

	return 0;
}

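/*
 * Find the SMMU referenced by the device's "iommus" property, set up the
 * device's fwspec and link the device to the IOMMU.
 */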
static int tegra_smmu_add_device(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = NULL;
	struct iommu_group *group;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err;

	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					  &args) == 0) {
		smmu = tegra_smmu_find(args.np);
		if (smmu) {
			err = tegra_smmu_configure(smmu, dev, &args);
			of_node_put(args.np);

			if (err < 0)
				return err;

			/*
			 * Only a single IOMMU master interface is currently
			 * supported by the Linux kernel, so abort after the
			 * first match.
			 */
			dev->archdata.iommu = smmu;

			iommu_device_link(&smmu->iommu, dev);

			break;
		}

		of_node_put(args.np);
		index++;
	}

	if (!smmu)
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	return 0;
}

static void tegra_smmu_remove_device(struct device *dev)
{
	struct tegra_smmu *smmu = dev->archdata.iommu;

	if (smmu)
		iommu_device_unlink(&smmu->iommu, dev);

	dev->archdata.iommu = NULL;
	iommu_group_remove_device(dev);
}

static const struct tegra_smmu_group_soc *
tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
{
	unsigned int i, j;

	for (i = 0; i < smmu->soc->num_groups; i++)
		for (j = 0; j < smmu->soc->groups[i].num_swgroups; j++)
			if (smmu->soc->groups[i].swgroups[j] == swgroup)
				return &smmu->soc->groups[i];

	return NULL;
}

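/*
 * Return the IOMMU group for a swgroup, creating it on first use. The SoC
 * data may tie several swgroups together into one group; these are tracked
 * on the SMMU's group list.
 */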
static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
						unsigned int swgroup)
{
	const struct tegra_smmu_group_soc *soc;
	struct tegra_smmu_group *group;

	soc = tegra_smmu_find_group(smmu, swgroup);
	if (!soc)
		return NULL;

	mutex_lock(&smmu->lock);

	list_for_each_entry(group, &smmu->groups, list)
		if (group->soc == soc) {
			mutex_unlock(&smmu->lock);
			return group->group;
		}

	group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
	if (!group) {
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	INIT_LIST_HEAD(&group->list);
	group->soc = soc;

	group->group = iommu_group_alloc();
	if (IS_ERR(group->group)) {
		devm_kfree(smmu->dev, group);
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	list_add_tail(&group->list, &smmu->groups);
	mutex_unlock(&smmu->lock);

	return group->group;
}

static struct iommu_group *tegra_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu *smmu = dev->archdata.iommu;
	struct iommu_group *group;

	group = tegra_smmu_group_get(smmu, fwspec->ids[0]);
	if (!group)
		group = generic_device_group(dev);

	return group;
}

static int tegra_smmu_of_xlate(struct device *dev,
			       struct of_phandle_args *args)
{
	u32 id = args->args[0];

	return iommu_fwspec_add_ids(dev, &id, 1);
}

static const struct iommu_ops tegra_smmu_ops = {
	.capable = tegra_smmu_capable,
	.domain_alloc = tegra_smmu_domain_alloc,
	.domain_free = tegra_smmu_domain_free,
	.attach_dev = tegra_smmu_attach_dev,
	.detach_dev = tegra_smmu_detach_dev,
	.add_device = tegra_smmu_add_device,
	.remove_device = tegra_smmu_remove_device,
	.device_group = tegra_smmu_device_group,
	.map = tegra_smmu_map,
	.unmap = tegra_smmu_unmap,
	.iova_to_phys = tegra_smmu_iova_to_phys,
	.of_xlate = tegra_smmu_of_xlate,
	.pgsize_bitmap = SZ_4K,
};

static void tegra_smmu_ahb_enable(void)
{
	static const struct of_device_id ahb_match[] = {
		{ .compatible = "nvidia,tegra30-ahb", },
		{ }
	};
	struct device_node *ahb;

	ahb = of_find_matching_node(NULL, ahb_match);
	if (ahb) {
		tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);
	}
}

static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "swgroup    enabled  ASID\n");
	seq_printf(s, "------------------------\n");

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
		const char *status;
		unsigned int asid;

		value = smmu_readl(smmu, group->reg);

		if (value & SMMU_ASID_ENABLE)
			status = "yes";
		else
			status = "no";

		asid = value & SMMU_ASID_MASK;

		seq_printf(s, "%-9s %-7s %#04x\n", group->name, status,
			   asid);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_swgroups);

static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "client       enabled\n");
	seq_printf(s, "--------------------\n");

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];
		const char *status;

		value = smmu_readl(smmu, client->smmu.reg);

		if (value & BIT(client->smmu.bit))
			status = "yes";
		else
			status = "no";

		seq_printf(s, "%-12s %s\n", client->name, status);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_clients);

static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
	smmu->debugfs = debugfs_create_dir("smmu", NULL);
	if (!smmu->debugfs)
		return;

	debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_swgroups_fops);
	debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_clients_fops);
}

static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
	debugfs_remove_recursive(smmu->debugfs);
}

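/*
 * Called by the memory controller driver at probe time: program the PTC and
 * TLB configuration, enable translation and register with the IOMMU core.
 */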
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc)
{
	struct tegra_smmu *smmu;
	size_t size;
	u32 value;
	int err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	/*
	 * This is a bit of a hack. Ideally we'd want to simply return this
	 * value. However the IOMMU registration process will attempt to add
	 * all devices to the IOMMU when bus_set_iommu() is called. In order
	 * not to rely on global variables to track the IOMMU instance, we
	 * set it here so that it can be looked up from the .add_device()
	 * callback via the IOMMU device's .drvdata field.
	 */
	mc->smmu = smmu;

	size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

	smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!smmu->asids)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&smmu->groups);
	mutex_init(&smmu->lock);

	smmu->regs = mc->regs;
	smmu->soc = soc;
	smmu->dev = dev;
	smmu->mc = mc;

	smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
		mc->soc->num_address_bits, smmu->pfn_mask);
	smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
		smmu->tlb_mask);

	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

	if (soc->supports_request_limit)
		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

	smmu_writel(smmu, value, SMMU_PTC_CONFIG);

	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

	if (soc->supports_round_robin_arbitration)
		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

	smmu_writel(smmu, value, SMMU_TLB_CONFIG);

	smmu_flush_ptc_all(smmu);
	smmu_flush_tlb(smmu);
	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	smmu_flush(smmu);

	tegra_smmu_ahb_enable();

	err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
	if (err)
		return ERR_PTR(err);

	iommu_device_set_ops(&smmu->iommu, &tegra_smmu_ops);
	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

	err = iommu_device_register(&smmu->iommu);
	if (err) {
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
	if (err < 0) {
		iommu_device_unregister(&smmu->iommu);
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_init(smmu);

	return smmu;
}

void tegra_smmu_remove(struct tegra_smmu *smmu)
{
	iommu_device_unregister(&smmu->iommu);
	iommu_device_sysfs_remove(&smmu->iommu);

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_exit(smmu);
}