/*
 * IOMMU API for SMMU in Tegra30
 *
 * Copyright (c) 2011-2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define pr_fmt(fmt)	"%s(): " fmt, __func__

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/device.h>
#include <linux/sched.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/tegra-ahb.h>

#include <asm/page.h>
#include <asm/cacheflush.h>

enum smmu_hwgrp {
	HWGRP_AFI,
	HWGRP_AVPC,
	HWGRP_DC,
	HWGRP_DCB,
	HWGRP_EPP,
	HWGRP_G2,
	HWGRP_HC,
	HWGRP_HDA,
	HWGRP_ISP,
	HWGRP_MPE,
	HWGRP_NV,
	HWGRP_NV2,
	HWGRP_PPCS,
	HWGRP_SATA,
	HWGRP_VDE,
	HWGRP_VI,

	HWGRP_COUNT,

	HWGRP_END = ~0,
};

#define HWG_AFI		(1 << HWGRP_AFI)
#define HWG_AVPC	(1 << HWGRP_AVPC)
#define HWG_DC		(1 << HWGRP_DC)
#define HWG_DCB		(1 << HWGRP_DCB)
#define HWG_EPP		(1 << HWGRP_EPP)
#define HWG_G2		(1 << HWGRP_G2)
#define HWG_HC		(1 << HWGRP_HC)
#define HWG_HDA		(1 << HWGRP_HDA)
#define HWG_ISP		(1 << HWGRP_ISP)
#define HWG_MPE		(1 << HWGRP_MPE)
#define HWG_NV		(1 << HWGRP_NV)
#define HWG_NV2		(1 << HWGRP_NV2)
#define HWG_PPCS	(1 << HWGRP_PPCS)
#define HWG_SATA	(1 << HWGRP_SATA)
#define HWG_VDE		(1 << HWGRP_VDE)
#define HWG_VI		(1 << HWGRP_VI)

/* bitmap of the page sizes currently supported */
#define SMMU_IOMMU_PGSIZES	(SZ_4K)

#define SMMU_CONFIG				0x10
#define SMMU_CONFIG_DISABLE			0
#define SMMU_CONFIG_ENABLE			1

/* REVISIT: To support multiple MCs */
enum {
	_MC = 0,
};

enum {
	_TLB = 0,
	_PTC,
};

#define SMMU_CACHE_CONFIG_BASE			0x14
#define __SMMU_CACHE_CONFIG(mc, cache)		(SMMU_CACHE_CONFIG_BASE + 4 * cache)
#define SMMU_CACHE_CONFIG(cache)		__SMMU_CACHE_CONFIG(_MC, cache)

#define SMMU_CACHE_CONFIG_STATS_SHIFT		31
#define SMMU_CACHE_CONFIG_STATS_ENABLE		(1 << SMMU_CACHE_CONFIG_STATS_SHIFT)
#define SMMU_CACHE_CONFIG_STATS_TEST_SHIFT	30
#define SMMU_CACHE_CONFIG_STATS_TEST		(1 << SMMU_CACHE_CONFIG_STATS_TEST_SHIFT)

#define SMMU_TLB_CONFIG_HIT_UNDER_MISS__ENABLE	(1 << 29)
#define SMMU_TLB_CONFIG_ACTIVE_LINES__VALUE	0x10
#define SMMU_TLB_CONFIG_RESET_VAL		0x20000010

#define SMMU_PTC_CONFIG_CACHE__ENABLE		(1 << 29)
#define SMMU_PTC_CONFIG_INDEX_MAP__PATTERN	0x3f
#define SMMU_PTC_CONFIG_RESET_VAL		0x2000003f

#define SMMU_PTB_ASID				0x1c
#define SMMU_PTB_ASID_CURRENT_SHIFT		0

#define SMMU_PTB_DATA				0x20
#define SMMU_PTB_DATA_RESET_VAL			0
#define SMMU_PTB_DATA_ASID_NONSECURE_SHIFT	29
#define SMMU_PTB_DATA_ASID_WRITABLE_SHIFT	30
#define SMMU_PTB_DATA_ASID_READABLE_SHIFT	31

#define SMMU_TLB_FLUSH				0x30
#define SMMU_TLB_FLUSH_VA_MATCH_ALL		0
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION		2
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP		3
#define SMMU_TLB_FLUSH_ASID_SHIFT		29
#define SMMU_TLB_FLUSH_ASID_MATCH_DISABLE	0
#define SMMU_TLB_FLUSH_ASID_MATCH_ENABLE	1
#define SMMU_TLB_FLUSH_ASID_MATCH_SHIFT		31

#define SMMU_PTC_FLUSH				0x34
#define SMMU_PTC_FLUSH_TYPE_ALL			0
#define SMMU_PTC_FLUSH_TYPE_ADR			1
#define SMMU_PTC_FLUSH_ADR_SHIFT		4

#define SMMU_ASID_SECURITY			0x38

#define SMMU_STATS_CACHE_COUNT_BASE		0x1f0

#define SMMU_STATS_CACHE_COUNT(mc, cache, hitmiss)		\
	(SMMU_STATS_CACHE_COUNT_BASE + 8 * cache + 4 * hitmiss)

#define SMMU_TRANSLATION_ENABLE_0		0x228
#define SMMU_TRANSLATION_ENABLE_1		0x22c
#define SMMU_TRANSLATION_ENABLE_2		0x230

#define SMMU_AFI_ASID	0x238   /* PCIE */
#define SMMU_AVPC_ASID	0x23c   /* AVP */
#define SMMU_DC_ASID	0x240   /* Display controller */
#define SMMU_DCB_ASID	0x244   /* Display controller B */
#define SMMU_EPP_ASID	0x248   /* Encoder pre-processor */
#define SMMU_G2_ASID	0x24c   /* 2D engine */
#define SMMU_HC_ASID	0x250   /* Host1x */
#define SMMU_HDA_ASID	0x254   /* High-def audio */
#define SMMU_ISP_ASID	0x258   /* Image signal processor */
#define SMMU_MPE_ASID	0x264   /* MPEG encoder */
#define SMMU_NV_ASID	0x268   /* (3D) */
#define SMMU_NV2_ASID	0x26c   /* (3D) */
#define SMMU_PPCS_ASID	0x270   /* AHB */
#define SMMU_SATA_ASID	0x278   /* SATA */
#define SMMU_VDE_ASID	0x27c   /* Video decoder */
#define SMMU_VI_ASID	0x280   /* Video input */

#define SMMU_PDE_NEXT_SHIFT		28

#define SMMU_TLB_FLUSH_VA_SECTION__MASK		0xffc00000
#define SMMU_TLB_FLUSH_VA_SECTION__SHIFT	12 /* right shift */
#define SMMU_TLB_FLUSH_VA_GROUP__MASK		0xffffc000
#define SMMU_TLB_FLUSH_VA_GROUP__SHIFT		12 /* right shift */
#define SMMU_TLB_FLUSH_VA(iova, which)	\
	((((iova) & SMMU_TLB_FLUSH_VA_##which##__MASK) >> \
		SMMU_TLB_FLUSH_VA_##which##__SHIFT) |	\
	SMMU_TLB_FLUSH_VA_MATCH_##which)
#define SMMU_PTB_ASID_CUR(n)	\
		((n) << SMMU_PTB_ASID_CURRENT_SHIFT)
#define SMMU_TLB_FLUSH_ASID_MATCH_disable		\
		(SMMU_TLB_FLUSH_ASID_MATCH_DISABLE <<	\
			SMMU_TLB_FLUSH_ASID_MATCH_SHIFT)
#define SMMU_TLB_FLUSH_ASID_MATCH__ENABLE		\
		(SMMU_TLB_FLUSH_ASID_MATCH_ENABLE <<	\
			SMMU_TLB_FLUSH_ASID_MATCH_SHIFT)

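/*
 * Two match granularities are encoded above: a SECTION match covers one
 * 4MB-aligned region (VA bits [31:22], i.e. one page directory entry),
 * while a GROUP match covers a 16KB-aligned group of four 4KB pages
 * (VA bits [31:14]), as the __MASK values imply.
 */
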
#define SMMU_PAGE_SHIFT 12
#define SMMU_PAGE_SIZE	(1 << SMMU_PAGE_SHIFT)
#define SMMU_PAGE_MASK	((1 << SMMU_PAGE_SHIFT) - 1)

#define SMMU_PDIR_COUNT	1024
#define SMMU_PDIR_SIZE	(sizeof(unsigned long) * SMMU_PDIR_COUNT)
#define SMMU_PTBL_COUNT	1024
#define SMMU_PTBL_SIZE	(sizeof(unsigned long) * SMMU_PTBL_COUNT)
#define SMMU_PDIR_SHIFT	12
#define SMMU_PDE_SHIFT	12
#define SMMU_PTE_SHIFT	12
#define SMMU_PFN_MASK	0x000fffff

#define SMMU_ADDR_TO_PFN(addr)	((addr) >> 12)
#define SMMU_ADDR_TO_PDN(addr)	((addr) >> 22)
#define SMMU_PDN_TO_ADDR(pdn)	((pdn) << 22)

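/*
 * Two-level page table layout implied by the constants above, for a
 * 32-bit IOVA:
 *
 *	iova[31:22]	index into the 1024-entry page directory (PDN)
 *	iova[21:12]	index into a 1024-entry page table
 *	iova[11:0]	offset within the 4KB page
 *
 * e.g. iova 0x12345678 -> pdn 0x48, page table index 0x345, offset 0x678.
 */
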
204#define _READABLE (1 << SMMU_PTB_DATA_ASID_READABLE_SHIFT)
205#define _WRITABLE (1 << SMMU_PTB_DATA_ASID_WRITABLE_SHIFT)
206#define _NONSECURE (1 << SMMU_PTB_DATA_ASID_NONSECURE_SHIFT)
207#define _PDE_NEXT (1 << SMMU_PDE_NEXT_SHIFT)
208#define _MASK_ATTR (_READABLE | _WRITABLE | _NONSECURE)
209
210#define _PDIR_ATTR (_READABLE | _WRITABLE | _NONSECURE)
211
212#define _PDE_ATTR (_READABLE | _WRITABLE | _NONSECURE)
213#define _PDE_ATTR_N (_PDE_ATTR | _PDE_NEXT)
214#define _PDE_VACANT(pdn) (((pdn) << 10) | _PDE_ATTR)
215
216#define _PTE_ATTR (_READABLE | _WRITABLE | _NONSECURE)
217#define _PTE_VACANT(addr) (((addr) >> SMMU_PAGE_SHIFT) | _PTE_ATTR)
218
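/*
 * Note that a "vacant" entry is not zero: the macros above encode the
 * entry's own address back into it, so an unused PDE still maps its 4MB
 * section 1:1 and an unused PTE identity-maps its 4KB page (a PDE
 * without _PDE_NEXT translates directly instead of pointing to a page
 * table).
 */
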
#define SMMU_MK_PDIR(page, attr)	\
		((page_to_phys(page) >> SMMU_PDIR_SHIFT) | (attr))
#define SMMU_MK_PDE(page, attr)		\
		(unsigned long)((page_to_phys(page) >> SMMU_PDE_SHIFT) | (attr))
#define SMMU_EX_PTBL_PAGE(pde)		\
		pfn_to_page((unsigned long)(pde) & SMMU_PFN_MASK)
#define SMMU_PFN_TO_PTE(pfn, attr)	(unsigned long)((pfn) | (attr))

#define SMMU_ASID_ENABLE(asid)	((asid) | (1 << 31))
#define SMMU_ASID_DISABLE	0
#define SMMU_ASID_ASID(n)	((n) & ~SMMU_ASID_ENABLE(0))

#define NUM_SMMU_REG_BANKS	3

#define smmu_client_enable_hwgrp(c, m)	smmu_client_set_hwgrp(c, m, 1)
#define smmu_client_disable_hwgrp(c)	smmu_client_set_hwgrp(c, 0, 0)
#define __smmu_client_enable_hwgrp(c, m)	__smmu_client_set_hwgrp(c, m, 1)
#define __smmu_client_disable_hwgrp(c)	__smmu_client_set_hwgrp(c, 0, 0)

#define HWGRP_INIT(client)	[HWGRP_##client] = SMMU_##client##_ASID

static const u32 smmu_hwgrp_asid_reg[] = {
	HWGRP_INIT(AFI),
	HWGRP_INIT(AVPC),
	HWGRP_INIT(DC),
	HWGRP_INIT(DCB),
	HWGRP_INIT(EPP),
	HWGRP_INIT(G2),
	HWGRP_INIT(HC),
	HWGRP_INIT(HDA),
	HWGRP_INIT(ISP),
	HWGRP_INIT(MPE),
	HWGRP_INIT(NV),
	HWGRP_INIT(NV2),
	HWGRP_INIT(PPCS),
	HWGRP_INIT(SATA),
	HWGRP_INIT(VDE),
	HWGRP_INIT(VI),
};
#define HWGRP_ASID_REG(x)	(smmu_hwgrp_asid_reg[x])

/*
 * Per-client data for an address space
 */
struct smmu_client {
	struct device		*dev;
	struct list_head	list;
	struct smmu_as		*as;
	u32			hwgrp;
};

/*
 * Per-address-space data
 */
struct smmu_as {
	struct smmu_device	*smmu;	/* back pointer to container */
	unsigned int		asid;
	spinlock_t		lock;	/* for pagetable */
	struct page		*pdir_page;
	unsigned long		pdir_attr;
	unsigned long		pde_attr;
	unsigned long		pte_attr;
	unsigned int		*pte_count;

	struct list_head	client;
	spinlock_t		client_lock; /* for client list */
};

struct smmu_debugfs_info {
	struct smmu_device *smmu;
	int mc;
	int cache;
};

/*
 * Per SMMU device - IOMMU device
 */
struct smmu_device {
	void __iomem	*regs[NUM_SMMU_REG_BANKS];
	unsigned long	iovmm_base;	/* remappable base address */
	unsigned long	page_count;	/* total remappable size */
	spinlock_t	lock;
	char		*name;
	struct device	*dev;
	struct page	*avp_vector_page;	/* dummy page shared by all AS's */

	/*
	 * Saved register images for suspend/resume
	 */
	unsigned long translation_enable_0;
	unsigned long translation_enable_1;
	unsigned long translation_enable_2;
	unsigned long asid_security;

	struct dentry *debugfs_root;
	struct smmu_debugfs_info *debugfs_info;

	struct device_node *ahb;

	int		num_as;
	struct smmu_as	as[0];		/* Run-time allocated array */
};

static struct smmu_device *smmu_handle; /* unique for a system */

/*
 * SMMU register accessors
 */
static inline u32 smmu_read(struct smmu_device *smmu, size_t offs)
{
	BUG_ON(offs < 0x10);
	if (offs < 0x3c)
		return readl(smmu->regs[0] + offs - 0x10);
	BUG_ON(offs < 0x1f0);
	if (offs < 0x200)
		return readl(smmu->regs[1] + offs - 0x1f0);
	BUG_ON(offs < 0x228);
	if (offs < 0x284)
		return readl(smmu->regs[2] + offs - 0x228);
	BUG();
}

static inline void smmu_write(struct smmu_device *smmu, u32 val, size_t offs)
{
	BUG_ON(offs < 0x10);
	if (offs < 0x3c) {
		writel(val, smmu->regs[0] + offs - 0x10);
		return;
	}
	BUG_ON(offs < 0x1f0);
	if (offs < 0x200) {
		writel(val, smmu->regs[1] + offs - 0x1f0);
		return;
	}
	BUG_ON(offs < 0x228);
	if (offs < 0x284) {
		writel(val, smmu->regs[2] + offs - 0x228);
		return;
	}
	BUG();
}

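/*
 * The three remapped register banks, as decoded by smmu_read()/smmu_write()
 * above:
 *
 *	regs[0]: 0x010 - 0x03b	config, PTB setup, TLB/PTC flush
 *	regs[1]: 0x1f0 - 0x1ff	statistics counters
 *	regs[2]: 0x228 - 0x283	translation enable, per-hwgrp ASID registers
 */
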
#define VA_PAGE_TO_PA(va, page)	\
	(page_to_phys(page) + ((unsigned long)(va) & ~PAGE_MASK))

#define FLUSH_CPU_DCACHE(va, page, size)	\
	do {	\
		unsigned long _pa_ = VA_PAGE_TO_PA(va, page);		\
		__cpuc_flush_dcache_area((void *)(va), (size_t)(size));	\
		outer_flush_range(_pa_, _pa_+(size_t)(size));		\
	} while (0)

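/*
 * The SMMU walks page tables in physical memory and does not snoop the
 * CPU caches, so every page table update has to be cleaned out of both
 * the inner dcache and the outer (L2) cache before the PTC/TLB flush
 * makes the hardware re-read it; FLUSH_CPU_DCACHE above does both.
 */
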
/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back barriers to ensure the APB/AHB bus
 * transaction is complete before initiating activity on the PPSB
 * block.
 */
#define FLUSH_SMMU_REGS(smmu)	smmu_read(smmu, SMMU_CONFIG)

#define smmu_client_hwgrp(c)	(u32)((c)->dev->platform_data)

static int __smmu_client_set_hwgrp(struct smmu_client *c,
				   unsigned long map, int on)
{
	int i;
	struct smmu_as *as = c->as;
	u32 val, offs, mask = SMMU_ASID_ENABLE(as->asid);
	struct smmu_device *smmu = as->smmu;

	WARN_ON(!on && map);
	if (on && !map)
		return -EINVAL;
	if (!on)
		map = smmu_client_hwgrp(c);

	for_each_set_bit(i, &map, HWGRP_COUNT) {
		offs = HWGRP_ASID_REG(i);
		val = smmu_read(smmu, offs);
		if (on) {
			if (WARN_ON(val & mask))
				goto err_hw_busy;
			val |= mask;
		} else {
			WARN_ON((val & mask) == mask);
			val &= ~mask;
		}
		smmu_write(smmu, val, offs);
	}
	FLUSH_SMMU_REGS(smmu);
	c->hwgrp = map;
	return 0;

err_hw_busy:
	for_each_set_bit(i, &map, HWGRP_COUNT) {
		offs = HWGRP_ASID_REG(i);
		val = smmu_read(smmu, offs);
		val &= ~mask;
		smmu_write(smmu, val, offs);
	}
	return -EBUSY;
}

static int smmu_client_set_hwgrp(struct smmu_client *c, u32 map, int on)
{
	u32 val;
	unsigned long flags;
	struct smmu_as *as = c->as;
	struct smmu_device *smmu = as->smmu;

	spin_lock_irqsave(&smmu->lock, flags);
	val = __smmu_client_set_hwgrp(c, map, on);
	spin_unlock_irqrestore(&smmu->lock, flags);
	return val;
}

/*
 * Flush all TLB entries and all PTC entries
 * Caller must lock smmu
 */
static void smmu_flush_regs(struct smmu_device *smmu, int enable)
{
	u32 val;

	smmu_write(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
	FLUSH_SMMU_REGS(smmu);
	val = SMMU_TLB_FLUSH_VA_MATCH_ALL |
		SMMU_TLB_FLUSH_ASID_MATCH_disable;
	smmu_write(smmu, val, SMMU_TLB_FLUSH);

	if (enable)
		smmu_write(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	FLUSH_SMMU_REGS(smmu);
}

static int smmu_setup_regs(struct smmu_device *smmu)
{
	int i;
	u32 val;

	for (i = 0; i < smmu->num_as; i++) {
		struct smmu_as *as = &smmu->as[i];
		struct smmu_client *c;

		smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
		val = as->pdir_page ?
			SMMU_MK_PDIR(as->pdir_page, as->pdir_attr) :
			SMMU_PTB_DATA_RESET_VAL;
		smmu_write(smmu, val, SMMU_PTB_DATA);

		list_for_each_entry(c, &as->client, list)
			__smmu_client_set_hwgrp(c, c->hwgrp, 1);
	}

	smmu_write(smmu, smmu->translation_enable_0, SMMU_TRANSLATION_ENABLE_0);
	smmu_write(smmu, smmu->translation_enable_1, SMMU_TRANSLATION_ENABLE_1);
	smmu_write(smmu, smmu->translation_enable_2, SMMU_TRANSLATION_ENABLE_2);
	smmu_write(smmu, smmu->asid_security, SMMU_ASID_SECURITY);
	smmu_write(smmu, SMMU_TLB_CONFIG_RESET_VAL, SMMU_CACHE_CONFIG(_TLB));
	smmu_write(smmu, SMMU_PTC_CONFIG_RESET_VAL, SMMU_CACHE_CONFIG(_PTC));

	smmu_flush_regs(smmu, 1);

	return tegra_ahb_enable_smmu(smmu->ahb);
}

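/*
 * The PTC (page table cache) entry is invalidated before the TLB entry,
 * presumably because a TLB miss refills from the PTC; flushing in the
 * opposite order could let the stale PTE be fetched straight back into
 * the TLB.
 */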
static void flush_ptc_and_tlb(struct smmu_device *smmu,
			      struct smmu_as *as, dma_addr_t iova,
			      unsigned long *pte, struct page *page, int is_pde)
{
	u32 val;
	unsigned long tlb_flush_va = is_pde
		? SMMU_TLB_FLUSH_VA(iova, SECTION)
		: SMMU_TLB_FLUSH_VA(iova, GROUP);

	val = SMMU_PTC_FLUSH_TYPE_ADR | VA_PAGE_TO_PA(pte, page);
	smmu_write(smmu, val, SMMU_PTC_FLUSH);
	FLUSH_SMMU_REGS(smmu);
	val = tlb_flush_va |
		SMMU_TLB_FLUSH_ASID_MATCH__ENABLE |
		(as->asid << SMMU_TLB_FLUSH_ASID_SHIFT);
	smmu_write(smmu, val, SMMU_TLB_FLUSH);
	FLUSH_SMMU_REGS(smmu);
}

static void free_ptbl(struct smmu_as *as, dma_addr_t iova)
{
	unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
	unsigned long *pdir = (unsigned long *)page_address(as->pdir_page);

	if (pdir[pdn] != _PDE_VACANT(pdn)) {
		dev_dbg(as->smmu->dev, "pdn: %lx\n", pdn);

		ClearPageReserved(SMMU_EX_PTBL_PAGE(pdir[pdn]));
		__free_page(SMMU_EX_PTBL_PAGE(pdir[pdn]));
		pdir[pdn] = _PDE_VACANT(pdn);
		FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
		flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
				  as->pdir_page, 1);
	}
}

static void free_pdir(struct smmu_as *as)
{
	unsigned addr;
	int count;
	struct device *dev = as->smmu->dev;

	if (!as->pdir_page)
		return;

	addr = as->smmu->iovmm_base;
	count = as->smmu->page_count;
	while (count-- > 0) {
		free_ptbl(as, addr);
		addr += SMMU_PAGE_SIZE * SMMU_PTBL_COUNT;
	}
	ClearPageReserved(as->pdir_page);
	__free_page(as->pdir_page);
	as->pdir_page = NULL;
	devm_kfree(dev, as->pte_count);
	as->pte_count = NULL;
}

/*
 * Maps PTBL for given iova and returns the PTE address
 * Caller must unmap the mapped PTBL returned in *ptbl_page_p
 */
static unsigned long *locate_pte(struct smmu_as *as,
				 dma_addr_t iova, bool allocate,
				 struct page **ptbl_page_p,
				 unsigned int **count)
{
	unsigned long ptn = SMMU_ADDR_TO_PFN(iova);
	unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
	unsigned long *pdir = page_address(as->pdir_page);
	unsigned long *ptbl;

	if (pdir[pdn] != _PDE_VACANT(pdn)) {
		/* Mapped entry table already exists */
		*ptbl_page_p = SMMU_EX_PTBL_PAGE(pdir[pdn]);
		ptbl = page_address(*ptbl_page_p);
	} else if (!allocate) {
		return NULL;
	} else {
		int pn;
		unsigned long addr = SMMU_PDN_TO_ADDR(pdn);

		/* Vacant - allocate a new page table */
		dev_dbg(as->smmu->dev, "New PTBL pdn: %lx\n", pdn);

		*ptbl_page_p = alloc_page(GFP_ATOMIC);
		if (!*ptbl_page_p) {
			dev_err(as->smmu->dev,
				"failed to allocate smmu_device page table\n");
			return NULL;
		}
		SetPageReserved(*ptbl_page_p);
		ptbl = (unsigned long *)page_address(*ptbl_page_p);
		for (pn = 0; pn < SMMU_PTBL_COUNT;
		     pn++, addr += SMMU_PAGE_SIZE) {
			ptbl[pn] = _PTE_VACANT(addr);
		}
		FLUSH_CPU_DCACHE(ptbl, *ptbl_page_p, SMMU_PTBL_SIZE);
		pdir[pdn] = SMMU_MK_PDE(*ptbl_page_p,
					as->pde_attr | _PDE_NEXT);
		FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
		flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
				  as->pdir_page, 1);
	}
	*count = &as->pte_count[pdn];

	return &ptbl[ptn % SMMU_PTBL_COUNT];
}

#ifdef CONFIG_SMMU_SIG_DEBUG
static void put_signature(struct smmu_as *as,
			  dma_addr_t iova, unsigned long pfn)
{
	struct page *page;
	unsigned long *vaddr;

	page = pfn_to_page(pfn);
	vaddr = page_address(page);
	if (!vaddr)
		return;

	vaddr[0] = iova;
	vaddr[1] = pfn << PAGE_SHIFT;
	FLUSH_CPU_DCACHE(vaddr, page, sizeof(vaddr[0]) * 2);
}
#else
static inline void put_signature(struct smmu_as *as,
				 unsigned long addr, unsigned long pfn)
{
}
#endif

/*
 * Caller must not hold as->lock
 */
static int alloc_pdir(struct smmu_as *as)
{
	unsigned long *pdir, flags;
	int pdn, err = 0;
	u32 val;
	struct smmu_device *smmu = as->smmu;
	struct page *page;
	unsigned int *cnt;

	/*
	 * do the allocation, then grab as->lock
	 */
	cnt = devm_kzalloc(smmu->dev,
			   sizeof(cnt[0]) * SMMU_PDIR_COUNT,
			   GFP_KERNEL);
	page = alloc_page(GFP_KERNEL | __GFP_DMA);

	spin_lock_irqsave(&as->lock, flags);

	if (as->pdir_page) {
		/* We raced, free the redundant */
		err = -EAGAIN;
		goto err_out;
	}

	if (!page || !cnt) {
		dev_err(smmu->dev, "failed to allocate at %s\n", __func__);
		err = -ENOMEM;
		goto err_out;
	}

	as->pdir_page = page;
	as->pte_count = cnt;

	SetPageReserved(as->pdir_page);
	pdir = page_address(as->pdir_page);

	for (pdn = 0; pdn < SMMU_PDIR_COUNT; pdn++)
		pdir[pdn] = _PDE_VACANT(pdn);
	FLUSH_CPU_DCACHE(pdir, as->pdir_page, SMMU_PDIR_SIZE);
	val = SMMU_PTC_FLUSH_TYPE_ADR | VA_PAGE_TO_PA(pdir, as->pdir_page);
	smmu_write(smmu, val, SMMU_PTC_FLUSH);
	FLUSH_SMMU_REGS(as->smmu);
	val = SMMU_TLB_FLUSH_VA_MATCH_ALL |
		SMMU_TLB_FLUSH_ASID_MATCH__ENABLE |
		(as->asid << SMMU_TLB_FLUSH_ASID_SHIFT);
	smmu_write(smmu, val, SMMU_TLB_FLUSH);
	FLUSH_SMMU_REGS(as->smmu);

	spin_unlock_irqrestore(&as->lock, flags);

	return 0;

err_out:
	spin_unlock_irqrestore(&as->lock, flags);

	devm_kfree(smmu->dev, cnt);
	if (page)
		__free_page(page);
	return err;
}

static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova)
{
	unsigned long *pte;
	struct page *page;
	unsigned int *count;

	pte = locate_pte(as, iova, false, &page, &count);
	if (WARN_ON(!pte))
		return;

	if (WARN_ON(*pte == _PTE_VACANT(iova)))
		return;

	*pte = _PTE_VACANT(iova);
	FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
	flush_ptc_and_tlb(as->smmu, as, iova, pte, page, 0);
	if (!--(*count))
		free_ptbl(as, iova);
}

static void __smmu_iommu_map_pfn(struct smmu_as *as, dma_addr_t iova,
				 unsigned long pfn)
{
	struct smmu_device *smmu = as->smmu;
	unsigned long *pte;
	unsigned int *count;
	struct page *page;

	pte = locate_pte(as, iova, true, &page, &count);
	if (WARN_ON(!pte))
		return;

	if (*pte == _PTE_VACANT(iova))
		(*count)++;
	*pte = SMMU_PFN_TO_PTE(pfn, as->pte_attr);
	if (unlikely((*pte == _PTE_VACANT(iova))))
		(*count)--;
	FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
	flush_ptc_and_tlb(smmu, as, iova, pte, page, 0);
	put_signature(as, iova, pfn);
}

static int smmu_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct smmu_as *as = domain->priv;
	unsigned long pfn = __phys_to_pfn(pa);
	unsigned long flags;

	dev_dbg(as->smmu->dev, "[%d] %08lx:%08x\n", as->asid, iova, pa);

	if (!pfn_valid(pfn))
		return -ENOMEM;

	spin_lock_irqsave(&as->lock, flags);
	__smmu_iommu_map_pfn(as, iova, pfn);
	spin_unlock_irqrestore(&as->lock, flags);
	return 0;
}

static size_t smmu_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes)
{
	struct smmu_as *as = domain->priv;
	unsigned long flags;

	dev_dbg(as->smmu->dev, "[%d] %08lx\n", as->asid, iova);

	spin_lock_irqsave(&as->lock, flags);
	__smmu_iommu_unmap(as, iova);
	spin_unlock_irqrestore(&as->lock, flags);
	return SMMU_PAGE_SIZE;
}

static phys_addr_t smmu_iommu_iova_to_phys(struct iommu_domain *domain,
					   unsigned long iova)
{
	struct smmu_as *as = domain->priv;
	unsigned long *pte;
	unsigned int *count;
	struct page *page;
	unsigned long pfn;
	unsigned long flags;

	spin_lock_irqsave(&as->lock, flags);

	pte = locate_pte(as, iova, true, &page, &count);
	pfn = *pte & SMMU_PFN_MASK;
	WARN_ON(!pfn_valid(pfn));
	dev_dbg(as->smmu->dev,
		"iova:%08lx pfn:%08lx asid:%d\n", iova, pfn, as->asid);

	spin_unlock_irqrestore(&as->lock, flags);
	return PFN_PHYS(pfn);
}

static int smmu_iommu_domain_has_cap(struct iommu_domain *domain,
				     unsigned long cap)
{
	return 0;
}

static int smmu_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct smmu_as *as = domain->priv;
	struct smmu_device *smmu = as->smmu;
	struct smmu_client *client, *c;
	u32 map;
	int err;

	client = devm_kzalloc(smmu->dev, sizeof(*c), GFP_KERNEL);
	if (!client)
		return -ENOMEM;
	client->dev = dev;
	client->as = as;
	map = (unsigned long)dev->platform_data;
	if (!map)
		return -EINVAL;

	err = smmu_client_enable_hwgrp(client, map);
	if (err)
		goto err_hwgrp;

	spin_lock(&as->client_lock);
	list_for_each_entry(c, &as->client, list) {
		if (c->dev == dev) {
			dev_err(smmu->dev,
				"%s is already attached\n", dev_name(c->dev));
			err = -EINVAL;
			goto err_client;
		}
	}
	list_add(&client->list, &as->client);
	spin_unlock(&as->client_lock);

	/*
	 * Reserve "page zero" for AVP vectors using a common dummy
	 * page.
	 */
	if (map & HWG_AVPC) {
		struct page *page;

		page = as->smmu->avp_vector_page;
		__smmu_iommu_map_pfn(as, 0, page_to_pfn(page));

		pr_info("Reserve \"page zero\" for AVP vectors using a common dummy\n");
	}

	dev_dbg(smmu->dev, "%s is attached\n", dev_name(dev));
	return 0;

err_client:
	smmu_client_disable_hwgrp(client);
	spin_unlock(&as->client_lock);
err_hwgrp:
	devm_kfree(smmu->dev, client);
	return err;
}

static void smmu_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct smmu_as *as = domain->priv;
	struct smmu_device *smmu = as->smmu;
	struct smmu_client *c;

	spin_lock(&as->client_lock);

	list_for_each_entry(c, &as->client, list) {
		if (c->dev == dev) {
			smmu_client_disable_hwgrp(c);
			list_del(&c->list);
			c->as = NULL;
			dev_dbg(smmu->dev,
				"%s is detached\n", dev_name(c->dev));
			devm_kfree(smmu->dev, c);
			goto out;
		}
	}
	dev_err(smmu->dev, "Couldn't find %s\n", dev_name(dev));
out:
	spin_unlock(&as->client_lock);
}

static int smmu_iommu_domain_init(struct iommu_domain *domain)
{
	int i, err = -EAGAIN;
	unsigned long flags;
	struct smmu_as *as;
	struct smmu_device *smmu = smmu_handle;

	/* Look for a free AS with lock held */
	for (i = 0; i < smmu->num_as; i++) {
		as = &smmu->as[i];

		if (as->pdir_page)
			continue;

		err = alloc_pdir(as);
		if (!err)
			goto found;

		if (err != -EAGAIN)
			break;
	}
	if (i == smmu->num_as)
		dev_err(smmu->dev, "no free AS\n");
	return err;

found:
	spin_lock_irqsave(&smmu->lock, flags);

	/* Update PDIR register */
	smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
	smmu_write(smmu,
		   SMMU_MK_PDIR(as->pdir_page, as->pdir_attr), SMMU_PTB_DATA);
	FLUSH_SMMU_REGS(smmu);

	spin_unlock_irqrestore(&smmu->lock, flags);

	domain->priv = as;

	domain->geometry.aperture_start = smmu->iovmm_base;
	domain->geometry.aperture_end   = smmu->iovmm_base +
		smmu->page_count * SMMU_PAGE_SIZE - 1;
	domain->geometry.force_aperture = true;

	dev_dbg(smmu->dev, "smmu_as@%p\n", as);

	return 0;
}

static void smmu_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct smmu_as *as = domain->priv;
	struct smmu_device *smmu = as->smmu;
	unsigned long flags;

	spin_lock_irqsave(&as->lock, flags);

	if (as->pdir_page) {
		spin_lock(&smmu->lock);
		smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
		smmu_write(smmu, SMMU_PTB_DATA_RESET_VAL, SMMU_PTB_DATA);
		FLUSH_SMMU_REGS(smmu);
		spin_unlock(&smmu->lock);

		free_pdir(as);
	}

	while (!list_empty(&as->client)) {
		struct smmu_client *c;

		c = list_first_entry(&as->client, struct smmu_client, list);
		smmu_iommu_detach_dev(domain, c->dev);
	}

	spin_unlock_irqrestore(&as->lock, flags);

	domain->priv = NULL;
	dev_dbg(smmu->dev, "smmu_as@%p\n", as);
}

static struct iommu_ops smmu_iommu_ops = {
	.domain_init	= smmu_iommu_domain_init,
	.domain_destroy	= smmu_iommu_domain_destroy,
	.attach_dev	= smmu_iommu_attach_dev,
	.detach_dev	= smmu_iommu_detach_dev,
	.map		= smmu_iommu_map,
	.unmap		= smmu_iommu_unmap,
	.iova_to_phys	= smmu_iommu_iova_to_phys,
	.domain_has_cap	= smmu_iommu_domain_has_cap,
	.pgsize_bitmap	= SMMU_IOMMU_PGSIZES,
};

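/*
 * Minimal usage sketch from a client driver's point of view (illustrative
 * only; "pdev" stands for a hypothetical platform device behind this SMMU):
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *
 *	iommu_attach_device(domain, &pdev->dev);
 *	iommu_map(domain, iova, phys, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(domain, iova, SZ_4K);
 *	iommu_detach_device(domain, &pdev->dev);
 *	iommu_domain_free(domain);
 */
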
/* Should be in the order of enum */
static const char * const smmu_debugfs_mc[] = { "mc", };
static const char * const smmu_debugfs_cache[] = { "tlb", "ptc", };

static ssize_t smmu_debugfs_stats_write(struct file *file,
					const char __user *buffer,
					size_t count, loff_t *pos)
{
	struct smmu_debugfs_info *info;
	struct smmu_device *smmu;
	struct dentry *dent;
	int i;
	enum {
		_OFF = 0,
		_ON,
		_RESET,
	};
	const char * const command[] = {
		[_OFF]		= "off",
		[_ON]		= "on",
		[_RESET]	= "reset",
	};
	char str[] = "reset";
	u32 val;
	size_t offs;

	count = min_t(size_t, count, sizeof(str));
	if (copy_from_user(str, buffer, count))
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(command); i++)
		if (strncmp(str, command[i],
			    strlen(command[i])) == 0)
			break;

	if (i == ARRAY_SIZE(command))
		return -EINVAL;

	dent = file->f_dentry;
	info = dent->d_inode->i_private;
	smmu = info->smmu;

	offs = SMMU_CACHE_CONFIG(info->cache);
	val = smmu_read(smmu, offs);
	switch (i) {
	case _OFF:
		val &= ~SMMU_CACHE_CONFIG_STATS_ENABLE;
		val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
		smmu_write(smmu, val, offs);
		break;
	case _ON:
		val |= SMMU_CACHE_CONFIG_STATS_ENABLE;
		val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
		smmu_write(smmu, val, offs);
		break;
	case _RESET:
		val |= SMMU_CACHE_CONFIG_STATS_TEST;
		smmu_write(smmu, val, offs);
		val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
		smmu_write(smmu, val, offs);
		break;
	default:
		BUG();
		break;
	}

	dev_dbg(smmu->dev, "%s() %08x, %08x @%08x\n", __func__,
		val, smmu_read(smmu, offs), offs);

	return count;
}

static int smmu_debugfs_stats_show(struct seq_file *s, void *v)
{
	struct smmu_debugfs_info *info;
	struct smmu_device *smmu;
	struct dentry *dent;
	int i;
	const char * const stats[] = { "hit", "miss", };

	dent = d_find_alias(s->private);
	info = dent->d_inode->i_private;
	smmu = info->smmu;

	for (i = 0; i < ARRAY_SIZE(stats); i++) {
		u32 val;
		size_t offs;

		offs = SMMU_STATS_CACHE_COUNT(info->mc, info->cache, i);
		val = smmu_read(smmu, offs);
		seq_printf(s, "%s:%08x ", stats[i], val);

		dev_dbg(smmu->dev, "%s() %s %08x @%08x\n", __func__,
			stats[i], val, offs);
	}
	seq_printf(s, "\n");
	dput(dent);

	return 0;
}

static int smmu_debugfs_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, smmu_debugfs_stats_show, inode);
}

static const struct file_operations smmu_debugfs_stats_fops = {
	.open		= smmu_debugfs_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= smmu_debugfs_stats_write,
};

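/*
 * Example shell session (illustrative; the top-level directory is named
 * after dev_name(smmu->dev)):
 *
 *	echo on    > /sys/kernel/debug/<smmu>/mc/tlb	# enable hit/miss counting
 *	echo reset > /sys/kernel/debug/<smmu>/mc/tlb	# zero the counters
 *	cat /sys/kernel/debug/<smmu>/mc/tlb		# prints "hit:... miss:..."
 */
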
static void smmu_debugfs_delete(struct smmu_device *smmu)
{
	debugfs_remove_recursive(smmu->debugfs_root);
	kfree(smmu->debugfs_info);
}

static void smmu_debugfs_create(struct smmu_device *smmu)
{
	int i;
	size_t bytes;
	struct dentry *root;

	bytes = ARRAY_SIZE(smmu_debugfs_mc) * ARRAY_SIZE(smmu_debugfs_cache) *
		sizeof(*smmu->debugfs_info);
	smmu->debugfs_info = kmalloc(bytes, GFP_KERNEL);
	if (!smmu->debugfs_info)
		return;

	root = debugfs_create_dir(dev_name(smmu->dev), NULL);
	if (!root)
		goto err_out;
	smmu->debugfs_root = root;

	for (i = 0; i < ARRAY_SIZE(smmu_debugfs_mc); i++) {
		int j;
		struct dentry *mc;

		mc = debugfs_create_dir(smmu_debugfs_mc[i], root);
		if (!mc)
			goto err_out;

		for (j = 0; j < ARRAY_SIZE(smmu_debugfs_cache); j++) {
			struct dentry *cache;
			struct smmu_debugfs_info *info;

			info = smmu->debugfs_info;
			info += i * ARRAY_SIZE(smmu_debugfs_cache) + j;
			info->smmu = smmu;
			info->mc = i;
			info->cache = j;

			cache = debugfs_create_file(smmu_debugfs_cache[j],
						    S_IWUGO | S_IRUGO, mc,
						    (void *)info,
						    &smmu_debugfs_stats_fops);
			if (!cache)
				goto err_out;
		}
	}

	return;

err_out:
	smmu_debugfs_delete(smmu);
}

static int tegra_smmu_suspend(struct device *dev)
{
	struct smmu_device *smmu = dev_get_drvdata(dev);

	smmu->translation_enable_0 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_0);
	smmu->translation_enable_1 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_1);
	smmu->translation_enable_2 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_2);
	smmu->asid_security = smmu_read(smmu, SMMU_ASID_SECURITY);
	return 0;
}

static int tegra_smmu_resume(struct device *dev)
{
	struct smmu_device *smmu = dev_get_drvdata(dev);
	unsigned long flags;
	int err;

	spin_lock_irqsave(&smmu->lock, flags);
	err = smmu_setup_regs(smmu);
	spin_unlock_irqrestore(&smmu->lock, flags);
	return err;
}

static int tegra_smmu_probe(struct platform_device *pdev)
{
	struct smmu_device *smmu;
	struct device *dev = &pdev->dev;
	int i, asids, err = 0;
	dma_addr_t uninitialized_var(base);
	size_t bytes, uninitialized_var(size);

	if (smmu_handle)
		return -EIO;

	BUILD_BUG_ON(PAGE_SHIFT != SMMU_PAGE_SHIFT);

	if (of_property_read_u32(dev->of_node, "nvidia,#asids", &asids))
		return -ENODEV;

	bytes = sizeof(*smmu) + asids * sizeof(*smmu->as);
	smmu = devm_kzalloc(dev, bytes, GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate smmu_device\n");
		return -ENOMEM;
	}

	for (i = 0; i < ARRAY_SIZE(smmu->regs); i++) {
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			return -ENODEV;
		smmu->regs[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(smmu->regs[i]))
			return PTR_ERR(smmu->regs[i]);
	}

	err = of_get_dma_window(dev->of_node, NULL, 0, NULL, &base, &size);
	if (err)
		return -ENODEV;

	if (size & SMMU_PAGE_MASK)
		return -EINVAL;

	size >>= SMMU_PAGE_SHIFT;
	if (!size)
		return -EINVAL;

	smmu->ahb = of_parse_phandle(dev->of_node, "nvidia,ahb", 0);
	if (!smmu->ahb)
		return -ENODEV;

	smmu->dev = dev;
	smmu->num_as = asids;
	smmu->iovmm_base = base;
	smmu->page_count = size;

	smmu->translation_enable_0 = ~0;
	smmu->translation_enable_1 = ~0;
	smmu->translation_enable_2 = ~0;
	smmu->asid_security = 0;

	for (i = 0; i < smmu->num_as; i++) {
		struct smmu_as *as = &smmu->as[i];

		as->smmu = smmu;
		as->asid = i;
		as->pdir_attr = _PDIR_ATTR;
		as->pde_attr = _PDE_ATTR;
		as->pte_attr = _PTE_ATTR;

		spin_lock_init(&as->lock);
		INIT_LIST_HEAD(&as->client);
	}
	spin_lock_init(&smmu->lock);
	err = smmu_setup_regs(smmu);
	if (err)
		return err;
	platform_set_drvdata(pdev, smmu);

	smmu->avp_vector_page = alloc_page(GFP_KERNEL);
	if (!smmu->avp_vector_page)
		return -ENOMEM;

	smmu_debugfs_create(smmu);
	smmu_handle = smmu;
	bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
	return 0;
}

static int tegra_smmu_remove(struct platform_device *pdev)
{
	struct smmu_device *smmu = platform_get_drvdata(pdev);
	int i;

	smmu_debugfs_delete(smmu);

	smmu_write(smmu, SMMU_CONFIG_DISABLE, SMMU_CONFIG);
	for (i = 0; i < smmu->num_as; i++)
		free_pdir(&smmu->as[i]);
	__free_page(smmu->avp_vector_page);
	smmu_handle = NULL;
	return 0;
}

const struct dev_pm_ops tegra_smmu_pm_ops = {
	.suspend	= tegra_smmu_suspend,
	.resume		= tegra_smmu_resume,
};

#ifdef CONFIG_OF
static struct of_device_id tegra_smmu_of_match[] = {
	{ .compatible = "nvidia,tegra30-smmu", },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_smmu_of_match);
#endif

static struct platform_driver tegra_smmu_driver = {
	.probe		= tegra_smmu_probe,
	.remove		= tegra_smmu_remove,
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "tegra-smmu",
		.pm	= &tegra_smmu_pm_ops,
		.of_match_table = of_match_ptr(tegra_smmu_of_match),
	},
};

static int tegra_smmu_init(void)
{
	return platform_driver_register(&tegra_smmu_driver);
}

static void __exit tegra_smmu_exit(void)
{
	platform_driver_unregister(&tegra_smmu_driver);
}

subsys_initcall(tegra_smmu_init);
module_exit(tegra_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for SMMU in Tegra30");
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
MODULE_ALIAS("platform:tegra-smmu");
MODULE_LICENSE("GPL v2");