/*
 * IOMMU API for SMMU in Tegra30
 *
 * Copyright (c) 2011-2012, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define pr_fmt(fmt)	"%s(): " fmt, __func__

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/device.h>
#include <linux/sched.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_iommu.h>

#include <asm/page.h>
#include <asm/cacheflush.h>

#include <mach/iomap.h>
#include <mach/smmu.h>
#include <mach/tegra-ahb.h>

/* bitmap of the page sizes currently supported */
#define SMMU_IOMMU_PGSIZES	(SZ_4K)

#define SMMU_CONFIG				0x10
#define SMMU_CONFIG_DISABLE			0
#define SMMU_CONFIG_ENABLE			1

#define SMMU_TLB_CONFIG				0x14
#define SMMU_TLB_CONFIG_STATS__MASK		(1 << 31)
#define SMMU_TLB_CONFIG_STATS__ENABLE		(1 << 31)
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS__ENABLE	(1 << 29)
#define SMMU_TLB_CONFIG_ACTIVE_LINES__VALUE	0x10
#define SMMU_TLB_CONFIG_RESET_VAL		0x20000010

#define SMMU_PTC_CONFIG				0x18
#define SMMU_PTC_CONFIG_STATS__MASK		(1 << 31)
#define SMMU_PTC_CONFIG_STATS__ENABLE		(1 << 31)
#define SMMU_PTC_CONFIG_CACHE__ENABLE		(1 << 29)
#define SMMU_PTC_CONFIG_INDEX_MAP__PATTERN	0x3f
#define SMMU_PTC_CONFIG_RESET_VAL		0x2000003f

#define SMMU_PTB_ASID				0x1c
#define SMMU_PTB_ASID_CURRENT_SHIFT		0

#define SMMU_PTB_DATA				0x20
#define SMMU_PTB_DATA_RESET_VAL			0
#define SMMU_PTB_DATA_ASID_NONSECURE_SHIFT	29
#define SMMU_PTB_DATA_ASID_WRITABLE_SHIFT	30
#define SMMU_PTB_DATA_ASID_READABLE_SHIFT	31

#define SMMU_TLB_FLUSH				0x30
#define SMMU_TLB_FLUSH_VA_MATCH_ALL		0
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION		2
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP		3
#define SMMU_TLB_FLUSH_ASID_SHIFT		29
#define SMMU_TLB_FLUSH_ASID_MATCH_DISABLE	0
#define SMMU_TLB_FLUSH_ASID_MATCH_ENABLE	1
#define SMMU_TLB_FLUSH_ASID_MATCH_SHIFT		31

#define SMMU_PTC_FLUSH				0x34
#define SMMU_PTC_FLUSH_TYPE_ALL			0
#define SMMU_PTC_FLUSH_TYPE_ADR			1
#define SMMU_PTC_FLUSH_ADR_SHIFT		4

#define SMMU_ASID_SECURITY			0x38

#define SMMU_STATS_TLB_HIT_COUNT		0x1f0
#define SMMU_STATS_TLB_MISS_COUNT		0x1f4
#define SMMU_STATS_PTC_HIT_COUNT		0x1f8
#define SMMU_STATS_PTC_MISS_COUNT		0x1fc

#define SMMU_TRANSLATION_ENABLE_0		0x228
#define SMMU_TRANSLATION_ENABLE_1		0x22c
#define SMMU_TRANSLATION_ENABLE_2		0x230

#define SMMU_AFI_ASID	0x238   /* PCIE */
#define SMMU_AVPC_ASID	0x23c   /* AVP */
#define SMMU_DC_ASID	0x240   /* Display controller */
#define SMMU_DCB_ASID	0x244   /* Display controller B */
#define SMMU_EPP_ASID	0x248   /* Encoder pre-processor */
#define SMMU_G2_ASID	0x24c   /* 2D engine */
#define SMMU_HC_ASID	0x250   /* Host1x */
#define SMMU_HDA_ASID	0x254   /* High-def audio */
#define SMMU_ISP_ASID	0x258   /* Image signal processor */
#define SMMU_MPE_ASID	0x264   /* MPEG encoder */
#define SMMU_NV_ASID	0x268   /* (3D) */
#define SMMU_NV2_ASID	0x26c   /* (3D) */
#define SMMU_PPCS_ASID	0x270   /* AHB */
#define SMMU_SATA_ASID	0x278   /* SATA */
#define SMMU_VDE_ASID	0x27c   /* Video decoder */
#define SMMU_VI_ASID	0x280   /* Video input */

#define SMMU_PDE_NEXT_SHIFT		28

#define SMMU_TLB_FLUSH_VA_SECTION__MASK		0xffc00000
#define SMMU_TLB_FLUSH_VA_SECTION__SHIFT	12 /* right shift */
#define SMMU_TLB_FLUSH_VA_GROUP__MASK		0xffffc000
#define SMMU_TLB_FLUSH_VA_GROUP__SHIFT		12 /* right shift */
#define SMMU_TLB_FLUSH_VA(iova, which)	\
	((((iova) & SMMU_TLB_FLUSH_VA_##which##__MASK) >>	\
		SMMU_TLB_FLUSH_VA_##which##__SHIFT) |		\
	SMMU_TLB_FLUSH_VA_MATCH_##which)
#define SMMU_PTB_ASID_CUR(n)	\
		((n) << SMMU_PTB_ASID_CURRENT_SHIFT)
#define SMMU_TLB_FLUSH_ASID_MATCH_disable		\
		(SMMU_TLB_FLUSH_ASID_MATCH_DISABLE <<	\
			SMMU_TLB_FLUSH_ASID_MATCH_SHIFT)
#define SMMU_TLB_FLUSH_ASID_MATCH__ENABLE		\
		(SMMU_TLB_FLUSH_ASID_MATCH_ENABLE <<	\
			SMMU_TLB_FLUSH_ASID_MATCH_SHIFT)
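
/*
 * Illustrative example (not taken from the original source): to flush
 * the TLB entry for the 4KB "group" containing IOVA 0xabcd0000 in
 * ASID 1, the flush value is composed as
 *
 *	val = SMMU_TLB_FLUSH_VA(0xabcd0000, GROUP) |
 *	      SMMU_TLB_FLUSH_ASID_MATCH__ENABLE |
 *	      (1 << SMMU_TLB_FLUSH_ASID_SHIFT);
 *	smmu_write(smmu, val, SMMU_TLB_FLUSH);
 *
 * i.e. VA bits [31:14] shifted right by 12, the GROUP match type in
 * the low bits and the ASID match fields on top; flush_ptc_and_tlb()
 * below does exactly this.
 */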

#define SMMU_PAGE_SHIFT 12
#define SMMU_PAGE_SIZE	(1 << SMMU_PAGE_SHIFT)
#define SMMU_PAGE_MASK	((1 << SMMU_PAGE_SHIFT) - 1)

#define SMMU_PDIR_COUNT	1024
#define SMMU_PDIR_SIZE	(sizeof(unsigned long) * SMMU_PDIR_COUNT)
#define SMMU_PTBL_COUNT	1024
#define SMMU_PTBL_SIZE	(sizeof(unsigned long) * SMMU_PTBL_COUNT)
#define SMMU_PDIR_SHIFT	12
#define SMMU_PDE_SHIFT	12
#define SMMU_PTE_SHIFT	12
#define SMMU_PFN_MASK	0x000fffff

#define SMMU_ADDR_TO_PFN(addr)	((addr) >> 12)
#define SMMU_ADDR_TO_PDN(addr)	((addr) >> 22)
#define SMMU_PDN_TO_ADDR(pdn)	((pdn) << 22)
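
/*
 * The SMMU uses a two-level page table: a 1024-entry page directory
 * (one entry per 4MB "section") and 1024-entry page tables (one entry
 * per 4KB page), together covering a 32-bit IOVA space. For a given
 * IOVA, bits [31:22] index the page directory (PDN) and bits [31:12]
 * form the page frame number (PFN), as encoded by the macros above.
 */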

#define _READABLE	(1 << SMMU_PTB_DATA_ASID_READABLE_SHIFT)
#define _WRITABLE	(1 << SMMU_PTB_DATA_ASID_WRITABLE_SHIFT)
#define _NONSECURE	(1 << SMMU_PTB_DATA_ASID_NONSECURE_SHIFT)
#define _PDE_NEXT	(1 << SMMU_PDE_NEXT_SHIFT)
#define _MASK_ATTR	(_READABLE | _WRITABLE | _NONSECURE)

#define _PDIR_ATTR	(_READABLE | _WRITABLE | _NONSECURE)

#define _PDE_ATTR	(_READABLE | _WRITABLE | _NONSECURE)
#define _PDE_ATTR_N	(_PDE_ATTR | _PDE_NEXT)
#define _PDE_VACANT(pdn)	(((pdn) << 10) | _PDE_ATTR)

#define _PTE_ATTR	(_READABLE | _WRITABLE | _NONSECURE)
#define _PTE_VACANT(addr)	(((addr) >> SMMU_PAGE_SHIFT) | _PTE_ATTR)

#define SMMU_MK_PDIR(page, attr)	\
		((page_to_phys(page) >> SMMU_PDIR_SHIFT) | (attr))
#define SMMU_MK_PDE(page, attr)		\
		(unsigned long)((page_to_phys(page) >> SMMU_PDE_SHIFT) | (attr))
#define SMMU_EX_PTBL_PAGE(pde)		\
		pfn_to_page((unsigned long)(pde) & SMMU_PFN_MASK)
#define SMMU_PFN_TO_PTE(pfn, attr)	(unsigned long)((pfn) | (attr))

#define SMMU_ASID_ENABLE(asid)	((asid) | (1 << 31))
#define SMMU_ASID_DISABLE	0
#define SMMU_ASID_ASID(n)	((n) & ~SMMU_ASID_ENABLE(0))

#define NUM_SMMU_REG_BANKS	3

#define smmu_client_enable_hwgrp(c, m)	smmu_client_set_hwgrp(c, m, 1)
#define smmu_client_disable_hwgrp(c)	smmu_client_set_hwgrp(c, 0, 0)
#define __smmu_client_enable_hwgrp(c, m) __smmu_client_set_hwgrp(c, m, 1)
#define __smmu_client_disable_hwgrp(c)	__smmu_client_set_hwgrp(c, 0, 0)
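
/*
 * A "hwgrp" is a bitmap of hardware client groups, one bit per
 * HWGRP_* ID. Enabling a group for a client programs the ASID of the
 * client's address space into the corresponding per-module
 * SMMU_*_ASID register from the table below.
 */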

#define HWGRP_INIT(client) [HWGRP_##client] = SMMU_##client##_ASID

static const u32 smmu_hwgrp_asid_reg[] = {
	HWGRP_INIT(AFI),
	HWGRP_INIT(AVPC),
	HWGRP_INIT(DC),
	HWGRP_INIT(DCB),
	HWGRP_INIT(EPP),
	HWGRP_INIT(G2),
	HWGRP_INIT(HC),
	HWGRP_INIT(HDA),
	HWGRP_INIT(ISP),
	HWGRP_INIT(MPE),
	HWGRP_INIT(NV),
	HWGRP_INIT(NV2),
	HWGRP_INIT(PPCS),
	HWGRP_INIT(SATA),
	HWGRP_INIT(VDE),
	HWGRP_INIT(VI),
};
#define HWGRP_ASID_REG(x) (smmu_hwgrp_asid_reg[x])

/*
 * Per client for address space
 */
struct smmu_client {
	struct device		*dev;
	struct list_head	list;
	struct smmu_as		*as;
	u32			hwgrp;
};

/*
 * Per address space
 */
struct smmu_as {
	struct smmu_device	*smmu;	/* back pointer to container */
	unsigned int		asid;
	spinlock_t		lock;	/* for pagetable */
	struct page		*pdir_page;
	unsigned long		pdir_attr;
	unsigned long		pde_attr;
	unsigned long		pte_attr;
	unsigned int		*pte_count;

	struct list_head	client;
	spinlock_t		client_lock; /* for client list */
};

/*
 * Per SMMU device - IOMMU device
 */
struct smmu_device {
	void __iomem	*regs[NUM_SMMU_REG_BANKS];
	unsigned long	iovmm_base;	/* remappable base address */
	unsigned long	page_count;	/* total remappable size */
	spinlock_t	lock;
	char		*name;
	struct device	*dev;
	int		num_as;
	struct smmu_as	*as;		/* Run-time allocated array */
	struct page	*avp_vector_page;	/* dummy page shared by all AS's */

	/*
	 * Register image savers for suspend/resume
	 */
	unsigned long translation_enable_0;
	unsigned long translation_enable_1;
	unsigned long translation_enable_2;
	unsigned long asid_security;

	struct device_node *ahb;
};

static struct smmu_device *smmu_handle; /* unique for a system */

/*
 * SMMU register accessors
 */
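/*
 * The register space is split across three banks; the bank offsets
 * below mirror the absolute register addresses defined above: bank 0
 * covers 0x10-0x3b (config, TLB/PTC control, page table base/data,
 * flushes, ASID security), bank 1 covers 0x1f0-0x1ff (statistics
 * counters) and bank 2 covers 0x228-0x283 (translation enables and
 * the per-module *_ASID registers). The accessors map an absolute
 * offset onto the matching bank and BUG() on anything in between.
 */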
static inline u32 smmu_read(struct smmu_device *smmu, size_t offs)
{
	BUG_ON(offs < 0x10);
	if (offs < 0x3c)
		return readl(smmu->regs[0] + offs - 0x10);
	BUG_ON(offs < 0x1f0);
	if (offs < 0x200)
		return readl(smmu->regs[1] + offs - 0x1f0);
	BUG_ON(offs < 0x228);
	if (offs < 0x284)
		return readl(smmu->regs[2] + offs - 0x228);
	BUG();
}

static inline void smmu_write(struct smmu_device *smmu, u32 val, size_t offs)
{
	BUG_ON(offs < 0x10);
	if (offs < 0x3c) {
		writel(val, smmu->regs[0] + offs - 0x10);
		return;
	}
	BUG_ON(offs < 0x1f0);
	if (offs < 0x200) {
		writel(val, smmu->regs[1] + offs - 0x1f0);
		return;
	}
	BUG_ON(offs < 0x228);
	if (offs < 0x284) {
		writel(val, smmu->regs[2] + offs - 0x228);
		return;
	}
	BUG();
}

#define VA_PAGE_TO_PA(va, page)	\
	(page_to_phys(page) + ((unsigned long)(va) & ~PAGE_MASK))

#define FLUSH_CPU_DCACHE(va, page, size)	\
	do {	\
		unsigned long _pa_ = VA_PAGE_TO_PA(va, page);		\
		__cpuc_flush_dcache_area((void *)(va), (size_t)(size));	\
		outer_flush_range(_pa_, _pa_+(size_t)(size));		\
	} while (0)
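
/*
 * The SMMU walks page tables in physical memory, bypassing the CPU
 * caches, so every page directory/table update must be cleaned out of
 * both the inner (L1) and outer (L2) caches before the PTC/TLB is
 * flushed; FLUSH_CPU_DCACHE() above does exactly that for the cache
 * lines holding the updated entries.
 */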

/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back barriers to ensure the APB/AHB bus
 * transaction is complete before initiating activity on the PPSB
 * block.
 */
#define FLUSH_SMMU_REGS(smmu)	smmu_read(smmu, SMMU_CONFIG)

#define smmu_client_hwgrp(c) (u32)((c)->dev->platform_data)

static int __smmu_client_set_hwgrp(struct smmu_client *c,
				   unsigned long map, int on)
{
	int i;
	struct smmu_as *as = c->as;
	u32 val, offs, mask = SMMU_ASID_ENABLE(as->asid);
	struct smmu_device *smmu = as->smmu;

	WARN_ON(!on && map);
	if (on && !map)
		return -EINVAL;
	if (!on)
		map = smmu_client_hwgrp(c);

	for_each_set_bit(i, &map, HWGRP_COUNT) {
		offs = HWGRP_ASID_REG(i);
		val = smmu_read(smmu, offs);
		if (on) {
			if (WARN_ON(val & mask))
				goto err_hw_busy;
			val |= mask;
		} else {
			WARN_ON((val & mask) == mask);
			val &= ~mask;
		}
		smmu_write(smmu, val, offs);
	}
	FLUSH_SMMU_REGS(smmu);
	c->hwgrp = map;
	return 0;

err_hw_busy:
	for_each_set_bit(i, &map, HWGRP_COUNT) {
		offs = HWGRP_ASID_REG(i);
		val = smmu_read(smmu, offs);
		val &= ~mask;
		smmu_write(smmu, val, offs);
	}
	return -EBUSY;
}

static int smmu_client_set_hwgrp(struct smmu_client *c, u32 map, int on)
{
	u32 val;
	unsigned long flags;
	struct smmu_as *as = c->as;
	struct smmu_device *smmu = as->smmu;

	spin_lock_irqsave(&smmu->lock, flags);
	val = __smmu_client_set_hwgrp(c, map, on);
	spin_unlock_irqrestore(&smmu->lock, flags);
	return val;
}

/*
 * Flush all TLB entries and all PTC entries
 * Caller must lock smmu
 */
static void smmu_flush_regs(struct smmu_device *smmu, int enable)
{
	u32 val;

	smmu_write(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
	FLUSH_SMMU_REGS(smmu);
	val = SMMU_TLB_FLUSH_VA_MATCH_ALL |
		SMMU_TLB_FLUSH_ASID_MATCH_disable;
	smmu_write(smmu, val, SMMU_TLB_FLUSH);

	if (enable)
		smmu_write(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	FLUSH_SMMU_REGS(smmu);
}

static int smmu_setup_regs(struct smmu_device *smmu)
{
	int i;
	u32 val;

	for (i = 0; i < smmu->num_as; i++) {
		struct smmu_as *as = &smmu->as[i];
		struct smmu_client *c;

		smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
		val = as->pdir_page ?
			SMMU_MK_PDIR(as->pdir_page, as->pdir_attr) :
			SMMU_PTB_DATA_RESET_VAL;
		smmu_write(smmu, val, SMMU_PTB_DATA);

		list_for_each_entry(c, &as->client, list)
			__smmu_client_set_hwgrp(c, c->hwgrp, 1);
	}

	smmu_write(smmu, smmu->translation_enable_0, SMMU_TRANSLATION_ENABLE_0);
	smmu_write(smmu, smmu->translation_enable_1, SMMU_TRANSLATION_ENABLE_1);
	smmu_write(smmu, smmu->translation_enable_2, SMMU_TRANSLATION_ENABLE_2);
	smmu_write(smmu, smmu->asid_security, SMMU_ASID_SECURITY);
	smmu_write(smmu, SMMU_TLB_CONFIG_RESET_VAL, SMMU_TLB_CONFIG);
	smmu_write(smmu, SMMU_PTC_CONFIG_RESET_VAL, SMMU_PTC_CONFIG);

	smmu_flush_regs(smmu, 1);

	return tegra_ahb_enable_smmu(smmu->ahb);
}

static void flush_ptc_and_tlb(struct smmu_device *smmu,
			      struct smmu_as *as, dma_addr_t iova,
			      unsigned long *pte, struct page *page, int is_pde)
{
	u32 val;
	unsigned long tlb_flush_va = is_pde
		? SMMU_TLB_FLUSH_VA(iova, SECTION)
		: SMMU_TLB_FLUSH_VA(iova, GROUP);

	val = SMMU_PTC_FLUSH_TYPE_ADR | VA_PAGE_TO_PA(pte, page);
	smmu_write(smmu, val, SMMU_PTC_FLUSH);
	FLUSH_SMMU_REGS(smmu);
	val = tlb_flush_va |
		SMMU_TLB_FLUSH_ASID_MATCH__ENABLE |
		(as->asid << SMMU_TLB_FLUSH_ASID_SHIFT);
	smmu_write(smmu, val, SMMU_TLB_FLUSH);
	FLUSH_SMMU_REGS(smmu);
}

static void free_ptbl(struct smmu_as *as, dma_addr_t iova)
{
	unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
	unsigned long *pdir = (unsigned long *)page_address(as->pdir_page);

	if (pdir[pdn] != _PDE_VACANT(pdn)) {
		dev_dbg(as->smmu->dev, "pdn: %lx\n", pdn);

		ClearPageReserved(SMMU_EX_PTBL_PAGE(pdir[pdn]));
		__free_page(SMMU_EX_PTBL_PAGE(pdir[pdn]));
		pdir[pdn] = _PDE_VACANT(pdn);
		FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
		flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
				  as->pdir_page, 1);
	}
}

static void free_pdir(struct smmu_as *as)
{
	unsigned addr;
	int count;
	struct device *dev = as->smmu->dev;

	if (!as->pdir_page)
		return;

	addr = as->smmu->iovmm_base;
	count = as->smmu->page_count;
	while (count-- > 0) {
		free_ptbl(as, addr);
		addr += SMMU_PAGE_SIZE * SMMU_PTBL_COUNT;
	}
	ClearPageReserved(as->pdir_page);
	__free_page(as->pdir_page);
	as->pdir_page = NULL;
	devm_kfree(dev, as->pte_count);
	as->pte_count = NULL;
}

/*
 * Maps PTBL for given iova and returns the PTE address
 * Caller must unmap the mapped PTBL returned in *ptbl_page_p
 */
static unsigned long *locate_pte(struct smmu_as *as,
				 dma_addr_t iova, bool allocate,
				 struct page **ptbl_page_p,
				 unsigned int **count)
{
	unsigned long ptn = SMMU_ADDR_TO_PFN(iova);
	unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
	unsigned long *pdir = page_address(as->pdir_page);
	unsigned long *ptbl;

	if (pdir[pdn] != _PDE_VACANT(pdn)) {
		/* Mapped entry table already exists */
		*ptbl_page_p = SMMU_EX_PTBL_PAGE(pdir[pdn]);
		ptbl = page_address(*ptbl_page_p);
	} else if (!allocate) {
		return NULL;
	} else {
		int pn;
		unsigned long addr = SMMU_PDN_TO_ADDR(pdn);

		/* Vacant - allocate a new page table */
		dev_dbg(as->smmu->dev, "New PTBL pdn: %lx\n", pdn);

		*ptbl_page_p = alloc_page(GFP_ATOMIC);
		if (!*ptbl_page_p) {
			dev_err(as->smmu->dev,
				"failed to allocate smmu_device page table\n");
			return NULL;
		}
		SetPageReserved(*ptbl_page_p);
		ptbl = (unsigned long *)page_address(*ptbl_page_p);
		for (pn = 0; pn < SMMU_PTBL_COUNT;
		     pn++, addr += SMMU_PAGE_SIZE) {
			ptbl[pn] = _PTE_VACANT(addr);
		}
		FLUSH_CPU_DCACHE(ptbl, *ptbl_page_p, SMMU_PTBL_SIZE);
		pdir[pdn] = SMMU_MK_PDE(*ptbl_page_p,
					as->pde_attr | _PDE_NEXT);
		FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
		flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
				  as->pdir_page, 1);
	}
	*count = &as->pte_count[pdn];

	return &ptbl[ptn % SMMU_PTBL_COUNT];
}

#ifdef CONFIG_SMMU_SIG_DEBUG
static void put_signature(struct smmu_as *as,
			  dma_addr_t iova, unsigned long pfn)
{
	struct page *page;
	unsigned long *vaddr;

	page = pfn_to_page(pfn);
	vaddr = page_address(page);
	if (!vaddr)
		return;

	vaddr[0] = iova;
	vaddr[1] = pfn << PAGE_SHIFT;
	FLUSH_CPU_DCACHE(vaddr, page, sizeof(vaddr[0]) * 2);
}
#else
static inline void put_signature(struct smmu_as *as,
				 unsigned long addr, unsigned long pfn)
{
}
#endif

/*
 * Caller must lock/unlock as
 */
static int alloc_pdir(struct smmu_as *as)
{
	unsigned long *pdir;
	int pdn;
	u32 val;
	struct smmu_device *smmu = as->smmu;

	if (as->pdir_page)
		return 0;

	as->pte_count = devm_kzalloc(smmu->dev,
		     sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_KERNEL);
	if (!as->pte_count) {
		dev_err(smmu->dev,
571 "failed to allocate smmu_device PTE cunters\n");
		return -ENOMEM;
	}
	as->pdir_page = alloc_page(GFP_KERNEL | __GFP_DMA);
	if (!as->pdir_page) {
		dev_err(smmu->dev,
			"failed to allocate smmu_device page directory\n");
		devm_kfree(smmu->dev, as->pte_count);
		as->pte_count = NULL;
		return -ENOMEM;
	}
	SetPageReserved(as->pdir_page);
	pdir = page_address(as->pdir_page);

	for (pdn = 0; pdn < SMMU_PDIR_COUNT; pdn++)
		pdir[pdn] = _PDE_VACANT(pdn);
	FLUSH_CPU_DCACHE(pdir, as->pdir_page, SMMU_PDIR_SIZE);
	val = SMMU_PTC_FLUSH_TYPE_ADR | VA_PAGE_TO_PA(pdir, as->pdir_page);
	smmu_write(smmu, val, SMMU_PTC_FLUSH);
	FLUSH_SMMU_REGS(as->smmu);
	val = SMMU_TLB_FLUSH_VA_MATCH_ALL |
		SMMU_TLB_FLUSH_ASID_MATCH__ENABLE |
		(as->asid << SMMU_TLB_FLUSH_ASID_SHIFT);
	smmu_write(smmu, val, SMMU_TLB_FLUSH);
	FLUSH_SMMU_REGS(as->smmu);

	return 0;
}

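/*
 * as->pte_count[] keeps one use count per page table: mapping into a
 * vacant PTE increments it and unmapping decrements it, so when the
 * count for a PDN drops to zero the backing page table can be freed
 * via free_ptbl() above.
 */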
static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova)
{
	unsigned long *pte;
	struct page *page;
	unsigned int *count;

	pte = locate_pte(as, iova, false, &page, &count);
	if (WARN_ON(!pte))
		return;

	if (WARN_ON(*pte == _PTE_VACANT(iova)))
		return;

	*pte = _PTE_VACANT(iova);
	FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
	flush_ptc_and_tlb(as->smmu, as, iova, pte, page, 0);
	if (!--(*count)) {
		free_ptbl(as, iova);
		smmu_flush_regs(as->smmu, 0);
	}
}

static void __smmu_iommu_map_pfn(struct smmu_as *as, dma_addr_t iova,
				 unsigned long pfn)
{
	struct smmu_device *smmu = as->smmu;
	unsigned long *pte;
	unsigned int *count;
	struct page *page;

	pte = locate_pte(as, iova, true, &page, &count);
	if (WARN_ON(!pte))
		return;

	if (*pte == _PTE_VACANT(iova))
		(*count)++;
	*pte = SMMU_PFN_TO_PTE(pfn, as->pte_attr);
	if (unlikely((*pte == _PTE_VACANT(iova))))
		(*count)--;
	FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
	flush_ptc_and_tlb(smmu, as, iova, pte, page, 0);
	put_signature(as, iova, pfn);
}

static int smmu_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct smmu_as *as = domain->priv;
	unsigned long pfn = __phys_to_pfn(pa);
	unsigned long flags;

	dev_dbg(as->smmu->dev, "[%d] %08lx:%08x\n", as->asid, iova, pa);

	if (!pfn_valid(pfn))
		return -ENOMEM;

	spin_lock_irqsave(&as->lock, flags);
	__smmu_iommu_map_pfn(as, iova, pfn);
	spin_unlock_irqrestore(&as->lock, flags);
	return 0;
}

static size_t smmu_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes)
{
	struct smmu_as *as = domain->priv;
	unsigned long flags;

	dev_dbg(as->smmu->dev, "[%d] %08lx\n", as->asid, iova);

	spin_lock_irqsave(&as->lock, flags);
	__smmu_iommu_unmap(as, iova);
	spin_unlock_irqrestore(&as->lock, flags);
	return SMMU_PAGE_SIZE;
}

static phys_addr_t smmu_iommu_iova_to_phys(struct iommu_domain *domain,
					   unsigned long iova)
{
	struct smmu_as *as = domain->priv;
	unsigned long *pte;
	unsigned int *count;
	struct page *page;
	unsigned long pfn;
	unsigned long flags;

	spin_lock_irqsave(&as->lock, flags);

	pte = locate_pte(as, iova, true, &page, &count);
	pfn = *pte & SMMU_PFN_MASK;
	WARN_ON(!pfn_valid(pfn));
	dev_dbg(as->smmu->dev,
		"iova:%08lx pfn:%08lx asid:%d\n", iova, pfn, as->asid);

	spin_unlock_irqrestore(&as->lock, flags);
	return PFN_PHYS(pfn);
}

static int smmu_iommu_domain_has_cap(struct iommu_domain *domain,
				     unsigned long cap)
{
	return 0;
}

static int smmu_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct smmu_as *as = domain->priv;
	struct smmu_device *smmu = as->smmu;
	struct smmu_client *client, *c;
	u32 map;
	int err;

	client = devm_kzalloc(smmu->dev, sizeof(*c), GFP_KERNEL);
	if (!client)
		return -ENOMEM;
	client->dev = dev;
	client->as = as;
	map = (unsigned long)dev->platform_data;
	if (!map)
		return -EINVAL;

	err = smmu_client_enable_hwgrp(client, map);
	if (err)
		goto err_hwgrp;

	spin_lock(&as->client_lock);
	list_for_each_entry(c, &as->client, list) {
		if (c->dev == dev) {
			dev_err(smmu->dev,
				"%s is already attached\n", dev_name(c->dev));
			err = -EINVAL;
			goto err_client;
		}
	}
	list_add(&client->list, &as->client);
	spin_unlock(&as->client_lock);

	/*
	 * Reserve "page zero" for AVP vectors using a common dummy
	 * page.
	 */
	if (map & HWG_AVPC) {
		struct page *page;

		page = as->smmu->avp_vector_page;
		__smmu_iommu_map_pfn(as, 0, page_to_pfn(page));

		pr_info("Reserve \"page zero\" for AVP vectors using a common dummy\n");
	}

	dev_dbg(smmu->dev, "%s is attached\n", dev_name(dev));
	return 0;

err_client:
	smmu_client_disable_hwgrp(client);
	spin_unlock(&as->client_lock);
err_hwgrp:
	devm_kfree(smmu->dev, client);
	return err;
}

static void smmu_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct smmu_as *as = domain->priv;
	struct smmu_device *smmu = as->smmu;
	struct smmu_client *c;

	spin_lock(&as->client_lock);

	list_for_each_entry(c, &as->client, list) {
		if (c->dev == dev) {
			smmu_client_disable_hwgrp(c);
			list_del(&c->list);
			dev_dbg(smmu->dev,
				"%s is detached\n", dev_name(dev));
			devm_kfree(smmu->dev, c);
			goto out;
		}
	}
	dev_err(smmu->dev, "Couldn't find %s\n", dev_name(dev));
out:
	spin_unlock(&as->client_lock);
}

static int smmu_iommu_domain_init(struct iommu_domain *domain)
{
	int i;
	unsigned long flags;
	struct smmu_as *as;
	struct smmu_device *smmu = smmu_handle;

	/* Look for a free AS with lock held */
	for (i = 0; i < smmu->num_as; i++) {
		struct smmu_as *tmp = &smmu->as[i];

		spin_lock_irqsave(&tmp->lock, flags);
		if (!tmp->pdir_page) {
			as = tmp;
			goto found;
		}
		spin_unlock_irqrestore(&tmp->lock, flags);
	}
	dev_err(smmu->dev, "no free AS\n");
	return -ENODEV;

found:
	if (alloc_pdir(as) < 0)
		goto err_alloc_pdir;

	spin_lock(&smmu->lock);

	/* Update PDIR register */
	smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
	smmu_write(smmu,
		   SMMU_MK_PDIR(as->pdir_page, as->pdir_attr), SMMU_PTB_DATA);
	FLUSH_SMMU_REGS(smmu);

	spin_unlock(&smmu->lock);

	spin_unlock_irqrestore(&as->lock, flags);
	domain->priv = as;

	dev_dbg(smmu->dev, "smmu_as@%p\n", as);
	return 0;

err_alloc_pdir:
	spin_unlock_irqrestore(&as->lock, flags);
	return -ENODEV;
}

static void smmu_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct smmu_as *as = domain->priv;
	struct smmu_device *smmu = as->smmu;
	unsigned long flags;

	spin_lock_irqsave(&as->lock, flags);

	if (as->pdir_page) {
		spin_lock(&smmu->lock);
		smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
		smmu_write(smmu, SMMU_PTB_DATA_RESET_VAL, SMMU_PTB_DATA);
		FLUSH_SMMU_REGS(smmu);
		spin_unlock(&smmu->lock);

		free_pdir(as);
	}

	if (!list_empty(&as->client)) {
		struct smmu_client *c;

		list_for_each_entry(c, &as->client, list)
			smmu_iommu_detach_dev(domain, c->dev);
	}

	spin_unlock_irqrestore(&as->lock, flags);

	domain->priv = NULL;
	dev_dbg(smmu->dev, "smmu_as@%p\n", as);
}

static struct iommu_ops smmu_iommu_ops = {
	.domain_init	= smmu_iommu_domain_init,
	.domain_destroy	= smmu_iommu_domain_destroy,
	.attach_dev	= smmu_iommu_attach_dev,
	.detach_dev	= smmu_iommu_detach_dev,
	.map		= smmu_iommu_map,
	.unmap		= smmu_iommu_unmap,
	.iova_to_phys	= smmu_iommu_iova_to_phys,
	.domain_has_cap	= smmu_iommu_domain_has_cap,
	.pgsize_bitmap	= SMMU_IOMMU_PGSIZES,
};
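
/*
 * Illustrative only (not part of this driver): a client consumes
 * these ops through the generic IOMMU API of this era, roughly
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *	err = iommu_attach_device(domain, dev);
 *	err = iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *
 * with each call dispatched to the corresponding smmu_iommu_*
 * handler above.
 */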

static int tegra_smmu_suspend(struct device *dev)
{
	struct smmu_device *smmu = dev_get_drvdata(dev);

	smmu->translation_enable_0 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_0);
	smmu->translation_enable_1 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_1);
	smmu->translation_enable_2 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_2);
	smmu->asid_security = smmu_read(smmu, SMMU_ASID_SECURITY);
	return 0;
}

static int tegra_smmu_resume(struct device *dev)
{
	struct smmu_device *smmu = dev_get_drvdata(dev);
	unsigned long flags;
	int err;

	spin_lock_irqsave(&smmu->lock, flags);
	err = smmu_setup_regs(smmu);
	spin_unlock_irqrestore(&smmu->lock, flags);
	return err;
}

static int tegra_smmu_probe(struct platform_device *pdev)
{
	struct smmu_device *smmu;
	struct device *dev = &pdev->dev;
	int i, asids, err = 0;
	dma_addr_t base;
	size_t size;
	const void *prop;

	if (smmu_handle)
		return -EIO;

	BUILD_BUG_ON(PAGE_SHIFT != SMMU_PAGE_SHIFT);

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate smmu_device\n");
		return -ENOMEM;
	}

	for (i = 0; i < ARRAY_SIZE(smmu->regs); i++) {
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			return -ENODEV;
		smmu->regs[i] = devm_request_and_ioremap(&pdev->dev, res);
		if (!smmu->regs[i])
			return -EBUSY;
	}

	err = of_get_dma_window(dev->of_node, NULL, 0, NULL, &base, &size);
	if (err)
		return -ENODEV;

	if (size & SMMU_PAGE_MASK)
		return -EINVAL;

	size >>= SMMU_PAGE_SHIFT;
	if (!size)
		return -EINVAL;

	prop = of_get_property(dev->of_node, "nvidia,#asids", NULL);
	if (!prop)
		return -ENODEV;
	asids = be32_to_cpup(prop);
	if (!asids)
		return -ENODEV;

	smmu->ahb = of_parse_phandle(dev->of_node, "nvidia,ahb", 0);
	if (!smmu->ahb)
		return -ENODEV;

	smmu->dev = dev;
	smmu->num_as = asids;
	smmu->iovmm_base = base;
	smmu->page_count = size;

	smmu->translation_enable_0 = ~0;
	smmu->translation_enable_1 = ~0;
	smmu->translation_enable_2 = ~0;
	smmu->asid_security = 0;

	smmu->as = devm_kzalloc(dev,
			sizeof(smmu->as[0]) * smmu->num_as, GFP_KERNEL);
	if (!smmu->as) {
		dev_err(dev, "failed to allocate smmu_as\n");
		err = -ENOMEM;
		goto fail;
	}

	for (i = 0; i < smmu->num_as; i++) {
		struct smmu_as *as = &smmu->as[i];

		as->smmu = smmu;
		as->asid = i;
		as->pdir_attr = _PDIR_ATTR;
		as->pde_attr = _PDE_ATTR;
		as->pte_attr = _PTE_ATTR;

		spin_lock_init(&as->lock);
		INIT_LIST_HEAD(&as->client);
	}
	spin_lock_init(&smmu->lock);
	err = smmu_setup_regs(smmu);
	if (err)
		goto fail;
	platform_set_drvdata(pdev, smmu);

	smmu->avp_vector_page = alloc_page(GFP_KERNEL);
	if (!smmu->avp_vector_page) {
		err = -ENOMEM;
		goto fail;
	}

	smmu_handle = smmu;
	return 0;

fail:
	if (smmu->avp_vector_page)
		__free_page(smmu->avp_vector_page);
	if (smmu && smmu->as) {
		for (i = 0; i < smmu->num_as; i++) {
			if (smmu->as[i].pdir_page) {
				ClearPageReserved(smmu->as[i].pdir_page);
				__free_page(smmu->as[i].pdir_page);
			}
		}
		devm_kfree(dev, smmu->as);
	}
	devm_kfree(dev, smmu);
	return err;
}

static int tegra_smmu_remove(struct platform_device *pdev)
{
	struct smmu_device *smmu = platform_get_drvdata(pdev);
	struct device *dev = smmu->dev;

	smmu_write(smmu, SMMU_CONFIG_DISABLE, SMMU_CONFIG);
	platform_set_drvdata(pdev, NULL);
	if (smmu->as) {
		int i;

		for (i = 0; i < smmu->num_as; i++)
			free_pdir(&smmu->as[i]);
		devm_kfree(dev, smmu->as);
	}
	if (smmu->avp_vector_page)
		__free_page(smmu->avp_vector_page);
	devm_kfree(dev, smmu);
	smmu_handle = NULL;
	return 0;
}

const struct dev_pm_ops tegra_smmu_pm_ops = {
	.suspend	= tegra_smmu_suspend,
	.resume		= tegra_smmu_resume,
};

#ifdef CONFIG_OF
static struct of_device_id tegra_smmu_of_match[] __devinitdata = {
	{ .compatible	= "nvidia,tegra30-smmu", },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_smmu_of_match);
#endif

static struct platform_driver tegra_smmu_driver = {
	.probe		= tegra_smmu_probe,
	.remove		= tegra_smmu_remove,
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "tegra-smmu",
		.pm	= &tegra_smmu_pm_ops,
		.of_match_table = of_match_ptr(tegra_smmu_of_match),
	},
};

static int __devinit tegra_smmu_init(void)
{
	bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
	return platform_driver_register(&tegra_smmu_driver);
}

static void __exit tegra_smmu_exit(void)
{
	platform_driver_unregister(&tegra_smmu_driver);
}

subsys_initcall(tegra_smmu_init);
module_exit(tegra_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for SMMU in Tegra30");
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
MODULE_ALIAS("platform:tegra-smmu");
MODULE_LICENSE("GPL v2");