/* $Id: pgtsrmmu.h,v 1.31 2000/07/16 21:48:52 anton Exp $
 * pgtsrmmu.h: SRMMU page table defines and code.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef _SPARC_PGTSRMMU_H
#define _SPARC_PGTSRMMU_H

#include <asm/page.h>

#ifdef __ASSEMBLY__
#include <asm/thread_info.h>	/* TI_UWINMASK for WINDOW_FLUSH */
#endif

/* Number of contexts is implementation-dependent; 64k is the most we support */
#define SRMMU_MAX_CONTEXTS	65536

/* PMD_SHIFT determines the size of the area a second-level page table entry can map */
#define SRMMU_REAL_PMD_SHIFT	18
#define SRMMU_REAL_PMD_SIZE	(1UL << SRMMU_REAL_PMD_SHIFT)
#define SRMMU_REAL_PMD_MASK	(~(SRMMU_REAL_PMD_SIZE-1))
#define SRMMU_REAL_PMD_ALIGN(__addr)	(((__addr)+SRMMU_REAL_PMD_SIZE-1)&SRMMU_REAL_PMD_MASK)

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define SRMMU_PGDIR_SHIFT	24
#define SRMMU_PGDIR_SIZE	(1UL << SRMMU_PGDIR_SHIFT)
#define SRMMU_PGDIR_MASK	(~(SRMMU_PGDIR_SIZE-1))
#define SRMMU_PGDIR_ALIGN(addr)	(((addr)+SRMMU_PGDIR_SIZE-1)&SRMMU_PGDIR_MASK)

#define SRMMU_REAL_PTRS_PER_PTE	64
#define SRMMU_REAL_PTRS_PER_PMD	64
#define SRMMU_PTRS_PER_PGD	256

#define SRMMU_REAL_PTE_TABLE_SIZE	(SRMMU_REAL_PTRS_PER_PTE*4)
#define SRMMU_PMD_TABLE_SIZE		(SRMMU_REAL_PTRS_PER_PMD*4)
#define SRMMU_PGD_TABLE_SIZE		(SRMMU_PTRS_PER_PGD*4)

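/* Illustrative sketch (not part of the original header): with 4KB pages,
 * the layout above splits a 32-bit virtual address into an 8-bit pgd
 * index, a 6-bit pmd index, a 6-bit pte index and a 12-bit page offset.
 * The helper name below is made up for illustration only.
 */
#ifndef __ASSEMBLY__
extern __inline__ void srmmu_example_vaddr_split(unsigned long vaddr,
						 unsigned int *pgd_idx,
						 unsigned int *pmd_idx,
						 unsigned int *pte_idx)
{
	*pgd_idx = vaddr >> SRMMU_PGDIR_SHIFT;			/* 0..255 */
	*pmd_idx = (vaddr >> SRMMU_REAL_PMD_SHIFT) &
		   (SRMMU_REAL_PTRS_PER_PMD - 1);		/* 0..63  */
	*pte_idx = (vaddr >> PAGE_SHIFT) &
		   (SRMMU_REAL_PTRS_PER_PTE - 1);		/* 0..63  */
}
#endif
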
/*
 * To support page tables in highmem, Linux provides APIs which return
 * struct page * and generally manipulate page tables while they are
 * not mapped into kernel space. Our hardware page tables are smaller
 * than a page, so we lump several hardware tables into one big,
 * page-sized software table.
 *
 * PMD_SHIFT determines the size of the area a second-level page table entry
 * can map, and our pmd_t is 16 times larger than normal. The values which
 * were once defined here are now generic for 4c and srmmu, so they're
 * found in pgtable.h.
 */
#define SRMMU_PTRS_PER_PMD	4

/* Definition of the values in the ET field of PTD's and PTE's */
#define SRMMU_ET_MASK		0x3
#define SRMMU_ET_INVALID	0x0
#define SRMMU_ET_PTD		0x1
#define SRMMU_ET_PTE		0x2
#define SRMMU_ET_REPTE		0x3	/* AIEEE, SuperSparc II reverse endian page! */

/* Physical page extraction from PTP's and PTE's. */
#define SRMMU_CTX_PMASK		0xfffffff0
#define SRMMU_PTD_PMASK		0xfffffff0
#define SRMMU_PTE_PMASK		0xffffff00

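/* Illustrative sketch (not part of the original header): recovering the
 * physical address an entry points at. For a PTD the masked bits are a
 * page-table pointer, for a PTE a physical page number; in both cases
 * the hardware stores paddr >> 4, so shift back up by 4. The helper
 * name is made up for illustration only.
 */
#ifndef __ASSEMBLY__
extern __inline__ unsigned long srmmu_example_entry_paddr(unsigned long entry)
{
	switch (entry & SRMMU_ET_MASK) {
	case SRMMU_ET_PTD:
		return (entry & SRMMU_PTD_PMASK) << 4;	/* next-level table */
	case SRMMU_ET_PTE:
		return (entry & SRMMU_PTE_PMASK) << 4;	/* page base address */
	default:
		return 0;				/* invalid/unhandled */
	}
}
#endif
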
/* The pte non-page bits. Some notes:
 * 1) cache, dirty, valid, and ref are frobbable
 *    for both supervisor and user pages.
 * 2) exec and write will only give the desired effect
 *    on user pages.
 * 3) use priv and priv_readonly for changing the
 *    characteristics of supervisor ptes.
 */
#define SRMMU_CACHE		0x80
#define SRMMU_DIRTY		0x40
#define SRMMU_REF		0x20
#define SRMMU_NOREAD		0x10
#define SRMMU_EXEC		0x08
#define SRMMU_WRITE		0x04
#define SRMMU_VALID		0x02	/* SRMMU_ET_PTE */
#define SRMMU_PRIV		0x1c
#define SRMMU_PRIV_RDONLY	0x18

#define SRMMU_FILE		0x40	/* Implemented in software */

#define SRMMU_PTE_FILE_SHIFT	8	/* == 32-PTE_FILE_MAX_BITS */

#define SRMMU_CHG_MASK		(0xffffff00 | SRMMU_REF | SRMMU_DIRTY)

/* SRMMU swap entry encoding
 *
 * We use 5 bits for the type and 19 for the offset. This gives us
 * 32 swapfiles of 2GB each (2^19 offsets of 4KB pages). Encoding
 * looks like:
 *
 * oooooooooooooooooootttttRRRRRRRR
 * fedcba9876543210fedcba9876543210
 *
 * The bottom 8 bits are reserved for protection and status bits, especially
 * FILE and PRESENT.
 */
#define SRMMU_SWP_TYPE_MASK	0x1f
#define SRMMU_SWP_TYPE_SHIFT	SRMMU_PTE_FILE_SHIFT
#define SRMMU_SWP_OFF_MASK	0x7ffff
#define SRMMU_SWP_OFF_SHIFT	(SRMMU_PTE_FILE_SHIFT + 5)

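/* Illustrative sketch (not part of the original header): packing and
 * unpacking a software swap entry with the masks above; the type sits
 * in bits 12:8, the offset in bits 31:13, and the low 8 bits stay clear
 * for the protection/status bits. Helper names are made up for
 * illustration only.
 */
#ifndef __ASSEMBLY__
extern __inline__ unsigned long srmmu_example_mk_swap(unsigned long type,
						      unsigned long offset)
{
	return ((type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT) |
	       ((offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT);
}

extern __inline__ unsigned long srmmu_example_swap_type(unsigned long entry)
{
	return (entry >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

extern __inline__ unsigned long srmmu_example_swap_offset(unsigned long entry)
{
	return (entry >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}
#endif
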
/* Some day I will implement true fine grained access bits for
 * user pages because the SRMMU gives us the capabilities to
 * enforce all the protection levels that vma's can have.
 * XXX But for now...
 */
#define SRMMU_PAGE_NONE    __pgprot(SRMMU_CACHE | \
				    SRMMU_PRIV | SRMMU_REF)
#define SRMMU_PAGE_SHARED  __pgprot(SRMMU_VALID | SRMMU_CACHE | \
				    SRMMU_EXEC | SRMMU_WRITE | SRMMU_REF)
#define SRMMU_PAGE_COPY    __pgprot(SRMMU_VALID | SRMMU_CACHE | \
				    SRMMU_EXEC | SRMMU_REF)
#define SRMMU_PAGE_RDONLY  __pgprot(SRMMU_VALID | SRMMU_CACHE | \
				    SRMMU_EXEC | SRMMU_REF)
#define SRMMU_PAGE_KERNEL  __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
				    SRMMU_DIRTY | SRMMU_REF)

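/* Illustrative sketch (not part of the original header): a hardware PTE
 * is the physical page number in bits 31:8 (i.e. paddr >> 4, masked)
 * OR'd with one of the protection encodings above. The helper name is
 * made up for illustration only.
 */
#ifndef __ASSEMBLY__
extern __inline__ unsigned long srmmu_example_mk_pte(unsigned long paddr,
						     pgprot_t prot)
{
	return ((paddr >> 4) & SRMMU_PTE_PMASK) | pgprot_val(prot);
}
#endif
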
/* SRMMU Register addresses in ASI 0x4. These are valid for all
 * current SRMMU implementations that exist.
 */
#define SRMMU_CTRL_REG		0x00000000
#define SRMMU_CTXTBL_PTR	0x00000100
#define SRMMU_CTX_REG		0x00000200
#define SRMMU_FAULT_STATUS	0x00000300
#define SRMMU_FAULT_ADDR	0x00000400

/* Spill the current task's live register windows out to its stack:
 * loop doing save until TI_UWINMASK is clear (counting the windows in
 * tmp1), then unwind with an equal number of restores.
 */
#define WINDOW_FLUSH(tmp1, tmp2)			\
	mov	0, tmp1;				\
98:	ld	[%g6 + TI_UWINMASK], tmp2;		\
	orcc	%g0, tmp2, %g0;				\
	add	tmp1, 1, tmp1;				\
	bne	98b;					\
	 save	%sp, -64, %sp;				\
99:	subcc	tmp1, 1, tmp1;				\
	bne	99b;					\
	 restore %g0, %g0, %g0;

#ifndef __ASSEMBLY__

/* This makes sense. Honest it does - Anton */
/* XXX Yes but it's ugly as sin. FIXME. -KMW */
extern void *srmmu_nocache_pool;
#define __nocache_pa(VADDR) (((unsigned long)VADDR) - SRMMU_NOCACHE_VADDR + __pa((unsigned long)srmmu_nocache_pool))
#define __nocache_va(PADDR) (__va((unsigned long)PADDR) - (unsigned long)srmmu_nocache_pool + SRMMU_NOCACHE_VADDR)
#define __nocache_fix(VADDR) __va(__nocache_pa(VADDR))

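/* Illustrative sketch: the two macros above are inverses for any address
 * inside the nocache pool, assuming SRMMU_NOCACHE_VADDR (defined
 * elsewhere) is the virtual base the pool is mapped at. The helper name
 * is made up for illustration only.
 */
extern __inline__ int srmmu_example_nocache_roundtrip(unsigned long vaddr)
{
	unsigned long paddr = __nocache_pa(vaddr);

	return (unsigned long)__nocache_va(paddr) == vaddr;
}
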
/* Accessing the MMU control register. */
extern __inline__ unsigned int srmmu_get_mmureg(void)
{
	unsigned int retval;

	__asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
			     "=r" (retval) :
			     "i" (ASI_M_MMUREGS));
	return retval;
}

extern __inline__ void srmmu_set_mmureg(unsigned long regval)
{
	__asm__ __volatile__("sta %0, [%%g0] %1\n\t" : :
			     "r" (regval), "i" (ASI_M_MMUREGS) : "memory");
}

extern __inline__ void srmmu_set_ctable_ptr(unsigned long paddr)
{
	paddr = ((paddr >> 4) & SRMMU_CTX_PMASK);
	__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
			     "r" (paddr), "r" (SRMMU_CTXTBL_PTR),
			     "i" (ASI_M_MMUREGS) :
			     "memory");
}

extern __inline__ unsigned long srmmu_get_ctable_ptr(void)
{
	unsigned int retval;

	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (SRMMU_CTXTBL_PTR),
			     "i" (ASI_M_MMUREGS));
	return (retval & SRMMU_CTX_PMASK) << 4;
}

extern __inline__ void srmmu_set_context(int context)
{
	__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
			     "r" (context), "r" (SRMMU_CTX_REG),
			     "i" (ASI_M_MMUREGS) : "memory");
}

extern __inline__ int srmmu_get_context(void)
{
	register int retval;

	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (SRMMU_CTX_REG),
			     "i" (ASI_M_MMUREGS));
	return retval;
}

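/* Illustrative sketch: how early setup might aim the MMU at a context
 * table and pick a context. ctx_table_paddr is a hypothetical physical
 * address of a suitably aligned table holding one root PTD per context;
 * the helper name is made up for illustration only.
 */
extern __inline__ void srmmu_example_switch_mmu(unsigned long ctx_table_paddr,
						int ctx)
{
	srmmu_set_ctable_ptr(ctx_table_paddr);	/* register holds paddr >> 4 */
	srmmu_set_context(ctx);			/* translations now use the
						 * root table for ctx */
}
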
extern __inline__ unsigned int srmmu_get_fstatus(void)
{
	unsigned int retval;

	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (SRMMU_FAULT_STATUS), "i" (ASI_M_MMUREGS));
	return retval;
}

extern __inline__ unsigned int srmmu_get_faddr(void)
{
	unsigned int retval;

	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (SRMMU_FAULT_ADDR), "i" (ASI_M_MMUREGS));
	return retval;
}

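/* Illustrative sketch: per the SPARC V8 reference MMU, reading the fault
 * status register clears it, so a trap handler samples the fault address
 * first and the status last. The FT (fault type) field sits in bits 4:2
 * of the status word. The helper name is made up for illustration only.
 */
extern __inline__ unsigned int srmmu_example_read_fault(unsigned long *faddr)
{
	*faddr = srmmu_get_faddr();		 /* grab the address first */
	return (srmmu_get_fstatus() >> 2) & 0x7; /* then the fault type */
}
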
/* This is guaranteed on all SRMMU's. */
extern __inline__ void srmmu_flush_whole_tlb(void)
{
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
			     "r" (0x400),	/* Flush entire TLB!! */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}

/* These flush types are not available on all chips... */
extern __inline__ void srmmu_flush_tlb_ctx(void)
{
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
			     "r" (0x300),	/* Flush TLB ctx.. */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}

extern __inline__ void srmmu_flush_tlb_region(unsigned long addr)
{
	addr &= SRMMU_PGDIR_MASK;
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
			     "r" (addr | 0x200),	/* Flush TLB region.. */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}

extern __inline__ void srmmu_flush_tlb_segment(unsigned long addr)
{
	addr &= SRMMU_REAL_PMD_MASK;
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
			     "r" (addr | 0x100),	/* Flush TLB segment.. */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}

extern __inline__ void srmmu_flush_tlb_page(unsigned long page)
{
	page &= PAGE_MASK;
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
			     "r" (page),	/* Flush TLB page.. */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}

extern __inline__ unsigned long srmmu_hwprobe(unsigned long vaddr)
{
	unsigned long retval;

	vaddr &= PAGE_MASK;
	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));

	return retval;
}

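/* Illustrative sketch: translating a virtual address by hand. The probe
 * returns the matching PTE, or 0 when nothing is mapped; combining its
 * page number with the page offset gives the physical address. The
 * helper name is made up for illustration only.
 */
extern __inline__ unsigned long srmmu_example_va_to_pa(unsigned long vaddr)
{
	unsigned long pte = srmmu_hwprobe(vaddr);

	if (!pte)
		return 0;	/* no translation */
	return ((pte & SRMMU_PTE_PMASK) << 4) | (vaddr & ~PAGE_MASK);
}
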
extern __inline__ int
srmmu_get_pte (unsigned long addr)
{
	register unsigned long entry;

	__asm__ __volatile__("\n\tlda [%1] %2,%0\n\t" :
			     "=r" (entry) :
			     "r" ((addr & 0xfffff000) | 0x400), "i" (ASI_M_FLUSH_PROBE));
	return entry;
}

extern unsigned long (*srmmu_read_physical)(unsigned long paddr);
extern void (*srmmu_write_physical)(unsigned long paddr, unsigned long word);

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC_PGTSRMMU_H) */