#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/* asm/pgtable.h: Defines and functions used to work
 * with Sparc page tables.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/const.h>

#ifndef __ASSEMBLY__
#include <asm-generic/4level-fixup.h>

#include <linux/spinlock.h>
#include <linux/swap.h>
#include <asm/types.h>
#include <asm/pgtsrmmu.h>
#include <asm/oplib.h>
#include <asm/btfixup.h>
#include <asm/cpu_type.h>


struct vm_area_struct;
struct page;

extern void load_mmu(void);
extern unsigned long calc_highpages(void);

#define pte_ERROR(e)   __builtin_trap()
#define pmd_ERROR(e)   __builtin_trap()
#define pgd_ERROR(e)   __builtin_trap()

#define PMD_SHIFT		22
#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PMD_ALIGN(__addr)	(((__addr) + ~PMD_MASK) & PMD_MASK)
#define PGDIR_SHIFT		SRMMU_PGDIR_SHIFT
#define PGDIR_SIZE		SRMMU_PGDIR_SIZE
#define PGDIR_MASK		SRMMU_PGDIR_MASK
#define PTRS_PER_PTE		1024
#define PTRS_PER_PMD		SRMMU_PTRS_PER_PMD
#define PTRS_PER_PGD		SRMMU_PTRS_PER_PGD
#define USER_PTRS_PER_PGD	PAGE_OFFSET / SRMMU_PGDIR_SIZE
#define FIRST_USER_ADDRESS	0
#define PTE_SIZE		(PTRS_PER_PTE*4)

#define PAGE_NONE	SRMMU_PAGE_NONE
#define PAGE_SHARED	SRMMU_PAGE_SHARED
#define PAGE_COPY	SRMMU_PAGE_COPY
#define PAGE_READONLY	SRMMU_PAGE_RDONLY
#define PAGE_KERNEL	SRMMU_PAGE_KERNEL

/* Top-level page directory */
extern pgd_t swapper_pg_dir[1024];

extern void paging_init(void);

extern unsigned long ptr_in_current_pgd;

/*         xwr */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED

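/*
 * Roughly how these get used: the generic mm code builds its
 * protection_map[] from the __P/__S entries above and indexes it with
 * the mmap protection bits in xwr order.  A MAP_PRIVATE
 * PROT_READ|PROT_WRITE mapping (xwr == 011), for instance, ends up
 * with __P011 == PAGE_COPY so the first write faults and is handled
 * as copy-on-write, while the same protections on a MAP_SHARED
 * mapping give __S011 == PAGE_SHARED.
 */
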
extern int num_contexts;

/* The first physical page can be anywhere; the following is needed so that
 * va-->pa and vice versa conversions work properly without a performance
 * hit for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t * __bad_pagetable(void);
extern pte_t __bad_page(void);
extern unsigned long empty_zero_page;

#define BAD_PAGETABLE	__bad_pagetable()
#define BAD_PAGE	__bad_page()
#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
	__asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr));
	return value;
}
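
/*
 * A rough sketch of why set_pte() below goes through srmmu_swap()
 * rather than a plain store: the hardware table walk can update the
 * ref/mod bits of an entry at any time, and the single atomic swap
 * keeps such an update and the software store of a new pte value from
 * interleaving.  The old value returned by the swap is simply
 * discarded by set_pte().
 */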

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();
	return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
}

BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page_vaddr, pgd_t)

#define pgd_page_vaddr(pgd)	BTFIXUP_CALL(pgd_page_vaddr)(pgd)

static inline int pte_present(pte_t pte)
{
	return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
}
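
/*
 * A brief aside (sketch): the low bits of every srmmu table entry form
 * its Entry Type field.  A valid leaf entry has ET == SRMMU_ET_PTE,
 * which is what pte_present() checks above, while pgd/pmd entries that
 * point to a lower-level table carry ET == SRMMU_ET_PTD, which is what
 * pmd_present()/pgd_present() below test.
 */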

static inline int pte_none(pte_t pte)
{
	return !pte_val(pte);
}

static inline void __pte_clear(pte_t *ptep)
{
	set_pte(ptep, __pte(0));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	__pte_clear(ptep);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pmd_present(pmd_t pmd)
{
	return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_val(pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	int i;
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
		set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
}
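
/*
 * Why pmd_clear() loops (sketch): Linux's view of a pte page has
 * PTRS_PER_PTE entries, but the srmmu hardware works with smaller
 * tables of SRMMU_REAL_PTRS_PER_PTE entries each.  A software pmd_t
 * therefore carries one hardware page-table descriptor per such piece
 * in pmdv[], and clearing the pmd means clearing every one of them.
 */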

static inline int pgd_none(pgd_t pgd)
{
	return !(pgd_val(pgd) & 0xFFFFFFF);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pgd_present(pgd_t pgd)
{
	return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pte((pte_t *)pgdp, __pte(0));
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & SRMMU_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & SRMMU_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & SRMMU_REF;
}

/*
 * The following only work if pte_present() is not true.
 */
BTFIXUPDEF_HALF(pte_filei)

static int pte_file(pte_t pte) __attribute_const__;
static inline int pte_file(pte_t pte)
{
	return pte_val(pte) & BTFIXUP_HALF(pte_filei);
}

static inline int pte_special(pte_t pte)
{
	return 0;
}

/*
 */
BTFIXUPDEF_HALF(pte_wrprotecti)
BTFIXUPDEF_HALF(pte_mkcleani)
BTFIXUPDEF_HALF(pte_mkoldi)

static pte_t pte_wrprotect(pte_t pte) __attribute_const__;
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_wrprotecti));
}

static pte_t pte_mkclean(pte_t pte) __attribute_const__;
static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkcleani));
}

static pte_t pte_mkold(pte_t pte) __attribute_const__;
static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkoldi));
}

BTFIXUPDEF_CALL_CONST(pte_t, pte_mkwrite, pte_t)
BTFIXUPDEF_CALL_CONST(pte_t, pte_mkdirty, pte_t)
BTFIXUPDEF_CALL_CONST(pte_t, pte_mkyoung, pte_t)

#define pte_mkwrite(pte)	BTFIXUP_CALL(pte_mkwrite)(pte)
#define pte_mkdirty(pte)	BTFIXUP_CALL(pte_mkdirty)(pte)
#define pte_mkyoung(pte)	BTFIXUP_CALL(pte_mkyoung)(pte)

#define pte_mkspecial(pte)	(pte)

#define pfn_pte(pfn, prot)	mk_pte(pfn_to_page(pfn), prot)

static inline unsigned long pte_pfn(pte_t pte)
{
	if (srmmu_device_memory(pte_val(pte))) {
		/* Just return something that will cause
		 * pfn_valid() to return false.  This makes
		 * copy_one_pte() just copy the PTE over
		 * directly.
		 */
		return ~0UL;
	}
	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
	return __pte(((page) >> 4) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}
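
/*
 * A worked example of the encoding above (illustrative numbers only):
 * an srmmu pte stores the physical address shifted right by four, with
 * the low bits left free for the access/cacheable flags.  So
 * mk_pte_phys(0x12345000, prot) yields 0x01234500 | pgprot_val(prot),
 * and mk_pte_io() with space 0xe would OR in 0xe0000000 to select the
 * 4-bit I/O space of the 36-bit physical address.
 */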
Sam Ravnborga439fe52008-07-27 23:00:59 +0200310
Sam Ravnborgafaedde2012-05-11 11:35:17 +0000311#define pgprot_noncached pgprot_noncached
312static inline pgprot_t pgprot_noncached(pgprot_t prot)
313{
314 prot &= ~__pgprot(SRMMU_CACHE);
315 return prot;
316}
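
/*
 * Typical (illustrative) use: a driver mapping device registers to
 * user space would clear the cacheable bit on the vma's protection
 * before remapping, e.g.
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	io_remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 *
 * so the resulting ptes have SRMMU_CACHE cleared.
 */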

BTFIXUPDEF_INT(pte_modify_mask)

static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & BTFIXUP_INT(pte_modify_mask)) |
		pgprot_val(newprot));
}

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */
BTFIXUPDEF_CALL(pmd_t *, pmd_offset, pgd_t *, unsigned long)
#define pmd_offset(dir,addr) BTFIXUP_CALL(pmd_offset)(dir,addr)

/* Find an entry in the third-level page table.. */
BTFIXUPDEF_CALL(pte_t *, pte_offset_kernel, pmd_t *, unsigned long)
#define pte_offset_kernel(dir,addr) BTFIXUP_CALL(pte_offset_kernel)(dir,addr)

/*
 * This shortcut works on sun4m (and sun4d) because the nocache area is static.
 */
#define pte_offset_map(d, a)	pte_offset_kernel(d,a)
#define pte_unmap(pte)		do{}while(0)
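
/*
 * Putting the three lookups together, a full walk to the pte for a
 * kernel address looks roughly like this (sketch; locking and
 * presence checks omitted):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */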

struct seq_file;
BTFIXUPDEF_CALL(void, mmu_info, struct seq_file *)

#define mmu_info(p) BTFIXUP_CALL(mmu_info)(p)

/* Fault handler stuff... */
#define FAULT_CODE_PROT		0x1
#define FAULT_CODE_WRITE	0x2
#define FAULT_CODE_USER		0x4

BTFIXUPDEF_CALL(void, update_mmu_cache, struct vm_area_struct *, unsigned long, pte_t *)

#define update_mmu_cache(vma,addr,ptep) BTFIXUP_CALL(update_mmu_cache)(vma,addr,ptep)

BTFIXUPDEF_CALL(void, sparc_mapiorange, unsigned int, unsigned long,
		unsigned long, unsigned int)
BTFIXUPDEF_CALL(void, sparc_unmapiorange, unsigned long, unsigned int)
#define sparc_mapiorange(bus,pa,va,len) BTFIXUP_CALL(sparc_mapiorange)(bus,pa,va,len)
#define sparc_unmapiorange(va,len) BTFIXUP_CALL(sparc_unmapiorange)(va,len)

extern int invalid_segment;

/* Encode and de-code a swap entry */
BTFIXUPDEF_CALL(unsigned long, __swp_type, swp_entry_t)
BTFIXUPDEF_CALL(unsigned long, __swp_offset, swp_entry_t)
BTFIXUPDEF_CALL(swp_entry_t, __swp_entry, unsigned long, unsigned long)

#define __swp_type(__x)			BTFIXUP_CALL(__swp_type)(__x)
#define __swp_offset(__x)		BTFIXUP_CALL(__swp_offset)(__x)
#define __swp_entry(__type,__off)	BTFIXUP_CALL(__swp_entry)(__type,__off)

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
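
/*
 * Round trip (sketch): when a page is swapped out, its pte is replaced
 * by __swp_entry_to_pte(__swp_entry(type, offset)), a non-present
 * entry that only records where the page went.  On the swap-in path
 * __pte_to_swp_entry() plus __swp_type()/__swp_offset() recover the
 * two halves; the actual bit layout is srmmu-specific and patched in
 * at run time via btfixup.
 */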

/* file-offset-in-pte helpers */
static inline unsigned long pte_to_pgoff(pte_t pte)
{
	return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT;
}

static inline pte_t pgoff_to_pte(unsigned long pgoff)
{
	return __pte((pgoff << SRMMU_PTE_FILE_SHIFT) | SRMMU_FILE);
}

/*
 * This is made a constant because mm/fremap.c required a constant.
 */
#define PTE_FILE_MAX_BITS 24
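
/*
 * In other words (sketch): a nonlinear file pte stores a file page
 * offset instead of a physical page, tagged with SRMMU_FILE so it can
 * be told apart from a swap entry.  For any pgoff below
 * (1UL << PTE_FILE_MAX_BITS), pte_to_pgoff(pgoff_to_pte(pgoff)) gives
 * the offset back.
 */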

/*
 */
struct ctx_list {
	struct ctx_list *next;
	struct ctx_list *prev;
	unsigned int ctx_number;
	struct mm_struct *ctx_mm;
};

extern struct ctx_list *ctx_list_pool;  /* Dynamically allocated */
extern struct ctx_list ctx_free;        /* Head of free list */
extern struct ctx_list ctx_used;        /* Head of used contexts list */

#define NO_CONTEXT     -1

static inline void remove_from_ctx_list(struct ctx_list *entry)
{
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
}

static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
{
	entry->next = head;
	(entry->prev = head->prev)->next = entry;
	head->prev = entry;
}
#define add_to_free_ctxlist(entry)	add_to_ctx_list(&ctx_free, entry)
#define add_to_used_ctxlist(entry)	add_to_ctx_list(&ctx_used, entry)
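
/*
 * Typical flow (a sketch of what the srmmu context allocator does, not
 * the exact call sites): handing a free MMU context to an mm looks
 * like
 *
 *	struct ctx_list *ctxp = ctx_free.next;
 *
 *	remove_from_ctx_list(ctxp);
 *	add_to_used_ctxlist(ctxp);
 *	ctxp->ctx_mm = mm;
 *
 * and the reverse moves the entry back onto ctx_free when the context
 * is given up.
 */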

static inline unsigned long
__get_phys (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
	default:
		return 0;
	}
}

static inline int
__get_iospace (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return (srmmu_get_pte (addr) >> 28);
	default:
		return -1;
	}
}

extern unsigned long *sparc_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) \
	(test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffUL)

extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
			   unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long long offset, space, phys_base;

	offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
	space = GET_IOSPACE(pfn);
	phys_base = offset | (space << 32ULL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
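
/*
 * Worked example (illustrative numbers, assuming the usual 4K page
 * size, i.e. PAGE_SHIFT == 12): a caller that wants physical page
 * 0x12345 in I/O space 0xe passes pfn = MK_IOSPACE_PFN(0xe, 0x12345)
 * == 0xe0012345.  io_remap_pfn_range() then rebuilds the 36-bit
 * physical address as 0xe12345000 and hands remap_pfn_range() the
 * pfn 0xe12345.
 */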

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({ \
	int __changed = !pte_same(*(__ptep), __entry); \
	if (__changed) { \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
		flush_tlb_page(__vma, __address); \
	} \
	__changed; \
})

#include <asm-generic/pgtable.h>

#endif /* !(__ASSEMBLY__) */

#define VMALLOC_START           _AC(0xfe600000,UL)
#define VMALLOC_END             _AC(0xffc00000,UL)
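
/*
 * For reference, that is a 0xffc00000 - 0xfe600000 == 0x01600000 byte
 * (22 MB) window for vmalloc mappings.
 */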

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* !(_SPARC_PGTABLE_H) */