/* $Id: page.h,v 1.39 2002/02/09 19:49:31 davem Exp $ */

#ifndef _SPARC64_PAGE_H
#define _SPARC64_PAGE_H

#include <linux/config.h>
#include <asm/const.h>

#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
#define PAGE_SHIFT	13
#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
#define PAGE_SHIFT	16
#elif defined(CONFIG_SPARC64_PAGE_SIZE_512KB)
#define PAGE_SHIFT	19
#elif defined(CONFIG_SPARC64_PAGE_SIZE_4MB)
#define PAGE_SHIFT	22
#else
#error No page size specified in kernel configuration
#endif

#define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))
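
/* For example, with the default 8KB configuration (PAGE_SHIFT == 13),
 * PAGE_SIZE evaluates to 0x2000 and PAGE_MASK to 0xffffffffffffe000,
 * so (addr & PAGE_MASK) rounds an address down to its page boundary.
 */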

/* Flushing for D-cache alias handling is only needed if
 * the page size is smaller than 16K.
 */
#if PAGE_SHIFT < 14
#define DCACHE_ALIASING_POSSIBLE
#endif
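
/* Note: the 16K figure corresponds to the virtually indexed, physically
 * tagged L1 D-cache found on UltraSPARC.  With 8K pages one cache index
 * bit lies above the page offset, so two virtual mappings of the same
 * physical page can land in different cache lines; with 16K or larger
 * pages the index is fully covered by the page offset and no aliasing
 * can occur.
 */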

#ifdef __KERNEL__

#ifndef __ASSEMBLY__

extern void _clear_page(void *page);
#define clear_page(X)	_clear_page((void *)(X))
struct page;
extern void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
#define copy_page(X,Y)	memcpy((void *)(X), (void *)(Y), PAGE_SIZE)
extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage);

/* Unlike sparc32, sparc64's parameter passing API is more
 * sane in that structures which are small enough are passed
 * in registers instead of on the stack.  Thus, setting
 * STRICT_MM_TYPECHECKS does not generate worse code so
 * let's enable it to get the type checking.
 */

#define STRICT_MM_TYPECHECKS

#ifdef STRICT_MM_TYPECHECKS
/* These are used to make use of C type-checking.. */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long iopte; } iopte_t;
typedef struct { unsigned int pmd; } pmd_t;
typedef struct { unsigned int pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x)	((x).pte)
#define iopte_val(x)	((x).iopte)
#define pmd_val(x)	((x).pmd)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) } )
#define __iopte(x)	((iopte_t) { (x) } )
#define __pmd(x)	((pmd_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

#else
/* .. while these make it easier on the compiler */
typedef unsigned long pte_t;
typedef unsigned long iopte_t;
typedef unsigned int pmd_t;
typedef unsigned int pgd_t;
typedef unsigned long pgprot_t;

#define pte_val(x)	(x)
#define iopte_val(x)	(x)
#define pmd_val(x)	(x)
#define pgd_val(x)	(x)
#define pgprot_val(x)	(x)

#define __pte(x)	(x)
#define __iopte(x)	(x)
#define __pmd(x)	(x)
#define __pgd(x)	(x)
#define __pgprot(x)	(x)

#endif /* (STRICT_MM_TYPECHECKS) */
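
/* Illustrative example, not part of the header: with the struct
 * wrappers above, accidentally mixing page table types is caught at
 * compile time, e.g.
 *
 *	pte_t pte = __pte(0UL);
 *	pgprot_t prot = pte;		(error: incompatible types)
 *
 * whereas with the bare scalar typedefs in the #else branch both
 * sides are plain integers and the same assignment compiles silently.
 */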

#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define HPAGE_SHIFT		22
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
#define HPAGE_SHIFT		19
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define HPAGE_SHIFT		16
#endif

#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1UL))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#define ARCH_HAS_SETCLEAR_HUGE_PTE
#define ARCH_HAS_HUGETLB_PREFAULT_HOOK
#endif
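
/* For example, with 4MB huge pages (HPAGE_SHIFT == 22) on top of the
 * default 8KB base pages (PAGE_SHIFT == 13), HUGETLB_PAGE_ORDER is 9,
 * i.e. each huge page spans 2^9 = 512 base pages.
 */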

#define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_32BIT) ? \
				 (_AC(0x0000000070000000,UL)) : (PAGE_OFFSET))

#endif /* !(__ASSEMBLY__) */

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
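
/* E.g. with 8K pages, PAGE_ALIGN(0x2001) == 0x4000 while
 * PAGE_ALIGN(0x2000) == 0x2000: an address already on a page
 * boundary is left unchanged.
 */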

/* We used to stick this into a hard-coded global register (%g4)
 * but that does not make sense anymore.
 */
#define PAGE_OFFSET		_AC(0xFFFFF80000000000,UL)

#ifndef __ASSEMBLY__

#define __pa(x)			((unsigned long)(x) - PAGE_OFFSET)
#define __va(x)			((void *)((unsigned long) (x) + PAGE_OFFSET))
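
/* Note that __pa()/__va() are a simple linear offset from PAGE_OFFSET,
 * so they are only meaningful for addresses in the kernel's identity
 * mapping of physical memory, not for vmalloc or user addresses.
 */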

/* PFNs are real physical page numbers.  However, mem_map only begins to record
 * per-page information starting at pfn_base.  This is to handle systems where
 * the first physical page in the machine is at some huge physical address,
 * such as 4GB.  This is common on a partitioned E10000, for example.
 */
extern struct page *pfn_to_page(unsigned long pfn);
extern unsigned long page_to_pfn(struct page *);

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr)>>PAGE_SHIFT)

#define pfn_valid(pfn)		(((pfn)-(pfn_base)) < max_mapnr)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
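
/* pfn_valid() relies on unsigned arithmetic (assuming pfn_base and
 * max_mapnr are unsigned long, as elsewhere in the sparc64 code): if
 * pfn < pfn_base the subtraction wraps around to a huge value and the
 * single comparison rejects it, so both bounds are checked at once.
 */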

#define virt_to_phys __pa
#define phys_to_virt __va

#endif /* !(__ASSEMBLY__) */

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#endif /* !(__KERNEL__) */

#include <asm-generic/page.h>

#endif /* !(_SPARC64_PAGE_H) */