#ifndef _PPC_PAGE_H
#define _PPC_PAGE_H

#include <linux/config.h>
#include <asm/asm-compat.h>

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(ASM_CONST(1) << PAGE_SHIFT)

/*
 * Subtle: this is an int (not an unsigned long) and so it
 * gets extended to 64 bits the way we want (i.e. with 1s). -- paulus
 */
#define PAGE_MASK	(~((1 << PAGE_SHIFT) - 1))
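
/*
 * Worked example (illustrative only; assumes PAGE_SHIFT == 12 and a
 * 32-bit int): PAGE_MASK is the int 0xfffff000, i.e. -4096.  When it is
 * used against a 64-bit value (CONFIG_PTE_64BIT), the int sign-extends,
 * so the high physical-address bits survive the mask:
 *
 *	unsigned long long pa = 0x212345678ULL;		(a 36-bit phys addr)
 *	pa & PAGE_MASK == 0x212345000ULL;		(bits 32+ preserved)
 *
 * An unsigned mask would zero-extend to 0x00000000fffff000 and clear them.
 */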

#ifdef __KERNEL__
#include <linux/config.h>

/* This must match what is in arch/ppc/Makefile */
#define PAGE_OFFSET	CONFIG_KERNEL_START
#define KERNELBASE	PAGE_OFFSET

#ifndef __ASSEMBLY__

/*
 * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
 * physical addressing.  For now this is just the IBM PPC440.
 */
#ifdef CONFIG_PTE_64BIT
typedef unsigned long long pte_basic_t;
#define PTE_SHIFT	(PAGE_SHIFT - 3)	/* 512 ptes per page */
#define PTE_FMT		"%16Lx"
#else
typedef unsigned long pte_basic_t;
#define PTE_SHIFT	(PAGE_SHIFT - 2)	/* 1024 ptes per page */
#define PTE_FMT		"%.8lx"
#endif

/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr,size)	(((addr)+((size)-1))&(~((size)-1)))
#define _ALIGN_DOWN(addr,size)	((addr)&(~((size)-1)))

/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr,size)	_ALIGN_UP(addr,size)

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	_ALIGN(addr, PAGE_SIZE)
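
/*
 * Example (illustrative only; size must be a power of two, and PAGE_SIZE
 * here is 0x1000):
 *
 *	_ALIGN_UP(0x1234, 0x1000)	== 0x2000
 *	_ALIGN_UP(0x2000, 0x1000)	== 0x2000	(already aligned)
 *	_ALIGN_DOWN(0x1234, 0x1000)	== 0x1000
 *	PAGE_ALIGN(0x1234)		== 0x2000
 */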


#undef STRICT_MM_TYPECHECKS

#ifdef STRICT_MM_TYPECHECKS
/*
 * These are used to make use of C type-checking..
 */
typedef struct { pte_basic_t pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x)	((x).pte)
#define pmd_val(x)	((x).pmd)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) } )
#define __pmd(x)	((pmd_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

#else
/*
 * .. while these make it easier on the compiler
 */
typedef pte_basic_t pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;

#define pte_val(x)	(x)
#define pmd_val(x)	(x)
#define pgd_val(x)	(x)
#define pgprot_val(x)	(x)

#define __pte(x)	(x)
#define __pmd(x)	(x)
#define __pgd(x)	(x)
#define __pgprot(x)	(x)

#endif
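
/*
 * Either way, page-table code is expected to go through the accessors and
 * constructors above, so it builds with or without STRICT_MM_TYPECHECKS
 * (sketch only; "entry" and "flags" are placeholder names):
 *
 *	pte_t pte = __pte(entry);
 *	if (pte_val(pte) & flags)
 *		do_something();
 *
 * With STRICT_MM_TYPECHECKS the wrapper structs make mixing pte_t, pmd_t,
 * pgd_t or plain integers a compile-time error; without it they are all
 * bare integers and such mix-ups compile silently.
 */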

struct page;
extern void clear_pages(void *page, int order);
static inline void clear_page(void *page) { clear_pages(page, 0); }
extern void copy_page(void *to, void *from);
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
			   struct page *pg);

#ifndef CONFIG_APUS
#define PPC_MEMSTART	0
#define PPC_PGSTART	0
#define PPC_MEMOFFSET	PAGE_OFFSET
#else
extern unsigned long ppc_memstart;
extern unsigned long ppc_pgstart;
extern unsigned long ppc_memoffset;
#define PPC_MEMSTART	ppc_memstart
#define PPC_PGSTART	ppc_pgstart
#define PPC_MEMOFFSET	ppc_memoffset
#endif

#if defined(CONFIG_APUS) && !defined(MODULE)
/* map phys->virtual and virtual->phys for RAM pages */
static inline unsigned long ___pa(unsigned long v)
{
	unsigned long p;
	/*
	 * The addis adds -PAGE_OFFSET in the upper halfword; the .vtop_fixup
	 * section records the address of the addis (label 1:) so the
	 * immediate can be patched at boot, once the real RAM offset on
	 * APUS is known.
	 */
	asm volatile ("1: addis %0, %1, %2;"
		".section \".vtop_fixup\",\"aw\";"
		".align  1;"
		".long   1b;"
		".previous;"
		: "=r" (p)
		: "b" (v), "K" (((-PAGE_OFFSET) >> 16) & 0xffff));

	return p;
}
static inline void* ___va(unsigned long p)
{
	unsigned long v;
	/* Same trick in the other direction, patched via .ptov_fixup. */
	asm volatile ("1: addis %0, %1, %2;"
		".section \".ptov_fixup\",\"aw\";"
		".align  1;"
		".long   1b;"
		".previous;"
		: "=r" (v)
		: "b" (p), "K" (((PAGE_OFFSET) >> 16) & 0xffff));

	return (void*) v;
}
#else
#define ___pa(vaddr) ((vaddr)-PPC_MEMOFFSET)
#define ___va(paddr) ((paddr)+PPC_MEMOFFSET)
#endif

extern int page_is_ram(unsigned long pfn);

#define __pa(x)		___pa((unsigned long)(x))
#define __va(x)		((void *)(___va((unsigned long)(x))))

#define pfn_to_page(pfn)	(mem_map + ((pfn) - PPC_PGSTART))
#define page_to_pfn(page)	((unsigned long)((page) - mem_map) + PPC_PGSTART)
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_virt(page)	__va(page_to_pfn(page) << PAGE_SHIFT)

#define pfn_valid(pfn)		(((pfn) - PPC_PGSTART) < max_mapnr)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
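
/*
 * Worked example for the non-APUS case (illustrative only; assumes the
 * usual PAGE_OFFSET of 0xc0000000, so PPC_MEMOFFSET == 0xc0000000 and
 * PPC_PGSTART == 0):
 *
 *	__pa(0xc0003000)		== 0x3000
 *	__va(0x3000)			== (void *)0xc0003000
 *	virt_to_page(0xc0003000)	== &mem_map[3]		(pfn 3)
 *	page_to_virt(&mem_map[3])	== (void *)0xc0003000
 *	pfn_valid(3)			is true while 3 < max_mapnr
 */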

/* Pure 2^n version of get_order */
extern __inline__ int get_order(unsigned long size)
{
	int lz;

	size = (size-1) >> PAGE_SHIFT;
	asm ("cntlzw %0,%1" : "=r" (lz) : "r" (size));
	return 32 - lz;
}
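
/*
 * Worked example (illustrative only; PAGE_SHIFT == 12, so one page is 4K):
 *
 *	get_order(0x1000): size = 0xfff  >> 12 = 0, cntlzw(0) = 32, order 0 (1 page)
 *	get_order(0x1001): size = 0x1000 >> 12 = 1, cntlzw(1) = 31, order 1 (2 pages)
 *	get_order(0x4000): size = 0x3fff >> 12 = 3, cntlzw(3) = 30, order 2 (4 pages)
 */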

#endif /* __ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
#define __HAVE_ARCH_GATE_AREA		1

#endif /* __KERNEL__ */
#endif /* _PPC_PAGE_H */