#ifndef _ASM_PAGE_H
#define _ASM_PAGE_H

#ifdef __KERNEL__

#include <linux/config.h>
#include <asm/virtconvert.h>
#include <asm/mem-layout.h>
#include <asm/sections.h>
#include <asm/setup.h>

#ifndef __ASSEMBLY__

#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
#define free_user_page(page, addr) free_page(addr)

#define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE)
#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE)

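/*
 * clear_user_page() and copy_user_page() are handed the user virtual address
 * and the struct page so that architectures with aliasing caches can flush
 * the right lines; here they are plain memset()/memcpy() and those extra
 * arguments are ignored.
 */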
#define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE)
#define copy_user_page(vto, vfrom, vaddr, topg) memcpy((vto), (vfrom), PAGE_SIZE)

/*
 * These are used to make use of C type-checking..
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long ste[64]; } pmd_t;
typedef struct { pmd_t pue[1]; } pud_t;
typedef struct { pud_t pge[1]; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x) ((x).pte)
#define pmd_val(x) ((x).ste[0])
#define pud_val(x) ((x).pue[0])
#define pgd_val(x) ((x).pge[0])
#define pgprot_val(x) ((x).pgprot)

#define __pte(x) ((pte_t) { (x) })
#define __pmd(x) ((pmd_t) { (x) })
#define __pud(x) ((pud_t) { (x) })
#define __pgd(x) ((pgd_t) { (x) })
#define __pgprot(x) ((pgprot_t) { (x) })
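
/*
 * Illustration only (v stands for an arbitrary unsigned long, it is not
 * defined here): wrapping each level in its own single-member struct lets
 * the compiler reject accidental mixing of page-table types:
 *
 *	pte_t pte = __pte(v);			- OK, explicit construction
 *	unsigned long raw = pte_val(pte);	- OK, explicit extraction
 *	pte_t bad = v;				- rejected: not a pte_t
 */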
#define PTE_MASK PAGE_MASK

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
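/*
 * For example, whatever the configured page size:
 *
 *	PAGE_ALIGN(0)             == 0
 *	PAGE_ALIGN(1)             == PAGE_SIZE
 *	PAGE_ALIGN(PAGE_SIZE)     == PAGE_SIZE
 *	PAGE_ALIGN(PAGE_SIZE + 1) == 2 * PAGE_SIZE
 */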

/* Pure 2^n version of get_order */
static inline int get_order(unsigned long size) __attribute_const__;
static inline int get_order(unsigned long size)
{
	int order;

	size = (size - 1) >> (PAGE_SHIFT - 1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}
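
/*
 * In other words, get_order(size) is the smallest n such that
 * (PAGE_SIZE << n) >= size, for any non-zero size.  For example:
 *
 *	get_order(1)              == 0
 *	get_order(PAGE_SIZE)      == 0
 *	get_order(PAGE_SIZE + 1)  == 1
 *	get_order(4 * PAGE_SIZE)  == 2
 */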

#define devmem_is_allowed(pfn) 1

#define __pa(vaddr) virt_to_phys((void *)(vaddr))
#define __va(paddr) phys_to_virt((unsigned long)(paddr))

#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;
extern unsigned long max_pfn;

#ifdef CONFIG_MMU
#define pfn_to_page(pfn) (mem_map + (pfn))
#define page_to_pfn(page) ((unsigned long) ((page) - mem_map))
#define pfn_valid(pfn) ((pfn) < max_mapnr)

#else
#define pfn_to_page(pfn) (&mem_map[(pfn) - (PAGE_OFFSET >> PAGE_SHIFT)])
#define page_to_pfn(page) ((PAGE_OFFSET >> PAGE_SHIFT) + (unsigned long) ((page) - mem_map))
#define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) < max_low_pfn)

#endif

#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
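
/*
 * With either set of definitions above, pfn_to_page() and page_to_pfn() are
 * exact inverses, so for any valid pfn:
 *
 *	page_to_pfn(pfn_to_page(pfn)) == pfn
 *
 * and virt_to_page(kaddr) yields the struct page covering a linearly mapped
 * kernel address kaddr.
 */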

#ifdef CONFIG_MMU
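/*
 * Default protection flags for a new data mapping (data segment, brk):
 * always readable and writable, and also executable if the task's
 * personality has READ_IMPLIES_EXEC set.
 */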
#define VM_DATA_DEFAULT_FLAGS \
	(VM_READ | VM_WRITE | \
	((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
	 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#endif

#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */

#ifdef CONFIG_CONTIGUOUS_PAGE_ALLOC
#define WANT_PAGE_VIRTUAL 1
#endif

#endif /* _ASM_PAGE_H */