blob: 3448a3d4bc64efc74d84a9c3e337a8989b108fd9 [file] [log] [blame]
Michael Ellerman5cd16ee2005-11-11 14:25:24 +11001#ifndef _ASM_POWERPC_PAGE_64_H
2#define _ASM_POWERPC_PAGE_64_H
Arnd Bergmann88ced032005-12-16 22:43:46 +01003#ifdef __KERNEL__
Michael Ellerman5cd16ee2005-11-11 14:25:24 +11004
/*
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

14/*
15 * We always define HW_PAGE_SHIFT to 12 as use of 64K pages remains Linux
16 * specific, every notion of page number shared with the firmware, TCEs,
17 * iommu, etc... still uses a page size of 4K.
18 */
19#define HW_PAGE_SHIFT 12
20#define HW_PAGE_SIZE (ASM_CONST(1) << HW_PAGE_SHIFT)
21#define HW_PAGE_MASK (~(HW_PAGE_SIZE-1))
22
23/*
24 * PAGE_FACTOR is the number of bits factor between PAGE_SHIFT and
25 * HW_PAGE_SHIFT, that is 4K pages.
26 */
27#define PAGE_FACTOR (PAGE_SHIFT - HW_PAGE_SHIFT)
28
Michael Ellerman5cd16ee2005-11-11 14:25:24 +110029/* Segment size */
30#define SID_SHIFT 28
31#define SID_MASK 0xfffffffffUL
32#define ESID_MASK 0xfffffffff0000000UL
33#define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK)
34
35#ifndef __ASSEMBLY__
36#include <asm/cache.h>
37
/* Scalar type underlying a PTE on 64-bit PowerPC (one 64-bit word). */
typedef unsigned long pte_basic_t;

40static __inline__ void clear_page(void *addr)
41{
42 unsigned long lines, line_size;
43
44 line_size = ppc64_caches.dline_size;
45 lines = ppc64_caches.dlines_per_page;
46
47 __asm__ __volatile__(
48 "mtctr %1 # clear_page\n\
491: dcbz 0,%0\n\
50 add %0,%0,%3\n\
51 bdnz+ 1b"
52 : "=r" (addr)
53 : "r" (lines), "0" (addr), "r" (line_size)
54 : "ctr", "memory");
55}
56
/* Assembly helper that copies exactly one 4K hardware page (defined elsewhere). */
extern void copy_4K_page(void *to, void *from);

#ifdef CONFIG_PPC_64K_PAGES
/*
 * A 64K Linux page is backed by (1 << PAGE_FACTOR) 4K hardware pages;
 * copy it as that sequence.  PAGE_FACTOR and HW_PAGE_SIZE are defined
 * above (PAGE_SHIFT - HW_PAGE_SHIFT and 4K respectively), replacing the
 * former hard-coded 12/4096 with the named constants.
 */
static inline void copy_page(void *to, void *from)
{
	unsigned int i;
	for (i = 0; i < (1 << PAGE_FACTOR); i++) {
		copy_4K_page(to, from);
		to += HW_PAGE_SIZE;
		from += HW_PAGE_SIZE;
	}
}
#else /* CONFIG_PPC_64K_PAGES */
/* 4K Linux pages: one hardware page per Linux page. */
static inline void copy_page(void *to, void *from)
{
	copy_4K_page(to, from);
}
#endif /* CONFIG_PPC_64K_PAGES */

76/* Log 2 of page table size */
77extern u64 ppc64_pft_size;
78
79/* Large pages size */
Andy Whitcroftb50ce232005-11-18 01:11:02 -080080#ifdef CONFIG_HUGETLB_PAGE
Michael Ellerman5cd16ee2005-11-11 14:25:24 +110081extern unsigned int HPAGE_SHIFT;
Andy Whitcroftb50ce232005-11-18 01:11:02 -080082#else
83#define HPAGE_SHIFT PAGE_SHIFT
84#endif
Michael Ellerman5cd16ee2005-11-11 14:25:24 +110085#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
86#define HPAGE_MASK (~(HPAGE_SIZE - 1))
87#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
88
89#endif /* __ASSEMBLY__ */
90
#ifdef CONFIG_PPC_MM_SLICES

/*
 * The address space is carved into slices: 256MB (2^28) slices below
 * SLICE_LOW_TOP (4GB) and 1TB (2^40) slices above it.
 */
#define SLICE_LOW_SHIFT		28
#define SLICE_HIGH_SHIFT	40

#define SLICE_LOW_TOP		(0x100000000ul)
#define SLICE_NUM_LOW		(SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
#define SLICE_NUM_HIGH		(PGTABLE_RANGE >> SLICE_HIGH_SHIFT)

#define GET_LOW_SLICE_INDEX(addr)	((addr) >> SLICE_LOW_SHIFT)
#define GET_HIGH_SLICE_INDEX(addr)	((addr) >> SLICE_HIGH_SHIFT)

#ifndef __ASSEMBLY__

/* Slice bitmaps; presumably one bit per slice (SLICE_NUM_LOW is 16,
 * so u16 fits) — TODO(review): confirm against mm/slice.c. */
struct slice_mask {
	u16 low_slices;
	u16 high_slices;
};

struct mm_struct;

/* Search for an unmapped area of @len bytes; @psize selects the MMU page
 * size, @topdown the search direction.  Implemented in the mm code. */
extern unsigned long slice_get_unmapped_area(unsigned long addr,
					     unsigned long len,
					     unsigned long flags,
					     unsigned int psize,
					     int topdown,
					     int use_cache);

/* MMU page size index in effect at @addr within @mm. */
extern unsigned int get_slice_psize(struct mm_struct *mm,
				    unsigned long addr);

extern void slice_init_context(struct mm_struct *mm, unsigned int psize);
extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);

#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
extern int is_hugepage_only_range(struct mm_struct *m,
				  unsigned long addr,
				  unsigned long len);

#endif /* __ASSEMBLY__ */
#else
#define slice_init()
#endif /* CONFIG_PPC_MM_SLICES */

#ifdef CONFIG_HUGETLB_PAGE

/* Advertise the arch-specific hugetlb hooks this architecture provides. */
#define ARCH_HAS_HUGETLB_FREE_PGD_RANGE
#define ARCH_HAS_SETCLEAR_HUGE_PTE
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA

#endif /* CONFIG_HUGETLB_PAGE */

#ifdef MODULE
/* Modules get only the alignment attribute. */
#define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
#else
/* Built-in code is additionally grouped into .data.page_aligned. */
#define __page_aligned \
	__attribute__((__aligned__(PAGE_SIZE), \
		__section__(".data.page_aligned")))
#endif

#define VM_DATA_DEFAULT_FLAGS \
	(test_thread_flag(TIF_32BIT) ? \
	 VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)

/*
 * This is the default if a program doesn't have a PT_GNU_STACK
 * program header entry. The PPC64 ELF ABI has a non-executable
 * stack by default, so in the absence of a PT_GNU_STACK program
 * header we turn execute permission off.
 */
#define VM_STACK_DEFAULT_FLAGS32	(VM_READ | VM_WRITE | VM_EXEC | \
					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_STACK_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_STACK_DEFAULT_FLAGS \
	(test_thread_flag(TIF_32BIT) ? \
	 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)

171#include <asm-generic/page.h>
172
Arnd Bergmann88ced032005-12-16 22:43:46 +0100173#endif /* __KERNEL__ */
Michael Ellerman5cd16ee2005-11-11 14:25:24 +1100174#endif /* _ASM_POWERPC_PAGE_64_H */