#ifndef _ASM_X86_PGTABLE_32_H
#define _ASM_X86_PGTABLE_32_H

#include <asm/pgtable_32_types.h>

/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */
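
/*
 * For illustration, assuming the folding described above: a software walk
 * still proceeds pgd -> pmd -> pte, but on non-PAE the pmd "entry" is just
 * the pgd entry itself, so the hardware only ever sees the two-level
 * layout it expects.
 */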
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <linux/threads.h>
#include <asm/paravirt.h>

#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct mm_struct;
struct vm_area_struct;

extern pgd_t swapper_pg_dir[1024];
extern pgd_t initial_page_table[1024];
extern pmd_t initial_pg_pmd[];

static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }
void paging_init(void);

/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without an 'access_ok(VERIFY_WRITE,..)'
 */
#undef TEST_ACCESS_OK

#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif

#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address)				\
	((pte_t *)kmap_atomic(pmd_page(*(dir))) +		\
	 pte_index((address)))
#define pte_unmap(pte) kunmap_atomic((pte))
#else
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
#define pte_unmap(pte) do { } while (0)
#endif
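
/*
 * Usage sketch (illustrative only, assuming a pmd entry that is known to
 * be present and whatever locking the caller needs):
 *
 *	pte_t *pte = pte_offset_map(pmd, address);
 *	pte_t entry = *pte;
 *	...
 *	pte_unmap(pte);
 *
 * With CONFIG_HIGHPTE the PTE page may live in highmem, so the mapping is
 * a temporary kmap_atomic() one and pte_unmap() must be called before
 * sleeping; without HIGHPTE it is a plain lowmem dereference and
 * pte_unmap() is a no-op.
 */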

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, (vaddr), (ptep));	\
	__flush_tlb_one((vaddr));		\
} while (0)
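
/*
 * For illustration (not part of the original header): this is the kind of
 * helper used when a temporary kernel mapping is torn down, roughly as the
 * 32-bit highmem code does for a kmap_atomic() slot:
 *
 *	kpte_clear_flush(kmap_pte - idx, vaddr);
 *
 * i.e. clear the kernel PTE and then flush that single virtual address
 * from the local TLB.
 */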

#endif /* !__ASSEMBLY__ */

/*
 * kern_addr_valid() is (1) for FLATMEM and (0) for
 * SPARSEMEM and DISCONTIGMEM
 */
#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)	(1)
#else
#define kern_addr_valid(kaddr)	(0)
#endif

/*
 * This is how much memory, in addition to the memory covered up to
 * and including _end, we need mapped initially.
 * We need:
 *     (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non-PAE)
 *     (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
 *
 * Modulo rounding, each megabyte assigned here requires a kilobyte of
 * memory, which is currently unreclaimed.
 *
 * This should be a multiple of a page.
 *
 * KERNEL_IMAGE_SIZE should be greater than pa(_end) and smaller than
 * max_low_pfn, otherwise some page table entries will be wasted.
 */
#if PTRS_PER_PMD > 1
#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
#else
#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
#endif
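
/*
 * Worked example (illustrative; the actual KERNEL_IMAGE_SIZE depends on
 * the configuration): for a 512 MiB kernel image, 512 MiB / 4096 = 131072
 * pages need mapping. Non-PAE page tables hold 1024 entries each, so
 * 131072 / 1024 = 128 extra pages are needed; PAE page tables hold 512
 * entries, so 131072 / 512 + 4 = 260 pages are needed (the +4 covers the
 * PAE pmd pages, one per pgd entry).
 */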

/*
 * Number of possible pages in the lowmem region.
 *
 * We shift 2 left by 31 instead of 1 left by 32 in order to avoid a gas
 * warning about an overflowing shift count when gas has been compiled
 * with only host target support and uses a 32-bit type for its internal
 * representation.
 */
#define LOWMEM_PAGES ((((2<<31) - __PAGE_OFFSET) >> PAGE_SHIFT))
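
/*
 * For example (assuming the common __PAGE_OFFSET of 0xC0000000): 2 << 31
 * is 4 GiB, so LOWMEM_PAGES = (4 GiB - 3 GiB) >> PAGE_SHIFT = 0x40000
 * pages, i.e. 1 GiB worth of potential lowmem address space.
 */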

#endif /* _ASM_X86_PGTABLE_32_H */