#ifndef _ASM_X86_PGTABLE_32_H
#define _ASM_X86_PGTABLE_32_H

#include <asm/pgtable_32_types.h>

/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */
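/*
 * Illustrative sketch only: because the middle level(s) are folded, a
 * generic page-table walk on 32-bit touches just the two hardware levels;
 * the folded helpers simply hand back the entry they were given. The
 * helper signatures below assume the pre-5-level-paging API.
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);	- folded: aliases the pgd entry
 *	pmd_t *pmd = pmd_offset(pud, addr);	- non-PAE: folded again
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	...
 *	pte_unmap(pte);
 */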
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>
#include <asm/paravirt.h>

#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct mm_struct;
struct vm_area_struct;

extern pgd_t swapper_pg_dir[1024];
extern pgd_t initial_page_table[1024];
extern pmd_t initial_pg_pmd[];

static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }
void paging_init(void);

/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without an 'access_ok(VERIFY_WRITE,..)'
 */
#undef TEST_ACCESS_OK

#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif

#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address)				\
	((pte_t *)kmap_atomic(pmd_page(*(dir))) +		\
	 pte_index((address)))
#define pte_unmap(pte) kunmap_atomic((pte))
#else
#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
#define pte_unmap(pte) do { } while (0)
#endif
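/*
 * Illustrative sketch only: callers pair pte_offset_map() with
 * pte_unmap() and must not sleep in between, since with CONFIG_HIGHPTE
 * the PTE page is reached via kmap_atomic(). Assumes a valid pmd.
 *
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	pte_t entry = *pte;
 *	pte_unmap(pte);
 */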

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, (vaddr), (ptep));	\
	__flush_tlb_one((vaddr));		\
} while (0)

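/*
 * Illustrative sketch only: kpte_clear_flush() is intended for kernel
 * mappings such as the fixmap-based atomic kmaps. Given the kernel PTE
 * that maps vaddr, it clears the entry in init_mm and flushes that single
 * virtual address from the TLB. (lookup_address() is just one way to find
 * the PTE, and this assumes a 4K mapping.)
 *
 *	unsigned int level;
 *	pte_t *ptep = lookup_address(vaddr, &level);
 *	kpte_clear_flush(ptep, vaddr);
 */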
#endif /* !__ASSEMBLY__ */

/*
 * kern_addr_valid() is (1) for FLATMEM and (0) for
 * SPARSEMEM and DISCONTIGMEM
 */
#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)	(1)
#else
#define kern_addr_valid(kaddr)	(0)
#endif

/*
 * This is how much memory in addition to the memory covered up to
 * and including _end we need mapped initially.
 * We need:
 *     (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non-PAE)
 *     (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
 *
 * Modulo rounding, each megabyte assigned here requires a kilobyte of
 * memory, which is currently unreclaimed.
 *
 * This should be a multiple of a page.
 *
 * KERNEL_IMAGE_SIZE should be greater than pa(_end) and smaller than
 * max_low_pfn; otherwise some page table entries will be wasted.
 */
#if PTRS_PER_PMD > 1
#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
#else
#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
#endif
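/*
 * Worked example (illustrative): mapping 512 MiB, i.e. 131072 4 KiB
 * pages, needs
 *	non-PAE: 131072 / 1024     = 128 page-table pages
 *	PAE:     131072 / 512 + 4  = 260 page-table pages
 * which is where the "roughly a kilobyte per megabyte" figure above
 * comes from in the non-PAE case.
 */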

/*
 * Number of possible pages in the lowmem region.
 *
 * We shift 2 left by 31 instead of 1 left by 32 in order to avoid a
 * gas warning about an overflowing shift count when gas has been built
 * with support only for a host target that uses a 32-bit type for its
 * internal representation.
 */
#define LOWMEM_PAGES ((((2<<31) - __PAGE_OFFSET) >> PAGE_SHIFT))
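/*
 * Worked example (illustrative): with the default __PAGE_OFFSET of
 * 0xC0000000 this evaluates to (0x100000000 - 0xC0000000) >> PAGE_SHIFT,
 * i.e. 0x40000 = 262144 possible lowmem pages (the 1 GiB of virtual
 * address space above PAGE_OFFSET).
 */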

#endif /* _ASM_X86_PGTABLE_32_H */