/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_32_H
#define _ASM_X86_PGTABLE_32_H

#include <asm/pgtable_32_types.h>

/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */
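/*
 * A minimal sketch of the resulting walk (hypothetical caller; helper
 * names follow the generic page-table API and their exact signatures
 * vary by kernel version).  With the mid level folded, pmd_offset()
 * hands back what is effectively the top-level slot, so only the two
 * hardware levels are ever walked:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);	   first (and only) real level
 *	pmd_t *pmd = pmd_offset(pgd, addr);	   folded: same entry as *pgd
 *	pte_t *pte = pte_offset_map(pmd, addr);	   hardware second level
 *	pte_unmap(pte);
 */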
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <linux/threads.h>
#include <asm/paravirt.h>

#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct mm_struct;
struct vm_area_struct;

extern pgd_t swapper_pg_dir[1024];
extern pgd_t initial_page_table[1024];
extern pmd_t initial_pg_pmd[];

static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }
void paging_init(void);
void sync_initial_page_table(void);

/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without an 'access_ok(VERIFY_WRITE,..)' check.
 */
#undef TEST_ACCESS_OK

#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif

#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address)				\
	((pte_t *)kmap_atomic(pmd_page(*(dir))) +		\
	 pte_index((address)))
#define pte_unmap(pte) kunmap_atomic((pte))
#else
#define pte_offset_map(dir, address)				\
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
#define pte_unmap(pte) do { } while (0)
#endif
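/*
 * Usage sketch (hypothetical caller): under CONFIG_HIGHPTE the pte page
 * may live in highmem, so pte_offset_map() takes a kmap_atomic() mapping
 * and the returned pointer is only valid, without sleeping, until the
 * matching pte_unmap():
 *
 *	pte_t *ptep = pte_offset_map(pmd, addr);
 *	pte_t val = *ptep;
 *	pte_unmap(ptep);
 */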

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, (vaddr), (ptep));	\
	__flush_tlb_one_kernel((vaddr));	\
} while (0)
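/*
 * Usage sketch (hypothetical, not a prescribed sequence): tearing down a
 * temporary kernel mapping such as a fixmap-style slot, where ptep and
 * vaddr are assumed to describe an already established mapping of page:
 *
 *	set_pte(ptep, mk_pte(page, PAGE_KERNEL));
 *	[ use the mapping at vaddr ]
 *	kpte_clear_flush(ptep, vaddr);
 */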

#endif /* !__ASSEMBLY__ */

/*
 * kern_addr_valid() is (1) for FLATMEM and (0) for
 * SPARSEMEM and DISCONTIGMEM
 */
#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)	(1)
#else
#define kern_addr_valid(kaddr)	(0)
#endif

/*
 * This is how much memory in addition to the memory covered up to
 * and including _end we need mapped initially.
 * We need:
 *     (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non-PAE)
 *     (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
 *
 * Modulo rounding, each megabyte assigned here requires a kilobyte of
 * memory, which is currently unreclaimed.
 *
 * This should be a multiple of a page.
 *
 * KERNEL_IMAGE_SIZE should be greater than pa(_end) and smaller than
 * max_low_pfn, otherwise we will waste some page table entries.
 */
#if PTRS_PER_PMD > 1
#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
#else
#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
#endif
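/*
 * Worked example (an illustration of the macro above, not a new limit):
 * with PAE, PTRS_PER_PMD == 512 and PTRS_PER_PGD == 4, so mapping
 * 0x40000 pages (1 GiB) costs 0x40000/512 + 4 == 516 page-table pages.
 * Without PAE, PTRS_PER_PGD == 1024 and the same 1 GiB costs
 * 0x40000/1024 == 256 page-table pages.
 */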

/*
 * Number of possible pages in the lowmem region.
 *
 * We shift 2 left by 31 instead of 1 left by 32 in order to avoid a gas
 * warning about an overflowing shift count when gas has been built with
 * only host-side target support and uses a 32-bit type for its internal
 * representation.
 */
#define LOWMEM_PAGES ((((2<<31) - __PAGE_OFFSET) >> PAGE_SHIFT))
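/*
 * Worked example: with the usual __PAGE_OFFSET of 0xC0000000 this is
 * (0x100000000 - 0xC0000000) >> 12 == 0x40000 possible lowmem pages,
 * i.e. 1 GiB.  (2<<31) equals (1<<32) == 0x100000000, but keeps each
 * individual shift count below the 32-bit operand width.
 */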

#endif /* _ASM_X86_PGTABLE_32_H */