/*
 * Hibernation support specific for i386 - temporary page tables
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */

#include <linux/suspend.h>
#include <linux/bootmem.h>

#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>

/* Defined in hibernate_asm_32.S */
extern int restore_image(void);

/* References to section boundaries */
extern const void __nosave_begin, __nosave_end;

/* Pointer to the temporary resume page tables */
pgd_t *resume_pg_dir;

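/*
 * Overview: swsusp_arch_resume() builds a temporary copy of the kernel's
 * linear mapping of low memory (virtual PAGE_OFFSET onwards -> physical 0
 * onwards) in resume_pg_dir, allocated entirely from resume-safe pages.
 * restore_image() in hibernate_asm_32.S switches to these tables before
 * copying the image pages back into place.
 */
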
/* The following three functions are based on the analogous code in
 * arch/x86/mm/init_32.c
 */

/*
 * Create a middle page table on a resume-safe page and put a pointer to it in
 * the given global directory entry.  This only returns the pgd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t *resume_one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
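	/*
	 * get_safe_page() hands back a free page that is not occupied by
	 * saved image data, so the new pmd table cannot be clobbered while
	 * the image is being copied back into place.
	 */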
	pmd_table = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd_table)
		return NULL;

	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
	pud = pud_offset(pgd, 0);

	BUG_ON(pmd_table != pmd_offset(pud, 0));
#else
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);
#endif

	return pmd_table;
}

/*
 * Create a page table on a resume-safe page and place a pointer to it in
 * a middle page directory entry.
 */
static pte_t *resume_one_page_table_init(pmd_t *pmd)
{
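	/* Allocate a new pte page only if this pmd entry is still empty */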
	if (pmd_none(*pmd)) {
		pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);
		if (!page_table)
			return NULL;

		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));

		BUG_ON(page_table != pte_offset_kernel(pmd, 0));

		return page_table;
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.  The page tables are allocated out of resume-safe pages.
 */
static int resume_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = resume_one_md_table_init(pgd);
		if (!pmd)
			return -ENOMEM;

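		/*
		 * Past max_low_pfn there is nothing left to map, but keep
		 * walking the pgd so resume_one_md_table_init() still sets
		 * up the remaining middle tables; just skip the pte setup.
		 */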
		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
			if (pfn >= max_low_pfn)
				break;

			/* Map with big pages if possible, otherwise create
			 * normal page tables.
			 * NOTE: We can mark everything as executable here
			 */
			if (cpu_has_pse) {
				set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
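				/*
				 * One large page covers PTRS_PER_PTE small
				 * pages: 4 MB without PAE, 2 MB with PAE.
				 */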
				pfn += PTRS_PER_PTE;
			} else {
				pte_t *max_pte;

				pte = resume_one_page_table_init(pmd);
				if (!pte)
					return -ENOMEM;

				max_pte = pte + PTRS_PER_PTE;
				for (; pte < max_pte; pte++, pfn++) {
					if (pfn >= max_low_pfn)
						break;

					set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
				}
			}
		}
	}
	return 0;
}

static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
{
#ifdef CONFIG_X86_PAE
	int i;

	/*
	 * Init all entries of the first-level page table to the zero page,
	 * so that each of them is present and points to a pmd table full
	 * of zeros (i.e. to no valid mappings at all).
	 */
	for (i = 0; i < PTRS_PER_PGD; i++)
		set_pgd(pg_dir + i,
			__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#endif
}

int swsusp_arch_resume(void)
{
	int error;

	resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!resume_pg_dir)
		return -ENOMEM;

	resume_init_first_level_page_table(resume_pg_dir);
	error = resume_physical_mapping_init(resume_pg_dir);
	if (error)
		return error;

	/* We have enough memory and from now on we cannot recover */
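	/*
	 * restore_image() switches to the temporary page tables, copies the
	 * image data back into place and passes control to the restored
	 * kernel, so on success it does not return here.
	 */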
	restore_image();
	return 0;
}

/*
 *	pfn_is_nosave - check if given pfn is in the 'nosave' section
 */
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
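	/* The end is rounded up, so a partially used last page counts as
	 * nosave too; the range is half-open:
	 * [nosave_begin_pfn, nosave_end_pfn).
	 */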
	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}