/* MN10300 Page table management
 *
 * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Modified by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

void show_mem(void)
{
	unsigned long i;
	int free = 0, total = 0, reserved = 0, shared = 0;
	int cached = 0;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();

	/* classify every page in mem_map by its current use */
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map + i))
			reserved++;
		else if (PageSwapCache(mem_map + i))
			cached++;
		else if (!page_count(mem_map + i))
			free++;
		else
			/* in use; each reference beyond the first is a share */
			shared += page_count(mem_map + i) - 1;
	}
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d free pages\n", free);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);
}

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
		printk(KERN_ERR "set_pmd_pfn: vaddr misaligned\n");
		return; /* BUG(); */
	}
	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
		printk(KERN_ERR "set_pmd_pfn: pfn misaligned\n");
		return; /* BUG(); */
	}
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR "set_pmd_pfn: pgd_none\n");
		return; /* BUG(); */
	}
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	set_pmd(pmd, pfn_pmd(pfn, flags));
	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
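
#if 0
/* Illustrative sketch only, not part of the original file: a hypothetical
 * caller of set_pmd_pfn().  The vaddr and pfn values are invented for the
 * example; real callers must supply a PMD_SIZE-aligned vaddr and a
 * PTRS_PER_PTE-aligned pfn themselves, since set_pmd_pfn() only warns and
 * returns on misalignment rather than BUG()ing.
 */
static void example_map_one_large_page(void)
{
	unsigned long vaddr = 0xd0000000UL;	/* hypothetical, PMD-aligned */
	unsigned long pfn = 0x40000UL;		/* hypothetical, suitably aligned */

	set_pmd_pfn(vaddr, pfn, PAGE_KERNEL);
}
#endif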

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
	if (pte)
		clear_page(pte);
	return pte;
}

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
#endif
	if (pte)
		clear_highpage(pte);
	return pte;
}
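
#if 0
/* Illustrative sketch only: how the two allocators above pair with their
 * release helpers from <asm/pgalloc.h>.  The mm argument is a placeholder
 * (both allocators ignore the address here), and this assumes the
 * pte_free()/pte_free_kernel() signatures of this kernel generation.
 */
static void example_pte_lifecycle(struct mm_struct *mm)
{
	pte_t *kpte = pte_alloc_one_kernel(mm, 0);
	struct page *upte = pte_alloc_one(mm, 0);

	if (kpte)
		pte_free_kernel(mm, kpte);
	if (upte)
		pte_free(mm, upte);
}
#endif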

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * If the locking proves to be non-performant, a ticketing scheme with
 * checks at dup_mmap(), exec(), and other mmlist addition points
 * could be used. The locking scheme was chosen on the basis of
 * manfred's recommendations and having no core impact whatsoever.
 * -- wli
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;

static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	/* push onto the list head: page->index is the forward link, and
	 * page_private() points back at whichever link points at this page
	 * (either &pgd_list or the previous page's &page->index) */
	page->index = (unsigned long) pgd_list;
	if (pgd_list)
		set_page_private(pgd_list, (unsigned long) &page->index);
	pgd_list = page;
	set_page_private(page, (unsigned long) &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *next, **pprev, *page = virt_to_page(pgd);

	/* unlink by redirecting the previous forward link to our successor */
	next = (struct page *) page->index;
	pprev = (struct page **) page_private(page);
	*pprev = next;
	if (next)
		set_page_private(next, (unsigned long) pprev);
}
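
#if 0
/* Illustrative sketch only: how pageattr-style code could visit every pgd
 * in the system under pgd_lock, following the page->index forward links
 * maintained by pgd_list_add()/pgd_list_del() above.  The visit_one_pgd()
 * callback is hypothetical.
 */
static void example_walk_pgd_list(void (*visit_one_pgd)(pgd_t *))
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);
	for (page = pgd_list; page; page = (struct page *) page->index)
		visit_one_pgd((pgd_t *) page_address(page));
	spin_unlock_irqrestore(&pgd_lock, flags);
}
#endif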

void pgd_ctor(void *pgd)
{
	unsigned long flags;

	/* with two-level tables the kernel entries are replicated in every
	 * pgd, so take pgd_lock to keep the copy below coherent with
	 * updates made through the other pgds on the list */
	if (PTRS_PER_PMD == 1)
		spin_lock_irqsave(&pgd_lock, flags);

	/* share the kernel portion of swapper_pg_dir with this pgd */
	memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
	       swapper_pg_dir + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	/* with a shared kernel pmd, no list or lock is needed */
	if (PTRS_PER_PMD > 1)
		return;

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
	memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
}

/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	quicklist_free(0, pgd_dtor, pgd);
}
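
#if 0
/* Illustrative sketch only: the pgd lifecycle as the core mm code would
 * drive it.  This assumes the quicklist behaviour of this kernel
 * generation: quicklist_alloc() runs pgd_ctor() only on freshly allocated
 * pages (pages recycled from the per-CPU quicklist are assumed to still
 * be constructed), and pgd_dtor() is invoked when quicklist_trim()
 * actually releases surplus pages back to the page allocator.
 */
static void example_pgd_lifecycle(struct mm_struct *mm)
{
	pgd_t *pgd = pgd_alloc(mm);

	if (pgd)
		pgd_free(mm, pgd);	/* parks the page on the quicklist */
}
#endif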

void __init pgtable_cache_init(void)
{
}

void check_pgt_cache(void)
{
	/* cap the per-CPU quicklist: once it caches more than 25 pgd
	 * pages, release up to 16 of them per call */
	quicklist_trim(0, pgd_dtor, 25, 16);
}