/* MN10300 Page table management
 *
 * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Modified by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/quicklist.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
		printk(KERN_ERR "set_pmd_pfn: vaddr misaligned\n");
		return; /* BUG(); */
	}
	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
		printk(KERN_ERR "set_pmd_pfn: pfn misaligned\n");
		return; /* BUG(); */
	}
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR "set_pmd_pfn: pgd_none\n");
		return; /* BUG(); */
	}
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	set_pmd(pmd, pfn_pmd(pfn, flags));
	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	local_flush_tlb_one(vaddr);
}

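/*
 * Allocate a zeroed page to back a kernel PTE table.  __GFP_REPEAT asks
 * the page allocator to retry harder before failing; on failure NULL is
 * returned and the caller unwinds with -ENOMEM.
 */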
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
	if (pte)
		clear_page(pte);
	return pte;
}

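/*
 * Allocate a page to back a userspace PTE table.  With CONFIG_HIGHPTE the
 * page may live in highmem and so has no permanent kernel mapping, hence
 * the struct page return value and the use of clear_highpage() (which
 * zeroes through a temporary kmap) rather than clear_page().
 */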
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
#endif
	if (pte)
		clear_highpage(pte);
	return pte;
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * If the locking proves to be non-performant, a ticketing scheme with
 * checks at dup_mmap(), exec(), and other mmlist addition points
 * could be used. The locking scheme was chosen on the basis of
 * manfred's recommendations and having no core impact whatsoever.
 * -- nyc
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;

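/*
 * Thread a pgd's struct page onto pgd_list.  page->index holds the next
 * page in the list, and page_private() points back at whatever points to
 * this page (pgd_list itself or the previous page's ->index field), so a
 * later unlink needs no list walk.
 */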
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);
	page->index = (unsigned long) pgd_list;
	if (pgd_list)
		set_page_private(pgd_list, (unsigned long) &page->index);
	pgd_list = page;
	set_page_private(page, (unsigned long) &pgd_list);
}

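/*
 * Unlink a pgd's struct page from pgd_list using the back-pointer set up
 * by pgd_list_add(): *pprev is redirected past this page, and the next
 * page's back-pointer is fixed up to match.
 */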
static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *next, **pprev, *page = virt_to_page(pgd);
	next = (struct page *) page->index;
	pprev = (struct page **) page_private(page);
	*pprev = next;
	if (next)
		set_page_private(next, (unsigned long) pprev);
}

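/*
 * Construct a fresh pgd: share the kernel mappings by copying that part
 * of swapper_pg_dir, then, when the pmd level is folded into the pgd
 * (PTRS_PER_PMD == 1), link the pgd into pgd_list under pgd_lock so that
 * future kernel mapping changes reach it, and zero the userspace entries.
 */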
void pgd_ctor(void *pgd)
{
	unsigned long flags;

	if (PTRS_PER_PMD == 1)
		spin_lock_irqsave(&pgd_lock, flags);

	memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
	       swapper_pg_dir + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	if (PTRS_PER_PMD > 1)
		return;

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
	memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
}

/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

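/*
 * pgds are cached on a per-CPU quicklist.  quicklist_alloc() reuses a
 * cached page when one is available and only runs pgd_ctor() when a
 * fresh page has to come from the page allocator.
 */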
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
}

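/*
 * Return a pgd to the quicklist; pgd_dtor unhooks it from pgd_list
 * before the backing page is finally handed back to the page allocator.
 */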
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	quicklist_free(0, pgd_dtor, pgd);
}

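/*
 * Boot-time hook for setting up page table caches.  This port allocates
 * pgds and ptes straight from the page allocator and the quicklist, so
 * there is nothing to create here.
 */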
void __init pgtable_cache_init(void)
{
}

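/*
 * Trim the pgd quicklist when the core mm asks us to: release cached
 * pages down towards a floor of 25, at most 16 per call, running
 * pgd_dtor on each page handed back to the page allocator.
 */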
void check_pgt_cache(void)
{
	quicklist_trim(0, pgd_dtor, 25, 16);
}
169}