/*
 * Based upon linux/arch/m68k/mm/sun3mmu.c
 * Based upon linux/arch/ppc/mm/mmu_context.c
 *
 * Implementations of mm routines specific to the ColdFire MMU.
 *
 * Copyright (c) 2008 Freescale Semiconductor, Inc.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/bootmem.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/mcf_pgalloc.h>
#include <asm/tlbflush.h>

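/*
 * KMAPAREA(x) is true when the address lies inside the kernel's
 * vmalloc/kmap window; faults there are resolved against init_mm
 * rather than the faulting task's mm (see cf_tlb_miss() below).
 */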
#define KMAPAREA(x) ((x >= VMALLOC_START) && (x < KMAP_END))

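/*
 * Context (ASID) bookkeeping: a bitmap of contexts in use, the next
 * candidate for allocation, a count of free contexts, and a reverse
 * map from context number to the owning mm_struct.
 */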
mm_context_t next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
atomic_t nr_free_contexts;
struct mm_struct *context_mm[LAST_CONTEXT + 1];
extern unsigned long num_pages;

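/* Nothing to do: init-section memory is not reclaimed on this port. */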
void free_initmem(void)
{
}

/*
 * ColdFire paging_init derived from sun3.
 */
void __init paging_init(void)
{
	pgd_t *pg_dir;
	pte_t *pg_table;
	unsigned long address, size;
	unsigned long next_pgtable, bootmem_end;
	unsigned long zones_size[MAX_NR_ZONES];
	enum zone_type zone;
	int i;

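	/*
	 * Allocate the global zero page (shared by zero-initialized
	 * mappings) and make sure it starts out fully cleared.
	 */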
	empty_zero_page = (void *) alloc_bootmem_pages(PAGE_SIZE);
	memset((void *) empty_zero_page, 0, PAGE_SIZE);

	pg_dir = swapper_pg_dir;
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
	next_pgtable = (unsigned long) alloc_bootmem_pages(size);

	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

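	/*
	 * Map all of RAM at PAGE_OFFSET: each pass of the loop below
	 * fills one page table and hooks it into the next pgd slot,
	 * until everything up to high_memory is covered.
	 */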
	address = PAGE_OFFSET;
	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) next_pgtable;
		next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* now fill this page table, mapping consecutive kernel virtual addresses */
		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
			if (address >= (unsigned long) high_memory)
				pte_val(pte) = 0;

			set_pte(pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	current->mm = NULL;

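	/* All memory goes into ZONE_DMA; the remaining zones stay empty. */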
	for (zone = 0; zone < MAX_NR_ZONES; zone++)
		zones_size[zone] = 0x0;
	zones_size[ZONE_DMA] = num_pages;
	free_area_init(zones_size);
}

int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
{
	unsigned long flags, mmuar;
	struct mm_struct *mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int asid;

	local_irq_save(flags);

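	/*
	 * A data-TLB miss latches the faulting address in the MMUAR
	 * register; for an instruction miss it is reconstructed from
	 * the faulting PC plus the signed extension word.
	 */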
	mmuar = (dtlb) ? mmu_read(MMUAR) :
		regs->pc + (extension_word * sizeof(long));

	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
	if (!mm) {
		local_irq_restore(flags);
		return -1;
	}

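	/*
	 * Walk the page tables by hand; a missing level means this
	 * miss cannot be handled here, which -1 signals to the caller.
	 */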
	pgd = pgd_offset(mm, mmuar);
	if (pgd_none(*pgd)) {
		local_irq_restore(flags);
		return -1;
	}

	pmd = pmd_offset(pgd, mmuar);
	if (pmd_none(*pmd)) {
		local_irq_restore(flags);
		return -1;
	}

	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
				: pte_offset_map(pmd, mmuar);
	if (pte_none(*pte) || !pte_present(*pte)) {
		local_irq_restore(flags);
		return -1;
	}

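	/*
	 * A write to a read-only page is a real protection fault that
	 * a TLB reload cannot fix; otherwise keep the software
	 * dirty/young bits up to date before loading the entry.
	 */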
	if (write) {
		if (!pte_write(*pte)) {
			local_irq_restore(flags);
			return -1;
		}
		set_pte(pte, pte_mkdirty(*pte));
	}

	set_pte(pte, pte_mkyoung(*pte));
	asid = mm->context & 0xff;
	if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
		set_pte(pte, pte_wrprotect(*pte));

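	/*
	 * Load the new entry: MMUTR takes the virtual page number, the
	 * ASID and the valid bit, MMUDR the physical page number plus
	 * the protection bits; the MMUOR write then commits the pair
	 * to the I- or D-TLB.
	 */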
	mmu_write(MMUTR, (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) |
		(((int)(pte->pte) & (int)CF_PAGE_MMUTR_MASK)
		>> CF_PAGE_MMUTR_SHIFT) | MMUTR_V);

	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);

	if (dtlb)
		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
	else
		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);

	local_irq_restore(flags);
	return 0;
}

/*
 * Initialize the context management stuff.
 * The following was taken from arch/ppc/mmu_context.c
 */
void __init mmu_context_init(void)
{
	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
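	/* Mark contexts below FIRST_CONTEXT as permanently in use. */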
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_mmu_context = FIRST_CONTEXT;
	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}

/*
 * Steal a context from a task that has one at the moment.
 * This is only used on 8xx and 4xx and we presently assume that
 * they don't do SMP.  If they do then this will have to check
 * whether the MM we steal is in use.
 * We also assume that this is only used on systems that don't
 * use an MMU hash table - this is true for 8xx and 4xx.
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :).  This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *	-- paulus
 */
void steal_context(void)
{
	struct mm_struct *mm;
	/*
	 * free up context `next_mmu_context'
	 * if we shouldn't free context 0, don't...
	 */
	if (next_mmu_context < FIRST_CONTEXT)
		next_mmu_context = FIRST_CONTEXT;
	mm = context_mm[next_mmu_context];
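	/*
	 * Flush the victim's stale TLB entries, then destroy its
	 * context so the number can be handed out again.
	 */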
	flush_tlb_mm(mm);
	destroy_context(mm);
}
198