Benjamin Herrenschmidt | 5e69661 | 2008-12-18 19:13:24 +0000 | [diff] [blame] | 1 | /* |
| 2 | * This file contains the routines for handling the MMU on those |
| 3 | * PowerPC implementations where the MMU is not using the hash |
| 4 | * table, such as 8xx, 4xx, BookE's etc... |
| 5 | * |
| 6 | * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org> |
| 7 | * IBM Corp. |
| 8 | * |
| 9 | * Derived from previous arch/powerpc/mm/mmu_context.c |
| 10 | * and arch/powerpc/include/asm/mmu_context.h |
| 11 | * |
| 12 | * This program is free software; you can redistribute it and/or |
| 13 | * modify it under the terms of the GNU General Public License |
| 14 | * as published by the Free Software Foundation; either version |
| 15 | * 2 of the License, or (at your option) any later version. |
| 16 | * |
Benjamin Herrenschmidt | 2ca8cf7 | 2008-12-18 19:13:29 +0000 | [diff] [blame] | 17 | * TODO: |
| 18 | * |
| 19 | * - The global context lock will not scale very well |
| 20 | * - The maps should be dynamically allocated to allow for processors |
| 21 | * that support more PID bits at runtime |
| 22 | * - Implement flush_tlb_mm() by making the context stale and picking |
| 23 | * a new one |
| 24 | * - More aggressively clear stale map bits and maybe find some way to |
| 25 | * also clear mm->cpu_vm_mask bits when processes are migrated |
Benjamin Herrenschmidt | 5e69661 | 2008-12-18 19:13:24 +0000 | [diff] [blame] | 26 | */ |
| 27 | |
Benjamin Herrenschmidt | 2ca8cf7 | 2008-12-18 19:13:29 +0000 | [diff] [blame] | 28 | #undef DEBUG |
| 29 | #define DEBUG_STEAL_ONLY |
| 30 | #undef DEBUG_MAP_CONSISTENCY |
Benjamin Herrenschmidt | 7752035 | 2008-12-18 19:13:48 +0000 | [diff] [blame] | 31 | /*#define DEBUG_CLAMP_LAST_CONTEXT 15 */ |
Benjamin Herrenschmidt | 2ca8cf7 | 2008-12-18 19:13:29 +0000 | [diff] [blame] | 32 | |
| 33 | #include <linux/kernel.h> |
Benjamin Herrenschmidt | 5e69661 | 2008-12-18 19:13:24 +0000 | [diff] [blame] | 34 | #include <linux/mm.h> |
| 35 | #include <linux/init.h> |
Benjamin Herrenschmidt | 7752035 | 2008-12-18 19:13:48 +0000 | [diff] [blame] | 36 | #include <linux/spinlock.h> |
| 37 | #include <linux/bootmem.h> |
| 38 | #include <linux/notifier.h> |
| 39 | #include <linux/cpu.h> |
Benjamin Herrenschmidt | 5e69661 | 2008-12-18 19:13:24 +0000 | [diff] [blame] | 40 | |
| 41 | #include <asm/mmu_context.h> |
| 42 | #include <asm/tlbflush.h> |
| 43 | |
/*
 * Context-allocation state. All of it is protected by context_lock
 * (see switch_mmu_context() / destroy_context()).
 *
 * first_context/last_context bound the usable hardware context (PID)
 * range; they are set in mmu_context_init() based on the MMU type.
 */
static unsigned int first_context, last_context;
/* next_context is a rotor for the next allocation attempt;
 * nr_free_contexts counts IDs still clear in context_map. */
static unsigned int next_context, nr_free_contexts;
/* Bitmap of allocated context IDs (bit set = in use). */
static unsigned long *context_map;
/* Per-CPU bitmap of context IDs whose TLB entries may be stale on that
 * CPU; such an ID must be locally flushed before being used there. */
static unsigned long *stale_map[NR_CPUS];
/* Reverse map: context ID -> owning mm (NULL when the ID is free). */
static struct mm_struct **context_mm;
static DEFINE_SPINLOCK(context_lock);

/* Size in bytes of a bitmap covering context IDs 0..last_context. */
#define CTX_MAP_SIZE	\
	(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
| 54 | |
Benjamin Herrenschmidt | 5e69661 | 2008-12-18 19:13:24 +0000 | [diff] [blame] | 55 | /* Steal a context from a task that has one at the moment. |
Benjamin Herrenschmidt | 2ca8cf7 | 2008-12-18 19:13:29 +0000 | [diff] [blame] | 56 | * |
| 57 | * This is used when we are running out of available PID numbers |
| 58 | * on the processors. |
| 59 | * |
Benjamin Herrenschmidt | 5e69661 | 2008-12-18 19:13:24 +0000 | [diff] [blame] | 60 | * This isn't an LRU system, it just frees up each context in |
| 61 | * turn (sort-of pseudo-random replacement :). This would be the |
| 62 | * place to implement an LRU scheme if anyone was motivated to do it. |
| 63 | * -- paulus |
Benjamin Herrenschmidt | 2ca8cf7 | 2008-12-18 19:13:29 +0000 | [diff] [blame] | 64 | * |
| 65 | * For context stealing, we use a slightly different approach for |
| 66 | * SMP and UP. Basically, the UP one is simpler and doesn't use |
| 67 | * the stale map as we can just flush the local CPU |
| 68 | * -- benh |
Benjamin Herrenschmidt | 5e69661 | 2008-12-18 19:13:24 +0000 | [diff] [blame] | 69 | */ |
#ifdef CONFIG_SMP
/* Steal a context on SMP, starting the search at @id.
 *
 * Called with context_lock held when nr_free_contexts == 0. Walks the
 * ID space looking for a context whose mm is not currently active on
 * any CPU (on SMP we cannot steal an active context). On success the
 * victim mm loses its ID and the ID is marked stale on every CPU that
 * ran the mm, so those CPUs flush before reusing it.
 *
 * If every context is active (more CPUs than contexts), we drop the
 * lock briefly to let others make progress and return MMU_NO_CONTEXT,
 * which makes the caller restart its allocation attempt from scratch.
 */
static unsigned int steal_context_smp(unsigned int id)
{
	struct mm_struct *mm;
	unsigned int cpu, max;

	/* At most one full sweep of the ID space */
	max = last_context - first_context;

	/* Attempt to free next_context first and then loop until we manage */
	while (max--) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		/* We have a candidate victim, check if it's active, on SMP
		 * we cannot steal active contexts
		 */
		if (mm->context.active) {
			id++;
			if (id > last_context)
				id = first_context;
			continue;
		}
		pr_devel("[%d] steal context %d from mm @%p\n",
			 smp_processor_id(), id, mm);

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;

		/* Mark it stale on all CPUs that used this mm */
		for_each_cpu(cpu, mm_cpumask(mm))
			__set_bit(id, stale_map[cpu]);
		return id;
	}

	/* This will happen if you have more CPUs than available contexts,
	 * all we can do here is wait a bit and try again
	 */
	spin_unlock(&context_lock);
	cpu_relax();
	spin_lock(&context_lock);

	/* This will cause the caller to try again */
	return MMU_NO_CONTEXT;
}
#endif  /* CONFIG_SMP */
| 115 | |
/* Steal context @id on UP (or on SMP with a single online CPU).
 *
 * Called with context_lock held. Unlike the SMP path we can steal even
 * an active context, because we flush the local TLB immediately instead
 * of deferring via the stale map.
 *
 * Note that this will also be called on SMP if all other CPUs are
 * offlined, which means that it may be called for cpu != 0. For
 * this to work, we somewhat assume that CPUs that are onlined
 * come up with a fully clean TLB (or are cleaned when offlined)
 */
static unsigned int steal_context_up(unsigned int id)
{
	struct mm_struct *mm;
	int cpu = smp_processor_id();

	/* Pick up the victim mm */
	mm = context_mm[id];

	pr_devel("[%d] steal context %d from mm @%p\n", cpu, id, mm);

	/* Flush the TLB for that context. This must happen while
	 * mm->context.id is still valid, i.e. before we clear it below. */
	local_flush_tlb_mm(mm);

	/* Mark this mm as having no context anymore */
	mm->context.id = MMU_NO_CONTEXT;

	/* XXX This clear should ultimately be part of local_flush_tlb_mm */
	__clear_bit(id, stale_map[cpu]);

	return id;
}
| 142 | |
#ifdef DEBUG_MAP_CONSISTENCY
/* Debug-only consistency check of the context bookkeeping.
 *
 * Called with context_lock held from switch_mmu_context(). Verifies
 * that context_map agrees with context_mm, that nr_free_contexts
 * matches the number of clear bits (repairing it if not), that no more
 * contexts are active than there are online CPUs, and that context 0
 * stays reserved when first_context > 0.
 */
static void context_check_map(void)
{
	unsigned int id, nrf, nact;

	nrf = nact = 0;
	for (id = first_context; id <= last_context; id++) {
		int used = test_bit(id, context_map);
		if (!used)
			nrf++;
		/* map bit and reverse map must agree: used <=> mm != NULL */
		if (used != (context_mm[id] != NULL))
			pr_err("MMU: Context %d is %s and MM is %p !\n",
			       id, used ? "used" : "free", context_mm[id]);
		if (context_mm[id] != NULL)
			nact += context_mm[id]->context.active;
	}
	if (nrf != nr_free_contexts) {
		pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
		       nr_free_contexts, nrf);
		/* resync so the error doesn't repeat forever */
		nr_free_contexts = nrf;
	}
	if (nact > num_online_cpus())
		pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
		       nact, num_online_cpus());
	if (first_context > 0 && !test_bit(0, context_map))
		pr_err("MMU: Context 0 has been freed !!!\n");
}
#else
static void context_check_map(void) { }
#endif
Benjamin Herrenschmidt | 5e69661 | 2008-12-18 19:13:24 +0000 | [diff] [blame] | 173 | |
/* Activate the MMU context of @next on the local CPU, allocating or
 * stealing a hardware context ID for it if it doesn't have one yet.
 *
 * Called on every context switch. All context state is protected by
 * context_lock; there is no lockless fast path yet (see file TODO).
 */
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned int id, cpu = smp_processor_id();
	unsigned long *map;

	/* No lockless fast path .. yet */
	spin_lock(&context_lock);

#ifndef DEBUG_STEAL_ONLY
	pr_devel("[%d] activating context for mm @%p, active=%d, id=%d\n",
		 cpu, next, next->context.active, next->context.id);
#endif

#ifdef CONFIG_SMP
	/* Mark us active and the previous one not anymore. The active
	 * count is what prevents steal_context_smp() from taking our ID. */
	next->context.active++;
	if (prev) {
#ifndef DEBUG_STEAL_ONLY
		pr_devel(" old context %p active was: %d\n",
			 prev, prev->context.active);
#endif
		WARN_ON(prev->context.active < 1);
		prev->context.active--;
	}

 again:
#endif /* CONFIG_SMP */

	/* If we already have a valid assigned context, skip all that */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT))
		goto ctxt_ok;

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > last_context)
		id = first_context;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			/* steal_context_smp() may have dropped and
			 * re-taken context_lock, so restart from the top
			 * when it couldn't grab anything */
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
#endif /* CONFIG_SMP */
		id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it */
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, last_context+1, id);
		if (id > last_context)
			id = first_context;
	}
 stolen:
	/* Record the new ownership: advance the rotor, point the reverse
	 * map at @next and give the mm its ID */
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;

#ifndef DEBUG_STEAL_ONLY
	pr_devel("[%d] picked up new id %d, nrf is now %d\n",
		 cpu, id, nr_free_contexts);
#endif

	context_check_map();
 ctxt_ok:

	/* If that context got marked stale on this CPU, then flush the
	 * local TLB for it and unmark it before we use it
	 */
	if (test_bit(id, stale_map[cpu])) {
		pr_devel("[%d] flushing stale context %d for mm @%p !\n",
			 cpu, id, next);
		local_flush_tlb_mm(next);

		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
		__clear_bit(id, stale_map[cpu]);
	}

	/* Flick the MMU and release lock */
	set_context(id, next->pgd);
	spin_unlock(&context_lock);
}
| 263 | |
| 264 | /* |
| 265 | * Set up the context for a new address space. |
| 266 | */ |
| 267 | int init_new_context(struct task_struct *t, struct mm_struct *mm) |
| 268 | { |
Benjamin Herrenschmidt | 2ca8cf7 | 2008-12-18 19:13:29 +0000 | [diff] [blame] | 269 | mm->context.id = MMU_NO_CONTEXT; |
| 270 | mm->context.active = 0; |
| 271 | |
Benjamin Herrenschmidt | 5e69661 | 2008-12-18 19:13:24 +0000 | [diff] [blame] | 272 | return 0; |
| 273 | } |
| 274 | |
/*
 * We're finished using the context for an address space.
 *
 * Return the mm's hardware context ID (if it has one) to the free pool.
 * The lock is taken with spin_lock_irqsave() here, so this path is safe
 * regardless of the caller's interrupt state.
 */
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int id;

	/* Quick unlocked exit when the mm never got a context */
	if (mm->context.id == MMU_NO_CONTEXT)
		return;

	/* No CPU should still be running this mm at this point */
	WARN_ON(mm->context.active != 0);

	spin_lock_irqsave(&context_lock, flags);
	/* Re-read the ID under the lock; it may have been stolen since
	 * the unlocked check above */
	id = mm->context.id;
	if (id != MMU_NO_CONTEXT) {
		__clear_bit(id, context_map);
		mm->context.id = MMU_NO_CONTEXT;
#ifdef DEBUG_MAP_CONSISTENCY
		mm->context.active = 0;
#endif
		context_mm[id] = NULL;
		nr_free_contexts++;
	}
	spin_unlock_irqrestore(&context_lock, flags);
}
| 301 | |
#ifdef CONFIG_SMP

/* CPU hotplug callback: allocate / free the per-CPU stale context map.
 *
 * @self:   unused notifier block
 * @action: hotplug event (CPU_ONLINE, CPU_DEAD, ...)
 * @hcpu:   CPU number, cast through a pointer
 *
 * Returns NOTIFY_OK, or NOTIFY_BAD if the stale map cannot be
 * allocated — without a stale map, switch_mmu_context() would
 * dereference a NULL stale_map[cpu] on that CPU, so we must veto
 * the notification rather than let the CPU run.
 */
static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;

	/* We don't touch CPU 0 map, it's allocated at boot and kept
	 * around forever
	 */
	if (cpu == 0)
		return NOTIFY_OK;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
		stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
		if (!stale_map[cpu])
			return NOTIFY_BAD;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
		kfree(stale_map[cpu]);
		stale_map[cpu] = NULL;
		break;
#endif
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
	.notifier_call = mmu_context_cpu_notify,
};

#endif /* CONFIG_SMP */
Benjamin Herrenschmidt | 5e69661 | 2008-12-18 19:13:24 +0000 | [diff] [blame] | 338 | |
| 339 | /* |
| 340 | * Initialize the context management stuff. |
| 341 | */ |
/*
 * Initialize the context management stuff.
 *
 * Runs once at boot: chooses the hardware context range for the MMU
 * type, allocates the bookkeeping maps from bootmem, registers the CPU
 * hotplug notifier on SMP, and seeds the allocator state.
 */
void __init mmu_context_init(void)
{
	/* Mark init_mm as being active on all possible CPUs since
	 * we'll get called with prev == init_mm the first time
	 * we schedule on a given CPU
	 */
	init_mm.context.active = NR_CPUS;

	/*
	 * The MPC8xx has only 16 contexts. We rotate through them on each
	 * task switch. A better way would be to keep track of tasks that
	 * own contexts, and implement an LRU usage. That way very active
	 * tasks don't always have to pay the TLB reload overhead. The
	 * kernel pages are mapped shared, so the kernel can run on behalf
	 * of any task that makes a kernel entry. Shared does not mean they
	 * are not protected, just that the ASID comparison is not performed.
	 * -- Dan
	 *
	 * The IBM4xx has 256 contexts, so we can just rotate through these
	 * as a way of "switching" contexts. If the TID of the TLB is zero,
	 * the PID/TID comparison is disabled, so we can use a TID of zero
	 * to represent all kernel pages as shared among all contexts.
	 * -- Dan
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
		first_context = 0;
		last_context = 15;
	} else {
		/* context 0 is reserved for the kernel on these parts */
		first_context = 1;
		last_context = 255;
	}

#ifdef DEBUG_CLAMP_LAST_CONTEXT
	/* Debug aid: artificially shrink the context space to exercise
	 * the stealing paths */
	last_context = DEBUG_CLAMP_LAST_CONTEXT;
#endif
	/*
	 * Allocate the maps used by context management
	 */
	context_map = alloc_bootmem(CTX_MAP_SIZE);
	context_mm = alloc_bootmem(sizeof(void *) * (last_context + 1));
	/* only the boot CPU's stale map is allocated here; others come
	 * from the hotplug notifier */
	stale_map[0] = alloc_bootmem(CTX_MAP_SIZE);

#ifdef CONFIG_SMP
	register_cpu_notifier(&mmu_context_cpu_nb);
#endif

	printk(KERN_INFO
	       "MMU: Allocated %zu bytes of context maps for %d contexts\n",
	       2 * CTX_MAP_SIZE + (sizeof(void *) * (last_context + 1)),
	       last_context - first_context + 1);

	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes first_context < 32.
	 */
	/* Pre-mark contexts 0..first_context-1 as permanently in use */
	context_map[0] = (1 << first_context) - 1;
	next_context = first_context;
	nr_free_contexts = last_context - first_context + 1;
}
| 403 | |