/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU is not using the hash
 * table, such as 8xx, 4xx, BookE's etc...
 *
 * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                IBM Corp.
 *
 * Derived from previous arch/powerpc/mm/mmu_context.c
 * and arch/powerpc/include/asm/mmu_context.h
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * TODO:
 *
 *   - The global context lock will not scale very well
 *   - The maps should be dynamically allocated to allow for processors
 *     that support more PID bits at runtime
 *   - Implement flush_tlb_mm() by making the context stale and picking
 *     a new one
 *   - More aggressively clear stale map bits and maybe find some way to
 *     also clear mm->cpu_vm_mask bits when processes are migrated
 */

//#define DEBUG_MAP_CONSISTENCY
//#define DEBUG_CLAMP_LAST_CONTEXT	31
//#define DEBUG_HARDER

/* We don't use DEBUG because it tends to be always compiled in nowadays,
 * and it would generate way too much output.
 */
#ifdef DEBUG_HARDER
#define pr_hard(args...)	printk(KERN_DEBUG args)
#define pr_hardcont(args...)	printk(KERN_CONT args)
#else
#define pr_hard(args...)	do { } while(0)
#define pr_hardcont(args...)	do { } while(0)
#endif

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

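/*
 * Bookkeeping for the context (PID) allocator:
 *
 *   context_map	bitmap of context IDs currently in use
 *   context_mm		reverse map from context ID to the mm that owns it,
 *			needed when a context has to be stolen
 *   stale_map[cpu]	per-CPU bitmap of contexts that were stolen from
 *			under that CPU and thus need a local TLB flush
 *			before being used there again
 *   context_lock	protects all of the above
 */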
static unsigned int first_context, last_context;
static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
static unsigned long *stale_map[NR_CPUS];
static struct mm_struct **context_mm;
static DEFINE_RAW_SPINLOCK(context_lock);

#define CTX_MAP_SIZE	\
	(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
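/* For example, with the common last_context = 255 this is 4 longs (32 bytes)
 * on a 64-bit build and 8 longs (also 32 bytes) on a 32-bit build.
 */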


/* Steal a context from a task that has one at the moment.
 *
 * This is used when we are running out of available PID numbers
 * on the processors.
 *
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :). This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *  -- paulus
 *
 * For context stealing, we use a slightly different approach for
 * SMP and UP. Basically, the UP one is simpler and doesn't use
 * the stale map as we can just flush the local CPU
 *  -- benh
 */
#ifdef CONFIG_SMP
static unsigned int steal_context_smp(unsigned int id)
{
	struct mm_struct *mm;
	unsigned int cpu, max, i;

	max = last_context - first_context;

	/* Attempt to free next_context first and then loop until we manage */
	while (max--) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		/* We have a candidate victim, check if it's active: on SMP
		 * we cannot steal active contexts
		 */
		if (mm->context.active) {
			id++;
			if (id > last_context)
				id = first_context;
			continue;
		}
		pr_hardcont(" | steal %d from 0x%p", id, mm);

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;

		/* Mark it stale on all CPUs that used this mm. For threaded
		 * implementations, we set it on all threads on each core
		 * represented in the mask. A future implementation will use
		 * a core map instead but this will do for now.
		 */
		for_each_cpu(cpu, mm_cpumask(mm)) {
			for (i = cpu_first_thread_sibling(cpu);
			     i <= cpu_last_thread_sibling(cpu); i++)
				__set_bit(id, stale_map[i]);
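			/* Resume the outer scan after this core's last
			 * sibling thread so its other threads aren't
			 * visited again.
			 */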
			cpu = i - 1;
		}
		return id;
	}

	/* This will happen if you have more CPUs than available contexts;
	 * all we can do here is wait a bit and try again
	 */
	raw_spin_unlock(&context_lock);
	cpu_relax();
	raw_spin_lock(&context_lock);

	/* This will cause the caller to try again */
	return MMU_NO_CONTEXT;
}
#endif  /* CONFIG_SMP */

/* Note that this will also be called on SMP if all other CPUs are
 * offlined, which means that it may be called for cpu != 0. For
 * this to work, we somewhat assume that CPUs that are onlined
 * come up with a fully clean TLB (or are cleaned when offlined)
 */
static unsigned int steal_context_up(unsigned int id)
{
	struct mm_struct *mm;
	int cpu = smp_processor_id();

	/* Pick up the victim mm */
	mm = context_mm[id];

	pr_hardcont(" | steal %d from 0x%p", id, mm);

	/* Flush the TLB for that context */
	local_flush_tlb_mm(mm);

	/* Mark this mm as having no context anymore */
	mm->context.id = MMU_NO_CONTEXT;

	/* XXX This clear should ultimately be part of local_flush_tlb_mm */
	__clear_bit(id, stale_map[cpu]);

	return id;
}

#ifdef DEBUG_MAP_CONSISTENCY
static void context_check_map(void)
{
	unsigned int id, nrf, nact;

	nrf = nact = 0;
	for (id = first_context; id <= last_context; id++) {
		int used = test_bit(id, context_map);
		if (!used)
			nrf++;
		if (used != (context_mm[id] != NULL))
			pr_err("MMU: Context %d is %s and MM is %p !\n",
			       id, used ? "used" : "free", context_mm[id]);
		if (context_mm[id] != NULL)
			nact += context_mm[id]->context.active;
	}
	if (nrf != nr_free_contexts) {
		pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
		       nr_free_contexts, nrf);
		nr_free_contexts = nrf;
	}
	if (nact > num_online_cpus())
		pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
		       nact, num_online_cpus());
	if (first_context > 0 && !test_bit(0, context_map))
		pr_err("MMU: Context 0 has been freed !!!\n");
}
#else
static void context_check_map(void) { }
#endif

void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned int i, id, cpu = smp_processor_id();
	unsigned long *map;

	/* No lockless fast path .. yet */
	raw_spin_lock(&context_lock);

	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
		cpu, next, next->context.active, next->context.id);

#ifdef CONFIG_SMP
	/* Mark us active and the previous one not anymore */
	next->context.active++;
	if (prev) {
		pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
		WARN_ON(prev->context.active < 1);
		prev->context.active--;
	}

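	/* Retry point: steal_context_smp() drops and re-takes context_lock
	 * when it cannot find an inactive victim, then returns
	 * MMU_NO_CONTEXT so that we re-check everything from here.
	 */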
again:
#endif /* CONFIG_SMP */

	/* If we already have a valid assigned context, skip all that */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT)) {
#ifdef DEBUG_MAP_CONSISTENCY
		if (context_mm[id] != next)
			pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
			       next, id, id, context_mm[id]);
#endif
		goto ctxt_ok;
	}

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > last_context)
		id = first_context;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
#endif /* CONFIG_SMP */
		id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it */
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, last_context+1, id);
		if (id > last_context)
			id = first_context;
	}
stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;
	pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);

	context_check_map();
ctxt_ok:

	/* If that context got marked stale on this CPU, then flush the
	 * local TLB for it and unmark it before we use it
	 */
	if (test_bit(id, stale_map[cpu])) {
		pr_hardcont(" | stale flush %d [%d..%d]",
			    id, cpu_first_thread_sibling(cpu),
			    cpu_last_thread_sibling(cpu));

		local_flush_tlb_mm(next);

		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
		for (i = cpu_first_thread_sibling(cpu);
		     i <= cpu_last_thread_sibling(cpu); i++) {
			__clear_bit(id, stale_map[i]);
		}
	}

	/* Flick the MMU and release lock */
	pr_hardcont(" -> %d\n", id);
	set_context(id, next->pgd);
	raw_spin_unlock(&context_lock);
}

/*
 * Set up the context for a new address space.
 */
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
	pr_hard("initing context for mm @%p\n", mm);

	mm->context.id = MMU_NO_CONTEXT;
	mm->context.active = 0;

#ifdef CONFIG_PPC_MM_SLICES
	if (slice_mm_new_context(mm))
		slice_set_user_psize(mm, mmu_virtual_psize);
#endif

	return 0;
}

/*
 * We're finished using the context for an address space.
 */
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int id;

	if (mm->context.id == MMU_NO_CONTEXT)
		return;

	WARN_ON(mm->context.active != 0);

	raw_spin_lock_irqsave(&context_lock, flags);
	id = mm->context.id;
	if (id != MMU_NO_CONTEXT) {
		__clear_bit(id, context_map);
		mm->context.id = MMU_NO_CONTEXT;
#ifdef DEBUG_MAP_CONSISTENCY
		mm->context.active = 0;
#endif
		context_mm[id] = NULL;
		nr_free_contexts++;
	}
	raw_spin_unlock_irqrestore(&context_lock, flags);
}

#ifdef CONFIG_SMP

static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
#ifdef CONFIG_HOTPLUG_CPU
	struct task_struct *p;
#endif
	/* We don't touch the boot CPU's map, it's allocated at boot and
	 * kept around forever
	 */
	if (cpu == boot_cpuid)
		return NOTIFY_OK;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
		stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
		kfree(stale_map[cpu]);
		stale_map[cpu] = NULL;

		/* We also clear the cpu_vm_mask bits of CPUs going away */
		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->mm)
				cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
		}
		read_unlock(&tasklist_lock);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
	.notifier_call	= mmu_context_cpu_notify,
};

#endif /* CONFIG_SMP */

/*
 * Initialize the context management stuff.
 */
void __init mmu_context_init(void)
{
	/* Mark init_mm as being active on all possible CPUs since
	 * we'll get called with prev == init_mm the first time
	 * we schedule on a given CPU
	 */
	init_mm.context.active = NR_CPUS;

	/*
	 * The MPC8xx has only 16 contexts. We rotate through them on each
	 * task switch. A better way would be to keep track of tasks that
	 * own contexts, and implement an LRU usage. That way very active
	 * tasks don't always have to pay the TLB reload overhead. The
	 * kernel pages are mapped shared, so the kernel can run on behalf
	 * of any task that makes a kernel entry. Shared does not mean they
	 * are not protected, just that the ASID comparison is not performed.
	 *	-- Dan
	 *
	 * The IBM4xx has 256 contexts, so we can just rotate through these
	 * as a way of "switching" contexts. If the TID of the TLB is zero,
	 * the PID/TID comparison is disabled, so we can use a TID of zero
	 * to represent all kernel pages as shared among all contexts.
	 *	-- Dan
	 *
	 * The IBM 47x core supports 16-bit PIDs, thus 65535 contexts. We
	 * should normally never have to steal, though the facility is
	 * present if needed.
	 *	-- BenH
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
		first_context = 0;
		last_context = 15;
	} else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
		first_context = 1;
		last_context = 65535;
	} else
#ifdef CONFIG_PPC_BOOK3E_MMU
	if (mmu_has_feature(MMU_FTR_TYPE_3E)) {
		u32 mmucfg = mfspr(SPRN_MMUCFG);
		u32 pid_bits = (mmucfg & MMUCFG_PIDSIZE_MASK)
				>> MMUCFG_PIDSIZE_SHIFT;
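		/* MMUCFG[PIDSIZE] is the number of implemented PID bits
		 * minus one, hence the "+ 1" when computing last_context
		 * below (e.g. PIDSIZE = 7 means 8-bit PIDs, so contexts
		 * 1..255 plus the reserved context 0).
		 */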
		first_context = 1;
		last_context = (1UL << (pid_bits + 1)) - 1;
	} else
#endif
	{
		first_context = 1;
		last_context = 255;
	}

#ifdef DEBUG_CLAMP_LAST_CONTEXT
	last_context = DEBUG_CLAMP_LAST_CONTEXT;
#endif
	/*
	 * Allocate the maps used by context management
	 */
	context_map = alloc_bootmem(CTX_MAP_SIZE);
	context_mm = alloc_bootmem(sizeof(void *) * (last_context + 1));
#ifndef CONFIG_SMP
	stale_map[0] = alloc_bootmem(CTX_MAP_SIZE);
#else
	stale_map[boot_cpuid] = alloc_bootmem(CTX_MAP_SIZE);

	register_cpu_notifier(&mmu_context_cpu_nb);
#endif

	printk(KERN_INFO
	       "MMU: Allocated %zu bytes of context maps for %d contexts\n",
	       2 * CTX_MAP_SIZE + (sizeof(void *) * (last_context + 1)),
	       last_context - first_context + 1);

	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes first_context < 32.
	 */
	context_map[0] = (1 << first_context) - 1;
	next_context = first_context;
	nr_free_contexts = last_context - first_context + 1;
}