/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU is not using the hash
 * table, such as the 8xx, 4xx and BookE families.
 *
 * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                IBM Corp.
 *
 * Derived from previous arch/powerpc/mm/mmu_context.c
 * and arch/powerpc/include/asm/mmu_context.h
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * TODO:
 *
 *   - The global context lock will not scale very well
 *   - The maps should be dynamically allocated to allow for processors
 *     that support more PID bits at runtime
 *   - Implement flush_tlb_mm() by making the context stale and picking
 *     a new one
 *   - More aggressively clear stale map bits and maybe find some way to
 *     also clear mm->cpu_vm_mask bits when processes are migrated
 */

//#define DEBUG_MAP_CONSISTENCY
//#define DEBUG_CLAMP_LAST_CONTEXT 31
//#define DEBUG_HARDER

/* We don't use DEBUG because it tends to always be compiled in nowadays,
 * and that would generate way too much output
 */
#ifdef DEBUG_HARDER
#define pr_hard(args...)	printk(KERN_DEBUG args)
#define pr_hardcont(args...)	printk(KERN_CONT args)
#else
#define pr_hard(args...)	do { } while (0)
#define pr_hardcont(args...)	do { } while (0)
#endif

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "mmu_decl.h"

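/* Book-keeping for hardware contexts (PIDs):
 *
 *   context_map   - bitmap of context ids currently handed out
 *   context_mm[i] - the mm_struct that currently owns context id i
 *   stale_map[c]  - per-CPU bitmap of context ids whose TLB entries on
 *                   CPU c may be stale and must be flushed before reuse
 */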
static unsigned int first_context, last_context;
static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
static unsigned long *stale_map[NR_CPUS];
static struct mm_struct **context_mm;
static DEFINE_RAW_SPINLOCK(context_lock);
static bool no_selective_tlbil;

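/* Size in bytes of a bitmap holding one bit per context id (0..last_context) */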
#define CTX_MAP_SIZE \
	(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))


/* Steal a context from a task that has one at the moment.
 *
 * This is used when we are running out of available PID numbers
 * on the processors.
 *
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :). This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *  -- paulus
 *
 * For context stealing, we use a slightly different approach for
 * SMP and UP. Basically, the UP one is simpler and doesn't use
 * the stale map as we can just flush the local CPU
 *  -- benh
 */
#ifdef CONFIG_SMP
static unsigned int steal_context_smp(unsigned int id)
{
	struct mm_struct *mm;
	unsigned int cpu, max, i;

	max = last_context - first_context;

	/* Attempt to free next_context first and then loop until we manage */
	while (max--) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		/* We have a candidate victim, check if it's active: on SMP
		 * we cannot steal active contexts
		 */
		if (mm->context.active) {
			id++;
			if (id > last_context)
				id = first_context;
			continue;
		}
		pr_hardcont(" | steal %d from 0x%p", id, mm);

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;

		/* Mark it stale on all CPUs that used this mm. For threaded
		 * implementations, we set it on all threads on each core
		 * represented in the mask. A future implementation will use
		 * a core map instead but this will do for now.
		 */
		for_each_cpu(cpu, mm_cpumask(mm)) {
			for (i = cpu_first_thread_sibling(cpu);
			     i <= cpu_last_thread_sibling(cpu); i++) {
				if (stale_map[i])
					__set_bit(id, stale_map[i]);
			}
			cpu = i - 1;
		}
		return id;
	}

	/* This will happen if you have more CPUs than available contexts,
	 * all we can do here is wait a bit and try again
	 */
	raw_spin_unlock(&context_lock);
	cpu_relax();
	raw_spin_lock(&context_lock);

	/* This will cause the caller to try again */
	return MMU_NO_CONTEXT;
}
#endif  /* CONFIG_SMP */

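/* Steal all contexts at once.
 *
 * Used on processors that cannot invalidate the TLB entries of a single
 * PID selectively (e.g. the 8xx): every context except the one being
 * handed out is freed and the whole TLB is flushed, so this must only
 * run with all other CPUs offline.
 */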
static unsigned int steal_all_contexts(void)
{
	struct mm_struct *mm;
	int cpu = smp_processor_id();
	unsigned int id;

	for (id = first_context; id <= last_context; id++) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		pr_hardcont(" | steal %d from 0x%p", id, mm);

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;
		if (id != first_context) {
			context_mm[id] = NULL;
			__clear_bit(id, context_map);
#ifdef DEBUG_MAP_CONSISTENCY
			mm->context.active = 0;
#endif
		}
		__clear_bit(id, stale_map[cpu]);
	}

	/* Flush the TLB for all contexts (not to be used on SMP) */
	_tlbil_all();

	nr_free_contexts = last_context - first_context;

	return first_context;
}

/* Note that this will also be called on SMP if all other CPUs are
 * offlined, which means that it may be called for cpu != 0. For
 * this to work, we somewhat assume that CPUs that are onlined
 * come up with a fully clean TLB (or are cleaned when offlined)
 */
static unsigned int steal_context_up(unsigned int id)
{
	struct mm_struct *mm;
	int cpu = smp_processor_id();

	/* Pick up the victim mm */
	mm = context_mm[id];

	pr_hardcont(" | steal %d from 0x%p", id, mm);

	/* Flush the TLB for that context */
	local_flush_tlb_mm(mm);

	/* Mark this mm as having no context anymore */
	mm->context.id = MMU_NO_CONTEXT;

	/* XXX This clear should ultimately be part of local_flush_tlb_mm */
	__clear_bit(id, stale_map[cpu]);

	return id;
}

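/* Debug-only consistency check: the free context count, the context
 * bitmap and the context_mm[] array must all agree with each other.
 */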
#ifdef DEBUG_MAP_CONSISTENCY
static void context_check_map(void)
{
	unsigned int id, nrf, nact;

	nrf = nact = 0;
	for (id = first_context; id <= last_context; id++) {
		int used = test_bit(id, context_map);
		if (!used)
			nrf++;
		if (used != (context_mm[id] != NULL))
			pr_err("MMU: Context %d is %s and MM is %p !\n",
			       id, used ? "used" : "free", context_mm[id]);
		if (context_mm[id] != NULL)
			nact += context_mm[id]->context.active;
	}
	if (nrf != nr_free_contexts) {
		pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
		       nr_free_contexts, nrf);
		nr_free_contexts = nrf;
	}
	if (nact > num_online_cpus())
		pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
		       nact, num_online_cpus());
	if (first_context > 0 && !test_bit(0, context_map))
		pr_err("MMU: Context 0 has been freed !!!\n");
}
#else
static void context_check_map(void) { }
#endif

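/* Assign a hardware context id to "next" at context switch time: reuse
 * its existing id if it has one, otherwise allocate or steal one, flush
 * any TLB entries that went stale for that id on this CPU, and finally
 * point the MMU at the new context and page tables.
 */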
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned int i, id, cpu = smp_processor_id();
	unsigned long *map;

	/* No lockless fast path .. yet */
	raw_spin_lock(&context_lock);

	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
		cpu, next, next->context.active, next->context.id);

#ifdef CONFIG_SMP
	/* Mark us active and the previous one not anymore */
	next->context.active++;
	if (prev) {
		pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
		WARN_ON(prev->context.active < 1);
		prev->context.active--;
	}

 again:
#endif /* CONFIG_SMP */

	/* If we already have a valid assigned context, skip all that */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT)) {
#ifdef DEBUG_MAP_CONSISTENCY
		if (context_mm[id] != next)
			pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
			       next, id, id, context_mm[id]);
#endif
		goto ctxt_ok;
	}

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > last_context)
		id = first_context;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
#endif /* CONFIG_SMP */
		if (no_selective_tlbil)
			id = steal_all_contexts();
		else
			id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it */
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, last_context+1, id);
		if (id > last_context)
			id = first_context;
	}
 stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;
	pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);

	context_check_map();
 ctxt_ok:

	/* If that context got marked stale on this CPU, then flush the
	 * local TLB for it and unmark it before we use it
	 */
	if (test_bit(id, stale_map[cpu])) {
		pr_hardcont(" | stale flush %d [%d..%d]",
			    id, cpu_first_thread_sibling(cpu),
			    cpu_last_thread_sibling(cpu));

		local_flush_tlb_mm(next);

		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
		for (i = cpu_first_thread_sibling(cpu);
		     i <= cpu_last_thread_sibling(cpu); i++) {
			if (stale_map[i])
				__clear_bit(id, stale_map[i]);
		}
	}

	/* Flick the MMU and release lock */
	pr_hardcont(" -> %d\n", id);
	set_context(id, next->pgd);
	raw_spin_unlock(&context_lock);
}

/*
 * Set up the context for a new address space.
 */
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
	pr_hard("initing context for mm @%p\n", mm);

	mm->context.id = MMU_NO_CONTEXT;
	mm->context.active = 0;

#ifdef CONFIG_PPC_MM_SLICES
	if (slice_mm_new_context(mm))
		slice_set_user_psize(mm, mmu_virtual_psize);
#endif

	return 0;
}

/*
 * We're finished using the context for an address space.
 */
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int id;

	if (mm->context.id == MMU_NO_CONTEXT)
		return;

	WARN_ON(mm->context.active != 0);

	raw_spin_lock_irqsave(&context_lock, flags);
	id = mm->context.id;
	if (id != MMU_NO_CONTEXT) {
		__clear_bit(id, context_map);
		mm->context.id = MMU_NO_CONTEXT;
#ifdef DEBUG_MAP_CONSISTENCY
		mm->context.active = 0;
#endif
		context_mm[id] = NULL;
		nr_free_contexts++;
	}
	raw_spin_unlock_irqrestore(&context_lock, flags);
}

#ifdef CONFIG_SMP

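/* CPU hotplug notifier: allocate a stale map when a secondary CPU is
 * brought up and free it again when that CPU goes away. The boot CPU's
 * map is allocated once in mmu_context_init() and never freed.
 */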
static int mmu_context_cpu_notify(struct notifier_block *self,
				  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;

	/* We don't touch the boot CPU's map, it's allocated at boot and
	 * kept around forever
	 */
	if (cpu == boot_cpuid)
		return NOTIFY_OK;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
		stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
		kfree(stale_map[cpu]);
		stale_map[cpu] = NULL;

		/* We also clear the cpu_vm_mask bits of CPUs going away */
		clear_tasks_mm_cpumask(cpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block mmu_context_cpu_nb = {
	.notifier_call	= mmu_context_cpu_notify,
};

#endif /* CONFIG_SMP */

/*
 * Initialize the context management stuff.
 */
void __init mmu_context_init(void)
{
	/* Mark init_mm as being active on all possible CPUs since
	 * we'll get called with prev == init_mm the first time
	 * we schedule on a given CPU
	 */
	init_mm.context.active = NR_CPUS;

	/*
	 * The MPC8xx has only 16 contexts. We rotate through them on each
	 * task switch. A better way would be to keep track of tasks that
	 * own contexts, and implement an LRU usage. That way very active
	 * tasks don't always have to pay the TLB reload overhead. The
	 * kernel pages are mapped shared, so the kernel can run on behalf
	 * of any task that makes a kernel entry. Shared does not mean they
	 * are not protected, just that the ASID comparison is not performed.
	 *	-- Dan
	 *
	 * The IBM4xx has 256 contexts, so we can just rotate through these
	 * as a way of "switching" contexts. If the TID of the TLB is zero,
	 * the PID/TID comparison is disabled, so we can use a TID of zero
	 * to represent all kernel pages as shared among all contexts.
	 *	-- Dan
	 *
	 * The IBM 47x core supports 16-bit PIDs, thus 65535 contexts. We
	 * should normally never have to steal though the facility is
	 * present if needed.
	 *	-- BenH
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
		first_context = 0;
		last_context = 15;
		no_selective_tlbil = true;
	} else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
		first_context = 1;
		last_context = 65535;
		no_selective_tlbil = false;
	} else {
		first_context = 1;
		last_context = 255;
		no_selective_tlbil = false;
	}

#ifdef DEBUG_CLAMP_LAST_CONTEXT
	last_context = DEBUG_CLAMP_LAST_CONTEXT;
#endif
	/*
	 * Allocate the maps used by context management
	 */
	context_map = memblock_virt_alloc(CTX_MAP_SIZE, 0);
	context_mm = memblock_virt_alloc(sizeof(void *) * (last_context + 1), 0);
#ifndef CONFIG_SMP
	stale_map[0] = memblock_virt_alloc(CTX_MAP_SIZE, 0);
#else
	stale_map[boot_cpuid] = memblock_virt_alloc(CTX_MAP_SIZE, 0);

	register_cpu_notifier(&mmu_context_cpu_nb);
#endif

	printk(KERN_INFO
	       "MMU: Allocated %zu bytes of context maps for %d contexts\n",
	       2 * CTX_MAP_SIZE + (sizeof(void *) * (last_context + 1)),
	       last_context - first_context + 1);

	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes first_context < 32.
	 */
	context_map[0] = (1 << first_context) - 1;
	next_context = first_context;
	nr_free_contexts = last_context - first_context + 1;
}
