Benjamin Herrenschmidt | 5e69661 | 2008-12-18 19:13:24 +0000 | [diff] [blame] | 1 | /* |
| 2 | * This file contains the routines for handling the MMU on those |
| 3 | * PowerPC implementations where the MMU is not using the hash |
| 4 | * table, such as 8xx, 4xx, BookE's etc... |
| 5 | * |
| 6 | * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org> |
| 7 | * IBM Corp. |
| 8 | * |
| 9 | * Derived from previous arch/powerpc/mm/mmu_context.c |
| 10 | * and arch/powerpc/include/asm/mmu_context.h |
| 11 | * |
| 12 | * This program is free software; you can redistribute it and/or |
| 13 | * modify it under the terms of the GNU General Public License |
| 14 | * as published by the Free Software Foundation; either version |
| 15 | * 2 of the License, or (at your option) any later version. |
| 16 | * |
Benjamin Herrenschmidt | 2ca8cf73 | 2008-12-18 19:13:29 +0000 | [diff] [blame] | 17 | * TODO: |
| 18 | * |
| 19 | * - The global context lock will not scale very well |
| 20 | * - The maps should be dynamically allocated to allow for processors |
| 21 | * that support more PID bits at runtime |
| 22 | * - Implement flush_tlb_mm() by making the context stale and picking |
| 23 | * a new one |
| 24 | * - More aggressively clear stale map bits and maybe find some way to |
| 25 | * also clear mm->cpu_vm_mask bits when processes are migrated |
Benjamin Herrenschmidt | 5e69661 | 2008-12-18 19:13:24 +0000 | [diff] [blame] | 26 | */ |
| 27 | |
/* Debug switches (all normally disabled):
 *  - DEBUG_MAP_CONSISTENCY: cross-check the context bitmap against
 *    context_mm[] and the free/active counters on every allocation
 *    (see context_check_map()).
 *  - DEBUG_CLAMP_LAST_CONTEXT: artificially shrink the context ID space
 *    to exercise the context-stealing paths.
 *  - DEBUG_HARDER: verbose per-switch tracing via pr_hard()/pr_hardcont().
 */
//#define DEBUG_MAP_CONSISTENCY
//#define DEBUG_CLAMP_LAST_CONTEXT 31
//#define DEBUG_HARDER

/* We don't use DEBUG because it tends to be compiled in always nowadays
 * and this would generate way too much output
 */
#ifdef DEBUG_HARDER
#define pr_hard(args...)	printk(KERN_DEBUG args)
#define pr_hardcont(args...)	printk(KERN_CONT args)
#else
#define pr_hard(args...)	do { } while(0)
#define pr_hardcont(args...)	do { } while(0)
#endif
Benjamin Herrenschmidt | 2ca8cf73 | 2008-12-18 19:13:29 +0000 | [diff] [blame] | 42 | |
| 43 | #include <linux/kernel.h> |
Benjamin Herrenschmidt | 5e69661 | 2008-12-18 19:13:24 +0000 | [diff] [blame] | 44 | #include <linux/mm.h> |
| 45 | #include <linux/init.h> |
Benjamin Herrenschmidt | 7752035 | 2008-12-18 19:13:48 +0000 | [diff] [blame] | 46 | #include <linux/spinlock.h> |
| 47 | #include <linux/bootmem.h> |
| 48 | #include <linux/notifier.h> |
| 49 | #include <linux/cpu.h> |
Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 50 | #include <linux/slab.h> |
Benjamin Herrenschmidt | 5e69661 | 2008-12-18 19:13:24 +0000 | [diff] [blame] | 51 | |
| 52 | #include <asm/mmu_context.h> |
| 53 | #include <asm/tlbflush.h> |
| 54 | |
/* Range of usable context IDs on this hardware, set by mmu_context_init() */
static unsigned int first_context, last_context;
/* Next ID to try handing out, and current number of unused IDs */
static unsigned int next_context, nr_free_contexts;
/* Bitmap of in-use context IDs (indexed by ID) */
static unsigned long *context_map;
/* Per-CPU bitmap of context IDs whose TLB entries may be stale there */
static unsigned long *stale_map[NR_CPUS];
/* Reverse map: context ID -> mm currently owning it (NULL when free) */
static struct mm_struct **context_mm;
/* Protects all of the context-management state above */
static DEFINE_RAW_SPINLOCK(context_lock);

/* Size in bytes of a bitmap covering context IDs 0..last_context */
#define CTX_MAP_SIZE	\
	(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
| 64 | |
| 65 | |
/* Steal a context from a task that has one at the moment.
 *
 * This is used when we are running out of available PID numbers
 * on the processors.
 *
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :). This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *  -- paulus
 *
 * For context stealing, we use a slightly different approach for
 * SMP and UP. Basically, the UP one is simpler and doesn't use
 * the stale map as we can just flush the local CPU
 *  -- benh
 *
 * Called with context_lock held.  May temporarily drop and re-take the
 * lock; in that case it returns MMU_NO_CONTEXT and the caller must
 * restart (see the "again" loop in switch_mmu_context()).  Otherwise
 * returns the ID that was stolen, with its previous owner detached.
 */
#ifdef CONFIG_SMP
static unsigned int steal_context_smp(unsigned int id)
{
	struct mm_struct *mm;
	unsigned int cpu, max, i;

	/* Bound the scan so we examine each candidate ID at most once */
	max = last_context - first_context;

	/* Attempt to free next_context first and then loop until we manage */
	while (max--) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		/* We have a candidate victim, check if it's active, on SMP
		 * we cannot steal active contexts
		 */
		if (mm->context.active) {
			id++;
			if (id > last_context)
				id = first_context;
			continue;
		}
		pr_hardcont(" | steal %d from 0x%p", id, mm);

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;

		/* Mark it stale on all CPUs that used this mm. For threaded
		 * implementations, we set it on all threads on each core
		 * represented in the mask. A future implementation will use
		 * a core map instead but this will do for now.
		 */
		for_each_cpu(cpu, mm_cpumask(mm)) {
			for (i = cpu_first_thread_sibling(cpu);
			     i <= cpu_last_thread_sibling(cpu); i++) {
				/* stale_map[i] may be NULL for CPUs that
				 * never came online (hotplug allocates it)
				 */
				if (stale_map[i])
					__set_bit(id, stale_map[i]);
			}
			/* Skip the siblings we just covered; for_each_cpu
			 * will advance past the whole core
			 */
			cpu = i - 1;
		}
		return id;
	}

	/* This will happen if you have more CPUs than available contexts,
	 * all we can do here is wait a bit and try again
	 */
	raw_spin_unlock(&context_lock);
	cpu_relax();
	raw_spin_lock(&context_lock);

	/* This will cause the caller to try again */
	return MMU_NO_CONTEXT;
}
#endif  /* CONFIG_SMP */
| 135 | |
/* Note that this will also be called on SMP if all other CPUs are
 * offlined, which means that it may be called for cpu != 0. For
 * this to work, we somewhat assume that CPUs that are onlined
 * come up with a fully clean TLB (or are cleaned when offlined)
 *
 * Called with context_lock held.  Unlike the SMP variant this always
 * succeeds: it flushes the victim's TLB entries on the local CPU and
 * returns the same ID that was passed in, now free for reuse.
 */
static unsigned int steal_context_up(unsigned int id)
{
	struct mm_struct *mm;
	int cpu = smp_processor_id();

	/* Pick up the victim mm */
	mm = context_mm[id];

	pr_hardcont(" | steal %d from 0x%p", id, mm);

	/* Flush the TLB for that context.
	 * NOTE(review): this must happen while mm->context.id is still
	 * valid — local_flush_tlb_mm() presumably keys off the id, so
	 * keep this ordering (flush first, then clear the id).
	 */
	local_flush_tlb_mm(mm);

	/* Mark this mm as having no context anymore */
	mm->context.id = MMU_NO_CONTEXT;

	/* XXX This clear should ultimately be part of local_flush_tlb_mm */
	__clear_bit(id, stale_map[cpu]);

	return id;
}
| 162 | |
#ifdef DEBUG_MAP_CONSISTENCY
/* Debug-only sanity check: verify that the context bitmap, the
 * context_mm[] reverse map and the free/active counters are mutually
 * consistent.  Called with context_lock held (from switch_mmu_context).
 * Repairs nr_free_contexts if it has drifted, and complains via pr_err
 * for everything else.
 */
static void context_check_map(void)
{
	unsigned int id, nrf, nact;

	nrf = nact = 0;
	for (id = first_context; id <= last_context; id++) {
		int used = test_bit(id, context_map);
		if (!used)
			nrf++;
		/* A used ID must have an owner; a free one must not */
		if (used != (context_mm[id] != NULL))
			pr_err("MMU: Context %d is %s and MM is %p !\n",
			       id, used ? "used" : "free", context_mm[id]);
		if (context_mm[id] != NULL)
			nact += context_mm[id]->context.active;
	}
	if (nrf != nr_free_contexts) {
		pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
		       nr_free_contexts, nrf);
		nr_free_contexts = nrf;
	}
	if (nact > num_online_cpus())
		pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
		       nact, num_online_cpus());
	/* When first_context > 0, ID 0 is permanently reserved at init */
	if (first_context > 0 && !test_bit(0, context_map))
		pr_err("MMU: Context 0 has been freed !!!\n");
}
#else
static void context_check_map(void) { }
#endif
Benjamin Herrenschmidt | 5e69661 | 2008-12-18 19:13:24 +0000 | [diff] [blame] | 193 | |
/* Switch the MMU context from 'prev' to 'next' on the current CPU.
 *
 * If 'next' has no hardware context yet, allocate one from the free
 * pool, or steal one (steal_context_smp/steal_context_up) when the pool
 * is empty.  If the chosen context was marked stale on this CPU (or its
 * thread siblings), flush the local TLB for it before use, then program
 * the MMU via set_context().
 *
 * All ID state is protected by context_lock, taken without irqsave —
 * NOTE(review): this looks like it relies on the caller (the context
 * switch path) running with interrupts off; confirm against callers.
 */
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned int i, id, cpu = smp_processor_id();
	unsigned long *map;

	/* No lockless fast path .. yet */
	raw_spin_lock(&context_lock);

	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
		cpu, next, next->context.active, next->context.id);

#ifdef CONFIG_SMP
	/* Mark us active and the previous one not anymore */
	next->context.active++;
	if (prev) {
		pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
		WARN_ON(prev->context.active < 1);
		prev->context.active--;
	}

 again:
#endif /* CONFIG_SMP */

	/* If we already have a valid assigned context, skip all that */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT)) {
#ifdef DEBUG_MAP_CONSISTENCY
		if (context_mm[id] != next)
			pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
			       next, id, id, context_mm[id]);
#endif
		goto ctxt_ok;
	}

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > last_context)
		id = first_context;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			/* steal_context_smp() dropped the lock; the world
			 * may have changed, so restart from scratch
			 */
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
#endif /* CONFIG_SMP */
		id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it */
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, last_context+1, id);
		if (id > last_context)
			id = first_context;
	}
 stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;
	pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);

	context_check_map();
 ctxt_ok:

	/* If that context got marked stale on this CPU, then flush the
	 * local TLB for it and unmark it before we use it
	 */
	if (test_bit(id, stale_map[cpu])) {
		pr_hardcont(" | stale flush %d [%d..%d]",
			    id, cpu_first_thread_sibling(cpu),
			    cpu_last_thread_sibling(cpu));

		local_flush_tlb_mm(next);

		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
		for (i = cpu_first_thread_sibling(cpu);
		     i <= cpu_last_thread_sibling(cpu); i++) {
			/* stale_map[i] is NULL for never-onlined CPUs */
			if (stale_map[i])
				__clear_bit(id, stale_map[i]);
		}
	}

	/* Flick the MMU and release lock */
	pr_hardcont(" -> %d\n", id);
	set_context(id, next->pgd);
	raw_spin_unlock(&context_lock);
}
| 287 | |
| 288 | /* |
| 289 | * Set up the context for a new address space. |
| 290 | */ |
| 291 | int init_new_context(struct task_struct *t, struct mm_struct *mm) |
| 292 | { |
Benjamin Herrenschmidt | fcce810 | 2009-07-23 23:15:10 +0000 | [diff] [blame] | 293 | pr_hard("initing context for mm @%p\n", mm); |
| 294 | |
Benjamin Herrenschmidt | 2ca8cf73 | 2008-12-18 19:13:29 +0000 | [diff] [blame] | 295 | mm->context.id = MMU_NO_CONTEXT; |
| 296 | mm->context.active = 0; |
| 297 | |
Becky Bruce | 41151e7 | 2011-06-28 09:54:48 +0000 | [diff] [blame] | 298 | #ifdef CONFIG_PPC_MM_SLICES |
| 299 | if (slice_mm_new_context(mm)) |
| 300 | slice_set_user_psize(mm, mmu_virtual_psize); |
| 301 | #endif |
| 302 | |
Benjamin Herrenschmidt | 5e69661 | 2008-12-18 19:13:24 +0000 | [diff] [blame] | 303 | return 0; |
| 304 | } |
| 305 | |
/*
 * We're finished using the context for an address space.
 *
 * Return the mm's context ID (if any) to the free pool.  The first
 * check is a deliberate lockless fast path for the common case of an
 * mm that never got a context; the ID is re-checked under the lock
 * before anything is modified.
 */
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int id;

	/* Lockless fast path: nothing to tear down */
	if (mm->context.id == MMU_NO_CONTEXT)
		return;

	/* Nobody should still be running on this mm */
	WARN_ON(mm->context.active != 0);

	/* irqsave: unlike switch_mmu_context(), this path can be reached
	 * with interrupts enabled
	 */
	raw_spin_lock_irqsave(&context_lock, flags);
	id = mm->context.id;
	if (id != MMU_NO_CONTEXT) {
		__clear_bit(id, context_map);
		mm->context.id = MMU_NO_CONTEXT;
#ifdef DEBUG_MAP_CONSISTENCY
		mm->context.active = 0;
#endif
		context_mm[id] = NULL;
		nr_free_contexts++;
	}
	raw_spin_unlock_irqrestore(&context_lock, flags);
}
| 332 | |
#ifdef CONFIG_SMP

/* CPU hotplug callback: allocate the stale-context map for a CPU coming
 * online and free it when the CPU goes away.
 *
 * Returns NOTIFY_OK on success.  CPU_UP_PREPARE is vetoed with -ENOMEM
 * if the map cannot be allocated: switch_mmu_context() dereferences
 * stale_map[cpu] unconditionally for the running CPU, so letting the
 * CPU come up with a NULL map would oops on its first context switch.
 */
static int mmu_context_cpu_notify(struct notifier_block *self,
				  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;

	/* We don't touch the boot CPU map, it's allocated at boot and kept
	 * around forever
	 */
	if (cpu == boot_cpuid)
		return NOTIFY_OK;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
		stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
		/* Fail the onlining rather than run without a stale map */
		if (!stale_map[cpu])
			return notifier_from_errno(-ENOMEM);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
		/* kfree(NULL) is a no-op, so this is safe even when
		 * CPU_UP_PREPARE failed above
		 */
		kfree(stale_map[cpu]);
		stale_map[cpu] = NULL;

		/* We also clear the cpu_vm_mask bits of CPUs going away */
		clear_tasks_mm_cpumask(cpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block mmu_context_cpu_nb = {
	.notifier_call = mmu_context_cpu_notify,
};

#endif /* CONFIG_SMP */
Benjamin Herrenschmidt | 5e69661 | 2008-12-18 19:13:24 +0000 | [diff] [blame] | 374 | |
/*
 * Initialize the context management stuff.
 *
 * Boot-time (__init) setup: picks the context ID range for the MMU
 * family, allocates the bitmap, reverse map and the boot CPU's stale
 * map from memblock, and (on SMP) registers the hotplug notifier that
 * manages stale maps for other CPUs.
 */
void __init mmu_context_init(void)
{
	/* Mark init_mm as being active on all possible CPUs since
	 * we'll get called with prev == init_mm the first time
	 * we schedule on a given CPU
	 */
	init_mm.context.active = NR_CPUS;

	/*
	 * The MPC8xx has only 16 contexts. We rotate through them on each
	 * task switch. A better way would be to keep track of tasks that
	 * own contexts, and implement an LRU usage. That way very active
	 * tasks don't always have to pay the TLB reload overhead. The
	 * kernel pages are mapped shared, so the kernel can run on behalf
	 * of any task that makes a kernel entry. Shared does not mean they
	 * are not protected, just that the ASID comparison is not performed.
	 *      -- Dan
	 *
	 * The IBM4xx has 256 contexts, so we can just rotate through these
	 * as a way of "switching" contexts. If the TID of the TLB is zero,
	 * the PID/TID comparison is disabled, so we can use a TID of zero
	 * to represent all kernel pages as shared among all contexts.
	 *      -- Dan
	 *
	 * The IBM 47x core supports 16-bit PIDs, thus 65535 contexts. We
	 * should normally never have to steal though the facility is
	 * present if needed.
	 *      -- BenH
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
		first_context = 0;
		last_context = 15;
	} else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
		first_context = 1;
		last_context = 65535;
	} else {
		first_context = 1;
		last_context = 255;
	}

#ifdef DEBUG_CLAMP_LAST_CONTEXT
	last_context = DEBUG_CLAMP_LAST_CONTEXT;
#endif
	/*
	 * Allocate the maps used by context management
	 */
	context_map = memblock_virt_alloc(CTX_MAP_SIZE, 0);
	context_mm = memblock_virt_alloc(sizeof(void *) * (last_context + 1), 0);
#ifndef CONFIG_SMP
	stale_map[0] = memblock_virt_alloc(CTX_MAP_SIZE, 0);
#else
	/* Only the boot CPU's stale map is allocated here; the hotplug
	 * notifier allocates/frees the maps for all other CPUs
	 */
	stale_map[boot_cpuid] = memblock_virt_alloc(CTX_MAP_SIZE, 0);

	register_cpu_notifier(&mmu_context_cpu_nb);
#endif

	printk(KERN_INFO
	       "MMU: Allocated %zu bytes of context maps for %d contexts\n",
	       2 * CTX_MAP_SIZE + (sizeof(void *) * (last_context + 1)),
	       last_context - first_context + 1);

	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes first_context < 32.
	 */
	context_map[0] = (1 << first_context) - 1;
	next_context = first_context;
	nr_free_contexts = last_context - first_context + 1;
}
| 449 | |