/* MN10300 MMU context management
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Modified by David Howells (dhowells@redhat.com)
 * - Derived from include/asm-m32r/mmu_context.h
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 *
 *
 * This implements an algorithm that assigns TLB PIDs to processes, giving
 * each process selective access to the TLB and thus reducing the number of
 * TLB flushes required.
 *
 * Note, however, that the M32R algorithm is technically broken as it does not
 * handle version wrap-around, and could, theoretically, have a problem with a
 * very long-lived program that sleeps long enough for the version number to
 * wrap all the way around so that its TLB mappings appear valid once again.
 */
#ifndef _ASM_MMU_CONTEXT_H
#define _ASM_MMU_CONTEXT_H

#include <linux/atomic.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>

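/* An MMU context is a 32-bit number: the bottom 8 bits are the TLB PID that
 * is actually loaded into the PIDR register, and the top 24 bits are a
 * per-CPU version (generation) number used to invalidate stale PIDs after a
 * full TLB flush.
 */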
#define MMU_CONTEXT_TLBPID_NR		256
#define MMU_CONTEXT_TLBPID_MASK		0x000000ffUL
#define MMU_CONTEXT_VERSION_MASK	0xffffff00UL
#define MMU_CONTEXT_FIRST_VERSION	0x00000100UL
#define MMU_NO_CONTEXT			0x00000000UL
#define MMU_CONTEXT_TLBPID_LOCK_NR	0

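/* Nothing needs to be done when a kernel thread temporarily adopts an mm
 * (lazy TLB mode), so this is a no-op.
 */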
#define enter_lazy_tlb(mm, tsk)	do {} while (0)

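/* Record that @cpu has run userspace for @mm by setting it in the mm's CPU
 * mask (only tracked on SMP).
 */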
static inline void cpu_ran_vm(int cpu, struct mm_struct *mm)
{
#ifdef CONFIG_SMP
	cpumask_set_cpu(cpu, mm_cpumask(mm));
#endif
}

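/* Mark @cpu in @mm's CPU mask and report whether the CPU may already have run
 * this mm; on UP builds this is always assumed to be true.
 */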
static inline bool cpu_maybe_ran_vm(int cpu, struct mm_struct *mm)
{
#ifdef CONFIG_SMP
	return cpumask_test_and_set_cpu(cpu, mm_cpumask(mm));
#else
	return true;
#endif
}

#ifdef CONFIG_MN10300_TLB_USE_PIDR
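/* mmu_context_cache[] holds, for each CPU, the last context number handed
 * out; its version field identifies the current generation of PIDs on that
 * CPU.  mm_context() picks out the context this mm was given on this CPU.
 */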
extern unsigned long mmu_context_cache[NR_CPUS];
#define mm_context(mm)	(mm->context.tlbpid[smp_processor_id()])

/**
 * allocate_mmu_context - Allocate a new MMU context (TLB PID) for an mm
 * @mm: The userspace VM context being set up
 */
static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
{
	unsigned long *pmc = &mmu_context_cache[smp_processor_id()];
	unsigned long mc = ++(*pmc);

	if (!(mc & MMU_CONTEXT_TLBPID_MASK)) {
		/* we exhausted the TLB PIDs of this version on this CPU, so we
		 * flush this CPU's TLB in its entirety and start a new
		 * version cycle */
		local_flush_tlb_all();

		/* fix the TLB version if needed (we avoid version #0 so as to
		 * distinguish MMU_NO_CONTEXT) */
		if (!mc)
			*pmc = mc = MMU_CONTEXT_FIRST_VERSION;
	}
	mm_context(mm) = mc;
	return mc;
}

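/* A context with a stale version number is useless: the TLB entries tagged
 * with its PID were wiped by the full flush that started the new version, and
 * the PID itself may since have been handed to another process.
 */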
/*
 * get an MMU context if one is needed
 */
static inline unsigned long get_mmu_context(struct mm_struct *mm)
{
	unsigned long mc = MMU_NO_CONTEXT, cache;

	if (mm) {
		cache = mmu_context_cache[smp_processor_id()];
		mc = mm_context(mm);

		/* if we have an old version of the context, replace it */
		if ((mc ^ cache) & MMU_CONTEXT_VERSION_MASK)
			mc = allocate_mmu_context(mm);
	}
	return mc;
}

/*
 * initialise the context related info for a new mm_struct instance
 */
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	int num_cpus = NR_CPUS, i;

	for (i = 0; i < num_cpus; i++)
		mm->context.tlbpid[i] = MMU_NO_CONTEXT;
	return 0;
}

/*
 * after we have set current->mm to a new value, this activates the context for
 * the new mm so we see the new mappings.
 */
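/* Loading the TLB PID into the PIDR register restricts TLB hits to entries
 * tagged with that PID, so the previous process's translations are simply
 * ignored rather than flushed.
 */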
static inline void activate_context(struct mm_struct *mm)
{
	PIDR = get_mmu_context(mm) & MMU_CONTEXT_TLBPID_MASK;
}
#else /* CONFIG_MN10300_TLB_USE_PIDR */

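/* Without PID support the TLB cannot distinguish address spaces, so switching
 * to a different mm has to flush this CPU's TLB instead.
 */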
#define init_new_context(tsk, mm)	(0)
#define activate_context(mm)		local_flush_tlb()

#endif /* CONFIG_MN10300_TLB_USE_PIDR */

/**
 * destroy_context - Destroy mm context information
 * @mm: The MM being destroyed.
 *
 * Destroy context related info for an mm_struct that is about to be put to
 * rest
 */
#define destroy_context(mm)	do {} while (0)

/**
 * switch_mm - Change between userspace virtual memory contexts
 * @prev: The outgoing MM context.
 * @next: The incoming MM context.
 * @tsk: The incoming task.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	int cpu = smp_processor_id();

	if (prev != next) {
#ifdef CONFIG_SMP
		per_cpu(cpu_tlbstate, cpu).active_mm = next;
#endif
		cpu_ran_vm(cpu, next);
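		/* point the MMU at the incoming page tables and activate the
		 * new context */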
		PTBR = (unsigned long) next->pgd;
		activate_context(next);
	}
}

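/* There is no per-mm state to tear down on deactivation, and activate_mm() is
 * just a switch_mm() with no task supplied.
 */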
#define deactivate_mm(tsk, mm)	do {} while (0)
#define activate_mm(prev, next)	switch_mm((prev), (next), NULL)

#endif /* _ASM_MMU_CONTEXT_H */