/*
 * MMU context allocation for 64-bit kernels.
 *
 * Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/pkeys.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);

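/*
 * Allocate a context id in the range [min_id, max_id] from the shared
 * IDA. ida_pre_get()/ida_get_new_above() can race with concurrent
 * allocations, so retry on -EAGAIN; if the id handed back exceeds
 * max_id, release it and fail rather than return an out-of-range id.
 */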
static int alloc_context_id(int min_id, int max_id)
{
	int index, err;

again:
	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = ida_get_new_above(&mmu_context_ida, min_id, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > max_id) {
		spin_lock(&mmu_context_lock);
		ida_remove(&mmu_context_ida, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	return index;
}

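/*
 * Reserve a specific context id for a caller that needs a fixed,
 * well-known value. ida_get_new_above() returns the first free id at
 * or above the one requested, so a result other than the requested id
 * means it was already in use (or allocation failed), and we warn.
 */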
void hash__reserve_context_id(int id)
{
	int rc, result = 0;

	do {
		if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
			break;

		spin_lock(&mmu_context_lock);
		rc = ida_get_new_above(&mmu_context_ida, id, &result);
		spin_unlock(&mmu_context_lock);
	} while (rc == -EAGAIN);

	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
}

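/*
 * The hash MMU's usable context id range depends on whether the
 * hardware supports a 68-bit or only a 65-bit virtual address space,
 * since the context id is encoded in the extended virtual address
 * used to generate VSIDs.
 */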
int hash__alloc_context_id(void)
{
	unsigned long max;

	if (mmu_has_feature(MMU_FTR_68_BIT_VA))
		max = MAX_USER_CONTEXT;
	else
		max = MAX_USER_CONTEXT_65BIT_VA;

	return alloc_context_id(MIN_USER_CONTEXT, max);
}
EXPORT_SYMBOL_GPL(hash__alloc_context_id);

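/*
 * Hash flavour of context initialisation: allocate a context id, then
 * set up the SLB address limit, slice (page size) state, subpage
 * protection and pkey state for the new address space.
 */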
static int hash__init_new_context(struct mm_struct *mm)
{
	int index;

	index = hash__alloc_context_id();
	if (index < 0)
		return index;

	/*
	 * In the case of exec, use the default limit,
	 * otherwise inherit it from the mm we are duplicating.
	 */
	if (!mm->context.slb_addr_limit)
		mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;

	/*
	 * The old code would re-promote on fork, we don't do that when using
	 * slices as it could cause problems promoting slices that have been
	 * forced down to 4K.
	 *
	 * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
	 * explicitly against context.id == 0. This ensures that we properly
	 * initialize context slice details for newly allocated mm's (which will
	 * have id == 0) and don't alter context slice inherited via fork (which
	 * will have id != 0).
	 *
	 * We should not be calling init_new_context() on init_mm. Hence a
	 * check against 0 is OK.
	 */
	if (mm->context.id == 0)
		slice_set_user_psize(mm, mmu_virtual_psize);

	subpage_prot_init_new_context(mm);

	pkey_mm_init(mm);
	return index;
}

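/*
 * Radix flavour: the context id doubles as the hardware PID. Allocate
 * one below the 2^mmu_pid_bits limit and install the new page table in
 * the process table, with the radix tree size in the RTS field.
 */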
static int radix__init_new_context(struct mm_struct *mm)
{
	unsigned long rts_field;
	int index, max_id;

	max_id = (1 << mmu_pid_bits) - 1;
	index = alloc_context_id(mmu_base_pid, max_id);
	if (index < 0)
		return index;

	/*
	 * Set the process table entry.
	 */
	rts_field = radix__get_tree_size();
	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);

	/*
	 * Order the above store with the subsequent update of the PID
	 * register (at which point HW can start loading/caching
	 * the entry) and the corresponding load by the MMU from
	 * the L2 cache.
	 */
	asm volatile("ptesync;isync" : : : "memory");

	mm->context.npu_context = NULL;

	return index;
}

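/*
 * Common entry point, called from the generic mm code on fork() and
 * exec(): dispatch to the hash or radix variant, then initialise the
 * MMU-independent parts of the context.
 */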
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	if (radix_enabled())
		index = radix__init_new_context(mm);
	else
		index = hash__init_new_context(mm);

	if (index < 0)
		return index;

	mm->context.id = index;

#ifdef CONFIG_PPC_64K_PAGES
	mm->context.pte_frag = NULL;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(mm);
#endif
	atomic_set(&mm->context.active_cpus, 0);
	atomic_set(&mm->context.copros, 0);

	return 0;
}

void __destroy_context(int context_id)
{
	spin_lock(&mmu_context_lock);
	ida_remove(&mmu_context_ida, context_id);
	spin_unlock(&mmu_context_lock);
}
EXPORT_SYMBOL_GPL(__destroy_context);

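/*
 * With 64K pages, PTE pages are carved into PTE_FRAG_NR fragments and
 * the struct page's refcount tracks them. mm->context.pte_frag points
 * at the next unused fragment, so its offset within the page gives the
 * number of fragments already handed out; drop the references held for
 * the unused remainder and free the page once the count reaches zero.
 */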
#ifdef CONFIG_PPC_64K_PAGES
static void destroy_pagetable_page(struct mm_struct *mm)
{
	int count;
	void *pte_frag;
	struct page *page;

	pte_frag = mm->context.pte_frag;
	if (!pte_frag)
		return;

	page = virt_to_page(pte_frag);
	/* drop all the pending references */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	/* We allow PTE_FRAG_NR fragments from a PTE page */
	if (page_ref_sub_and_test(page, PTE_FRAG_NR - count)) {
		pgtable_page_dtor(page);
		free_unref_page(page);
	}
}

#else
static inline void destroy_pagetable_page(struct mm_struct *mm)
{
	return;
}
#endif

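/*
 * Final teardown of a context. By the time this runs, arch_exit_mmap()
 * should already have cleared the radix process table entry; the
 * WARN_ON below catches the case where it hasn't.
 */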
void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
	if (radix_enabled())
		WARN_ON(process_tb[mm->context.id].prtb0 != 0);
	else
		subpage_prot_free(mm);
	destroy_pagetable_page(mm);
	__destroy_context(mm->context.id);
	mm->context.id = MMU_NO_CONTEXT;
}

void arch_exit_mmap(struct mm_struct *mm)
{
	if (radix_enabled()) {
		/*
		 * Radix doesn't have a valid bit in the process table
		 * entries. However we know that at least the P9
		 * implementation will avoid caching an entry with an
		 * invalid RTS field, and 0 is invalid. So this will do.
		 *
		 * This runs before the "fullmm" tlb flush in exit_mmap,
		 * which does a RIC=2 tlbie to clear the process table
		 * entry. See the "fullmm" comments in tlb-radix.c.
		 *
		 * No barrier required here after the store because
		 * this process will do the invalidate, which starts with
		 * ptesync.
		 */
		process_tb[mm->context.id].prtb0 = 0;
	}
}

#ifdef CONFIG_PPC_RADIX_MMU
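/*
 * Switch the hardware PID register to the next mm's context id.
 * POWER9 DD1 needs extra synchronisation and an ERAT flush around the
 * mtspr as an erratum workaround; later revisions only need the isync.
 */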
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		isync();
		mtspr(SPRN_PID, next->context.id);
		isync();
		asm volatile(PPC_INVALIDATE_ERAT : : : "memory");
	} else {
		mtspr(SPRN_PID, next->context.id);
		isync();
	}
}
#endif