/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <clameter@sgi.com>
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
static struct srcu_struct srcu;

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
	srcu_read_unlock(&srcu, id);

	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&mn->hlist);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The mmu_notifier_mm can't go away from under us because one mm_count
	 * is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					unsigned long address)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, address);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}
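
/*
 * Example (illustrative sketch only, not part of the upstream file): a
 * secondary-MMU driver's ->clear_flush_young typically test-and-clears
 * its own accessed/young state for the page backing @address and
 * returns whether it was set. The "my_*" names below are hypothetical:
 *
 *	static int my_mn_clear_flush_young(struct mmu_notifier *mn,
 *					   struct mm_struct *mm,
 *					   unsigned long address)
 *	{
 *		struct my_dev *dev = container_of(mn, struct my_dev, mn);
 *
 *		/* clear the device's accessed bit, report if it was set */
 *		return my_dev_test_and_clear_young(dev, address);
 *	}
 */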

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_page(struct mm_struct *mm,
					  unsigned long address)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_page)
			mn->ops->invalidate_page(mn, mm, address);
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start)
			mn->ops->invalidate_range_start(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start);

void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);
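
/*
 * Usage sketch (illustrative only, not part of the upstream file): core
 * VM paths bracket primary page table updates with the start/end pair,
 * via the mmu_notifier_invalidate_range_start/end() wrappers, so that
 * secondary MMUs can drop their mappings for the whole range before the
 * primary page tables change, roughly:
 *
 *	mmu_notifier_invalidate_range_start(mm, start, end);
 *	... unmap or modify the primary page tables for [start, end) ...
 *	mmu_notifier_invalidate_range_end(mm, start, end);
 */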

static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	/*
	 * Verify that mmu_notifier_init() has already run and the global
	 * srcu is initialized.
	 */
	BUG_ON(!srcu.per_cpu_ref);

	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	if (!mm_has_notifiers(mm)) {
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);

		mm->mmu_notifier_mm = mmu_notifier_mm;
		mmu_notifier_mm = NULL;
	}
	atomic_inc(&mm->mm_count);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
out_clean:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
	kfree(mmu_notifier_mm);
out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}

/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must always be called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
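
/*
 * Registration sketch (illustrative only; the "my_*" names are
 * hypothetical and not part of this file): a driver embeds a struct
 * mmu_notifier in its per-client state, points ->ops at its callbacks
 * and registers against current->mm (or an mm pinned with
 * get_task_mm()), e.g.:
 *
 *	static const struct mmu_notifier_ops my_mn_ops = {
 *		.release		= my_mn_release,
 *		.invalidate_range_start	= my_mn_invalidate_range_start,
 *		.invalidate_range_end	= my_mn_invalidate_range_end,
 *	};
 *
 *	my->mn.ops = &my_mn_ops;
 *	ret = mmu_notifier_register(&my->mn, current->mm);
 *
 * A successful registration takes the mm_count pin described above; it
 * is dropped by the matching mmu_notifier_unregister().
 */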

/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister;
 * only after mmu_notifier_unregister has returned are we guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&mn->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->mmu_notifier_mm->lock);
		/*
		 * Can not use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);

static int __init mmu_notifier_init(void)
{
	return init_srcu_struct(&srcu);
}
subsys_initcall(mmu_notifier_init);