/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <clameter@sgi.com>
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
static struct srcu_struct srcu;
/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel, even though no task is using this mm any more, through
 * the vmas outside of the exit_mmap context, such as with vmtruncate.
 * This serializes against mmu_notifier_unregister with the
 * mmu_notifier_mm->lock in addition to SRCU, and it serializes against
 * the other mmu notifiers with SRCU. struct mmu_notifier_mm can't go
 * away from under us as exit_mmap holds an mm_count pin itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int id;

	/*
	 * srcu_read_lock() here will block synchronize_srcu() in
	 * mmu_notifier_unregister() until all registered
	 * ->release() callouts this function makes have
	 * returned.
	 */
	id = srcu_read_lock(&srcu);
	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);

		/*
		 * Unlink.  This will prevent mmu_notifier_unregister()
		 * from also making the ->release() callout.
		 */
		hlist_del_init_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);

		/*
		 * Clear sptes. (see 'release' description in mmu_notifier.h)
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);

		spin_lock(&mm->mmu_notifier_mm->lock);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);

	/*
	 * All callouts to ->release() which we have done are complete.
	 * Allow synchronize_srcu() in mmu_notifier_unregister() to complete.
	 */
	srcu_read_unlock(&srcu, id);

	/*
	 * mmu_notifier_unregister() may have unlinked a notifier and may
	 * still be calling out to it. Additionally, other notifiers
	 * may have been active via vmtruncate() et al. Block here
	 * to ensure that all notifier callouts for this mm have been
	 * completed and the sptes are really cleaned up before returning
	 * to exit_mmap().
	 */
	synchronize_srcu(&srcu);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				     unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, address);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

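/*
 * Like ->clear_flush_young but non-destructive: report whether the
 * address is young without clearing the state, stopping at the first
 * notifier that says it is.
 */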
int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

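/*
 * Notify all registered notifiers that the pte at @address has been
 * updated to @pte, so they can update their secondary mapping in place
 * instead of tearing it down.
 */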
void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
		/*
		 * Some drivers don't have change_pte,
		 * so we must call invalidate_page in that case.
		 */
		else if (mn->ops->invalidate_page)
			mn->ops->invalidate_page(mn, mm, address);
	}
	srcu_read_unlock(&srcu, id);
}

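/*
 * Tell all registered notifiers to drop any secondary mapping they hold
 * for the single page at @address.
 */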
void __mmu_notifier_invalidate_page(struct mm_struct *mm,
				    unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_page)
			mn->ops->invalidate_page(mn, mm, address);
	}
	srcu_read_unlock(&srcu, id);
}

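/*
 * Begin invalidation of [start, end): must always be paired with a
 * matching __mmu_notifier_invalidate_range_end() call (see the
 * invalidate_range_start/end description in mmu_notifier.h).
 */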
void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
					   unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start)
			mn->ops->invalidate_range_start(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}

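/*
 * Close an invalidation started by __mmu_notifier_invalidate_range_start():
 * after this returns, notifiers may establish new secondary mappings for
 * the range again.
 */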
void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
					 unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}

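/*
 * Common implementation for the two registration entry points below;
 * @take_mmap_sem selects whether mmap_sem is taken here or is already
 * held by the caller.
 */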
static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	/*
	 * Verify that mmu_notifier_init() has already run and the global
	 * srcu is initialized.
	 */
	BUG_ON(!srcu.per_cpu_ref);

	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_cleanup;

	if (!mm_has_notifiers(mm)) {
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);
		mm->mmu_notifier_mm = mmu_notifier_mm;
		mmu_notifier_mm = NULL;
	}
	atomic_inc(&mm->mm_count);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
out_cleanup:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
	/* kfree() does nothing if mmu_notifier_mm is NULL */
	kfree(mmu_notifier_mm);
out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}

/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs, to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must always be called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);

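/*
 * Example usage (an illustrative sketch, not part of this file; the
 * my_* names are hypothetical):
 *
 *	static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
 *	{
 *		... tear down all secondary mappings for this mm ...
 *	}
 *
 *	static const struct mmu_notifier_ops my_ops = {
 *		.release = my_release,
 *	};
 *
 *	static struct mmu_notifier my_notifier = { .ops = &my_ops };
 *
 *	err = mmu_notifier_register(&my_notifier, current->mm);
 *	...
 *	mmu_notifier_unregister(&my_notifier, current->mm);
 */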

/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister returns are we guaranteed
 * that ->release or any other method can no longer run.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	spin_lock(&mm->mmu_notifier_mm->lock);
	if (!hlist_unhashed(&mn->hlist)) {
		int id;

		/*
		 * Ensure we synchronize up with __mmu_notifier_release().
		 */
		id = srcu_read_lock(&srcu);

		hlist_del_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);

		if (mn->ops->release)
			mn->ops->release(mn, mm);

		/*
		 * Allow __mmu_notifier_release() to complete.
		 */
		srcu_read_unlock(&srcu, id);
	} else
		spin_unlock(&mm->mmu_notifier_mm->lock);

	/*
	 * Wait for any running method to finish, including ->release() if it
	 * was run by __mmu_notifier_release() instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);

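/*
 * Set up the global SRCU domain; mmu_notifier_register() BUG()s via the
 * srcu.per_cpu_ref check above if this has not run yet.
 */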
static int __init mmu_notifier_init(void)
{
	return init_srcu_struct(&srcu);
}

module_init(mmu_notifier_init);