#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include "internal.h"
#include "mount.h"

static DEFINE_SPINLOCK(pin_lock);

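/*
 * pin_remove - detach a pin from the lists it sits on and mark it done.
 * Unhooks @pin from its mount and group lists, then wakes up anyone
 * sleeping in pin_kill() waiting for this pin to go away.
 */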
void pin_remove(struct fs_pin *pin)
{
	spin_lock(&pin_lock);
	hlist_del_init(&pin->m_list);
	hlist_del_init(&pin->s_list);
	spin_unlock(&pin_lock);
	spin_lock_irq(&pin->wait.lock);
	pin->done = 1;
	wake_up_locked(&pin->wait);
	spin_unlock_irq(&pin->wait.lock);
}

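/*
 * pin_insert_group - hang a pin off a mount and, if @p is non-NULL, off
 * a group list as well (e.g. a superblock's ->s_pins).
 */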
void pin_insert_group(struct fs_pin *pin, struct vfsmount *m, struct hlist_head *p)
{
	spin_lock(&pin_lock);
	if (p)
		hlist_add_head(&pin->s_list, p);
	hlist_add_head(&pin->m_list, &real_mount(m)->mnt_pins);
	spin_unlock(&pin_lock);
}

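/*
 * pin_insert - hang a pin off a mount and off the ->s_pins list of that
 * mount's superblock.
 */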
void pin_insert(struct fs_pin *pin, struct vfsmount *m)
{
	pin_insert_group(pin, m, &m->mnt_sb->s_pins);
}

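/*
 * pin_kill - take out one pin.  Called with rcu_read_lock() held; always
 * drops it.  If nobody has started killing the pin yet (->done == 0),
 * mark it in progress and call its ->kill() callback; if it is already
 * gone (->done > 0), just return; otherwise sleep until whoever is
 * killing it has called pin_remove().
 */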
void pin_kill(struct fs_pin *p)
{
	wait_queue_entry_t wait;

	if (!p) {
		rcu_read_unlock();
		return;
	}
	init_wait(&wait);
	spin_lock_irq(&p->wait.lock);
	if (likely(!p->done)) {
		p->done = -1;
		spin_unlock_irq(&p->wait.lock);
		rcu_read_unlock();
		p->kill(p);
		return;
	}
	if (p->done > 0) {
		spin_unlock_irq(&p->wait.lock);
		rcu_read_unlock();
		return;
	}
	__add_wait_queue(&p->wait, &wait);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&p->wait.lock);
		rcu_read_unlock();
		schedule();
		rcu_read_lock();
		if (likely(list_empty(&wait.entry)))
			break;
		/* OK, we know p couldn't have been freed yet */
		spin_lock_irq(&p->wait.lock);
		if (p->done > 0) {
			spin_unlock_irq(&p->wait.lock);
			break;
		}
	}
	rcu_read_unlock();
}

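/*
 * mnt_pin_kill - kill every pin hanging off @m->mnt_pins; used when the
 * mount itself is being torn down.
 */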
void mnt_pin_kill(struct mount *m)
{
	while (1) {
		struct hlist_node *p;
		rcu_read_lock();
		p = READ_ONCE(m->mnt_pins.first);
		if (!p) {
			rcu_read_unlock();
			break;
		}
		pin_kill(hlist_entry(p, struct fs_pin, m_list));
	}
}

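/*
 * group_pin_kill - kill every pin on a group list (such as a
 * superblock's ->s_pins).
 */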
void group_pin_kill(struct hlist_head *p)
{
	while (1) {
		struct hlist_node *q;
		rcu_read_lock();
		q = READ_ONCE(p->first);
		if (!q) {
			rcu_read_unlock();
			break;
		}
		pin_kill(hlist_entry(q, struct fs_pin, s_list));
	}
}