#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	atomic_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	struct fsnotify_mark mark;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index lets us get from node.list to the containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference. Some.
 */

static struct fsnotify_group *audit_tree_group;

static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		atomic_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	atomic_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (atomic_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
	call_rcu(&chunk->head, __put_chunk);
}

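/*
 * owners[] is a flexible array; size the allocation with offsetof() so
 * that exactly @count nodes fit behind the header.  .refs starts at 1;
 * that reference is dropped via __put_chunk() once the embedded mark
 * is destroyed.
 */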
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
	chunk->mark.mask = FS_IN_IGNORED;
	return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

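/*
 * Inodes normally come from slab caches set up with SLAB_HWCACHE_ALIGN,
 * so the low log2(L1_CACHE_BYTES) bits of the pointer carry nothing;
 * dividing them away leaves the "middle bits" mentioned in the comment
 * up top as the hash input.
 */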
static inline struct list_head *chunk_hash(const struct inode *inode)
{
	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & entry->lock are held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct fsnotify_mark *entry = &chunk->mark;
	struct list_head *list;

	if (!entry->inode)
		return;
	list = chunk_hash(entry->inode);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	struct list_head *list = chunk_hash(inode);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		/* mark.inode may have gone NULL, but who cares? */
		if (p->mark.inode == inode) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return true;
	return false;
}

/* tagging and untagging inodes with trees */

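/*
 * node.index (sans the MSB) is the node's position in chunk->owners[],
 * so stepping the pointer back by that many slots lands on owners[0]
 * and container_of() recovers the enclosing chunk.
 */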
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}

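/*
 * Remove the owner @p belongs to from its chunk.  Hashed chunks are
 * never modified in place; we build a replacement with one owner less,
 * swap it into the hash and into the trees' lists, and destroy the old
 * mark.  If allocating the replacement fails, fall back to just nulling
 * the owner slot; such leftovers get skipped by later replacements.
 */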
static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct fsnotify_mark *entry = &chunk->mark;
	struct audit_chunk *new = NULL;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	fsnotify_get_mark(entry);

	spin_unlock(&hash_lock);

	if (size)
		new = alloc_chunk(size);

	spin_lock(&entry->lock);
	if (chunk->dead || !entry->inode) {
		spin_unlock(&entry->lock);
		if (new)
			free_chunk(new);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		goto out;
	}

	if (!new)
		goto Fallback;

	fsnotify_duplicate_mark(&new->mark, entry);
	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.inode, NULL, 1)) {
		fsnotify_put_mark(&new->mark);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_destroy_mark(entry, audit_tree_group);
	fsnotify_put_mark(&new->mark);	/* drop initial reference */
	goto out;

Fallback:
	// do the best we can
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
out:
	fsnotify_put_mark(entry);
	spin_lock(&hash_lock);
}

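/*
 * Attach a fresh single-owner chunk to an inode that carries no audit
 * mark yet.  The owner's index gets its MSB set; the caller's pass over
 * the mounts later commits the tagging by clearing it (see
 * audit_add_tree_rule()) or reverts it via trim_marked().
 */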
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *entry;
	struct audit_chunk *chunk = alloc_chunk(1);
	if (!chunk)
		return -ENOMEM;

	entry = &chunk->mark;
	if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
		fsnotify_put_mark(entry);
		return -ENOSPC;
	}

	spin_lock(&entry->lock);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		fsnotify_put_mark(entry);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_put_mark(entry);	/* drop initial reference */
	return 0;
}

/* the first tagged inode becomes root of tree */
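/*
 * If the inode already carries a chunk, grow it instead: allocate a copy
 * with room for one more owner, migrate the old owners over, and swap
 * the new chunk into the hash and into every tree pointing at the old
 * one.
 */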
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *old_entry, *chunk_entry;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
	if (!old_entry)
		return create_chunk(inode, tree);

	old = container_of(old_entry, struct audit_chunk, mark);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			fsnotify_put_mark(old_entry);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		fsnotify_put_mark(old_entry);
		return -ENOMEM;
	}

	chunk_entry = &chunk->mark;

	spin_lock(&old_entry->lock);
	if (!old_entry->inode) {
		/* old_entry is being shot, let's just lie */
		spin_unlock(&old_entry->lock);
		fsnotify_put_mark(old_entry);
		free_chunk(chunk);
		return -ENOENT;
	}

	fsnotify_duplicate_mark(chunk_entry, old_entry);
	if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->inode, NULL, 1)) {
		spin_unlock(&old_entry->lock);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return -ENOSPC;
	}

	/* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
	spin_lock(&chunk_entry->lock);
	spin_lock(&hash_lock);

	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&chunk_entry->lock);
		spin_unlock(&old_entry->lock);

		fsnotify_destroy_mark(chunk_entry, audit_tree_group);

		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	spin_unlock(&chunk_entry->lock);
	spin_unlock(&old_entry->lock);
	fsnotify_destroy_mark(old_entry, audit_tree_group);
	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
	fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
	return 0;
}

static void audit_tree_log_remove_rule(struct audit_krule *rule)
{
	struct audit_buffer *ab;

	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=");
	audit_log_string(ab, "remove_rule");
	audit_log_format(ab, " dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}

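/*
 * Detach every rule hanging off @tree, logging a CONFIG_CHANGE record
 * for each fully initialized one and freeing it via RCU.  Called with
 * audit_filter_mutex held.
 */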
static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_tree_log_remove_rule(rule);
			if (entry->rule.exe)
				audit_remove_mark(entry->rule.exe);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */

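/*
 * The reorder pass below moves every node with the MSB ("will prune")
 * set to the front of tree->chunks, so the loop that follows can untag
 * from the head until it hits the first committed node.
 */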
static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
	return d_backing_inode(mnt->mnt_root) == arg;
}

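/*
 * Walk all trees, marking each chunk, then clear the marks on chunks
 * still reachable from the tree's root mounts and untag the rest.  The
 * on-stack cursor keeps our place in tree_list across the stretches
 * where audit_filter_mutex has to be dropped for path lookups.
 */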
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			struct inode *inode = chunk->mark.inode;
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root, inode, root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	for (;;) {
		if (list_empty(&prune_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}

		mutex_lock(&audit_cmd_mutex);
		mutex_lock(&audit_filter_mutex);

		while (!list_empty(&prune_list)) {
			struct audit_tree *victim;

			victim = list_entry(prune_list.next,
					struct audit_tree, list);
			list_del_init(&victim->list);

			mutex_unlock(&audit_filter_mutex);

			prune_one(victim);

			mutex_lock(&audit_filter_mutex);
		}

		mutex_unlock(&audit_filter_mutex);
		mutex_unlock(&audit_cmd_mutex);
	}
	return 0;
}

static int audit_launch_prune(void)
{
	if (prune_thread)
		return 0;
	prune_thread = kthread_run(prune_tree_thread, NULL,
				"audit_prune_tree");
	if (IS_ERR(prune_thread)) {
		pr_err("cannot start thread audit_prune_tree");
		prune_thread = NULL;
		return -ENOMEM;
	}
	return 0;
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	rule->tree = NULL;
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	if (unlikely(!prune_thread)) {
		err = audit_launch_prune();
		if (err)
			goto Err;
	}

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

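/*
 * Retag all trees whose roots live under @old with the mounts collected
 * at @new; AFAICS this is what backs the AUDIT_MAKE_EQUIV command.
 */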
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

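	/*
	 * First pass: walk from the cursor to the end of tree_list,
	 * tagging matching trees and moving them to the head of the
	 * list, in front of the barrier.  The second pass below then
	 * commits (or, on failure, trims) exactly the trees that ended
	 * up in front of it.
	 */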
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}

static void audit_schedule_prune(void)
{
	wake_up_process(prune_thread);
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall. Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

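/*
 * Called via the freeing_mark callback below when the watched inode is
 * going away: unhash the chunk, detach its trees and either queue them
 * for the prune thread or move them to the current syscall's postponed
 * list, where audit_kill_trees() finishes the job synchronously.
 */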
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
	if (need_prune)
		audit_schedule_prune();
}

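/*
 * We never act on the events themselves (the mark's mask is merely
 * FS_IN_IGNORED); the mark exists so that freeing_mark fires when the
 * watched inode is on its way out.
 */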
static int audit_tree_handle_event(struct fsnotify_group *group,
				   struct inode *to_tell,
				   struct fsnotify_mark *inode_mark,
				   struct fsnotify_mark *vfsmount_mark,
				   u32 mask, void *data, int data_type,
				   const unsigned char *file_name, u32 cookie)
{
	return 0;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

	evict_chunk(chunk);

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(atomic_read(&entry->refcnt) < 1);
}

static const struct fsnotify_ops audit_tree_ops = {
	.handle_event = audit_tree_handle_event,
	.freeing_mark = audit_tree_freeing_mark,
};

static int __init audit_tree_init(void)
{
	int i;

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);