/*
 *
 * Copyright (C) 2011 Novell Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/file.h>
#include <linux/xattr.h>
#include <linux/rbtree.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/ratelimit.h>
#include "overlayfs.h"

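/*
 * A single cached directory entry.  Entries are linked on the cache's
 * 'entries' list in the order they are emitted to userspace and inserted
 * into an rb-tree keyed by name for duplicate detection and lookup.
 * DT_CHR entries are additionally chained via next_maybe_whiteout and
 * checked for being whiteouts once the layer has been read.
 */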
struct ovl_cache_entry {
	unsigned int len;
	unsigned int type;
	u64 real_ino;
	u64 ino;
	struct list_head l_node;
	struct rb_node node;
	struct ovl_cache_entry *next_maybe_whiteout;
	bool is_whiteout;
	char name[];
};

struct ovl_dir_cache {
	long refcount;
	u64 version;
	struct list_head entries;
	struct rb_root root;
};

struct ovl_readdir_data {
	struct dir_context ctx;
	struct dentry *dentry;
	bool is_lowest;
	struct rb_root *root;
	struct list_head *list;
	struct list_head middle;
	struct ovl_cache_entry *first_maybe_whiteout;
	int count;
	int err;
	bool is_upper;
	bool d_type_supported;
};

struct ovl_dir_file {
	bool is_real;
	bool is_upper;
	struct ovl_dir_cache *cache;
	struct list_head *cursor;
	struct file *realfile;
	struct file *upperfile;
};

static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n)
{
	return rb_entry(n, struct ovl_cache_entry, node);
}

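/*
 * rb-tree lookup helpers: entries are ordered by name.  strncmp() over the
 * length of the name being looked up, combined with the length comparison,
 * makes a name that is a prefix of a longer cached name sort before it.
 */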
static bool ovl_cache_entry_find_link(const char *name, int len,
				      struct rb_node ***link,
				      struct rb_node **parent)
{
	bool found = false;
	struct rb_node **newp = *link;

	while (!found && *newp) {
		int cmp;
		struct ovl_cache_entry *tmp;

		*parent = *newp;
		tmp = ovl_cache_entry_from_node(*newp);
		cmp = strncmp(name, tmp->name, len);
		if (cmp > 0)
			newp = &tmp->node.rb_right;
		else if (cmp < 0 || len < tmp->len)
			newp = &tmp->node.rb_left;
		else
			found = true;
	}
	*link = newp;

	return found;
}

static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
						    const char *name, int len)
{
	struct rb_node *node = root->rb_node;
	int cmp;

	while (node) {
		struct ovl_cache_entry *p = ovl_cache_entry_from_node(node);

		cmp = strncmp(name, p->name, len);
		if (cmp > 0)
			node = p->node.rb_right;
		else if (cmp < 0 || len < p->len)
			node = p->node.rb_left;
		else
			return p;
	}

	return NULL;
}

static bool ovl_calc_d_ino(struct ovl_readdir_data *rdd,
			   struct ovl_cache_entry *p)
{
	/* Don't care if not doing ovl_iterate() */
	if (!rdd->dentry)
		return false;

	/* Always recalc d_ino for parent */
	if (strcmp(p->name, "..") == 0)
		return true;

	/* If this is lower, then native d_ino will do */
	if (!rdd->is_upper)
		return false;

	/*
	 * Recalc d_ino for '.' and for all entries if dir is impure (contains
	 * copied up entries)
	 */
	if ((p->name[0] == '.' && p->len == 1) ||
	    ovl_test_flag(OVL_IMPURE, d_inode(rdd->dentry)))
		return true;

	return false;
}

static struct ovl_cache_entry *ovl_cache_entry_new(struct ovl_readdir_data *rdd,
						   const char *name, int len,
						   u64 ino, unsigned int d_type)
{
	struct ovl_cache_entry *p;
	size_t size = offsetof(struct ovl_cache_entry, name[len + 1]);

	p = kmalloc(size, GFP_KERNEL);
	if (!p)
		return NULL;

	memcpy(p->name, name, len);
	p->name[len] = '\0';
	p->len = len;
	p->type = d_type;
	p->real_ino = ino;
	p->ino = ino;
	/* Defer setting d_ino for upper entry to ovl_iterate() */
	if (ovl_calc_d_ino(rdd, p))
		p->ino = 0;
	p->is_whiteout = false;

	if (d_type == DT_CHR) {
		p->next_maybe_whiteout = rdd->first_maybe_whiteout;
		rdd->first_maybe_whiteout = p;
	}
	return p;
}

static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
				  const char *name, int len, u64 ino,
				  unsigned int d_type)
{
	struct rb_node **newp = &rdd->root->rb_node;
	struct rb_node *parent = NULL;
	struct ovl_cache_entry *p;

	if (ovl_cache_entry_find_link(name, len, &newp, &parent))
		return 0;

	p = ovl_cache_entry_new(rdd, name, len, ino, d_type);
	if (p == NULL) {
		rdd->err = -ENOMEM;
		return -ENOMEM;
	}

	list_add_tail(&p->l_node, rdd->list);
	rb_link_node(&p->node, parent, newp);
	rb_insert_color(&p->node, rdd->root);

	return 0;
}

static int ovl_fill_lowest(struct ovl_readdir_data *rdd,
			   const char *name, int namelen,
			   loff_t offset, u64 ino, unsigned int d_type)
{
	struct ovl_cache_entry *p;

	p = ovl_cache_entry_find(rdd->root, name, namelen);
	if (p) {
		list_move_tail(&p->l_node, &rdd->middle);
	} else {
		p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
		if (p == NULL)
			rdd->err = -ENOMEM;
		else
			list_add_tail(&p->l_node, &rdd->middle);
	}

	return rdd->err;
}

void ovl_cache_free(struct list_head *list)
{
	struct ovl_cache_entry *p;
	struct ovl_cache_entry *n;

	list_for_each_entry_safe(p, n, list, l_node)
		kfree(p);

	INIT_LIST_HEAD(list);
}

void ovl_dir_cache_free(struct inode *inode)
{
	struct ovl_dir_cache *cache = ovl_dir_cache(inode);

	if (cache) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
	}
}

static void ovl_cache_put(struct ovl_dir_file *od, struct dentry *dentry)
{
	struct ovl_dir_cache *cache = od->cache;

	WARN_ON(cache->refcount <= 0);
	cache->refcount--;
	if (!cache->refcount) {
		if (ovl_dir_cache(d_inode(dentry)) == cache)
			ovl_set_dir_cache(d_inode(dentry), NULL);

		ovl_cache_free(&cache->entries);
		kfree(cache);
	}
}

static int ovl_fill_merge(struct dir_context *ctx, const char *name,
			  int namelen, loff_t offset, u64 ino,
			  unsigned int d_type)
{
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	rdd->count++;
	if (!rdd->is_lowest)
		return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type);
	else
		return ovl_fill_lowest(rdd, name, namelen, offset, ino, d_type);
}

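/*
 * Resolve the DT_CHR entries collected while reading a layer: look each one
 * up under the overlay mounter's credentials and mark the cache entry as a
 * whiteout if the dentry is an overlayfs whiteout device node.
 */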
static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd)
{
	int err;
	struct ovl_cache_entry *p;
	struct dentry *dentry;
	const struct cred *old_cred;

	old_cred = ovl_override_creds(rdd->dentry->d_sb);

	err = down_write_killable(&dir->d_inode->i_rwsem);
	if (!err) {
		while (rdd->first_maybe_whiteout) {
			p = rdd->first_maybe_whiteout;
			rdd->first_maybe_whiteout = p->next_maybe_whiteout;
			dentry = lookup_one_len(p->name, dir, p->len);
			if (!IS_ERR(dentry)) {
				p->is_whiteout = ovl_is_whiteout(dentry);
				dput(dentry);
			}
		}
		inode_unlock(dir->d_inode);
	}
	revert_creds(old_cred);

	return err;
}

static inline int ovl_dir_read(struct path *realpath,
			       struct ovl_readdir_data *rdd)
{
	struct file *realfile;
	int err;

	realfile = ovl_path_open(realpath, O_RDONLY | O_DIRECTORY);
	if (IS_ERR(realfile))
		return PTR_ERR(realfile);

	rdd->first_maybe_whiteout = NULL;
	rdd->ctx.pos = 0;
	do {
		rdd->count = 0;
		rdd->err = 0;
		err = iterate_dir(realfile, &rdd->ctx);
		if (err >= 0)
			err = rdd->err;
	} while (!err && rdd->count);

	if (!err && rdd->first_maybe_whiteout && rdd->dentry)
		err = ovl_check_whiteouts(realpath->dentry, rdd);

	fput(realfile);

	return err;
}

/*
 * Can we iterate real dir directly?
 *
 * Non-merge dir may contain whiteouts from a time it was a merge upper, before
 * lower dir was removed under it and possibly before it was rotated from upper
 * to lower layer.
 */
static bool ovl_dir_is_real(struct dentry *dir)
{
	return !ovl_test_flag(OVL_WHITEOUTS, d_inode(dir));
}

static void ovl_dir_reset(struct file *file)
{
	struct ovl_dir_file *od = file->private_data;
	struct ovl_dir_cache *cache = od->cache;
	struct dentry *dentry = file->f_path.dentry;
	bool is_real;

	if (cache && ovl_dentry_version_get(dentry) != cache->version) {
		ovl_cache_put(od, dentry);
		od->cache = NULL;
		od->cursor = NULL;
	}
	is_real = ovl_dir_is_real(dentry);
	if (od->is_real != is_real) {
		/* is_real can only become false when dir is copied up */
		if (WARN_ON(is_real))
			return;
		od->is_real = false;
	}
}

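/*
 * Build the merged directory listing: upper and middle layers are read
 * first, filling both the list and the rb-tree so that duplicate names and
 * whiteouts can be tracked.  The lowest layer is read last with is_lowest
 * set, and its new entries are inserted before the upper ones (via the
 * 'middle' list head) so that directory offsets stay reasonably constant.
 */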
static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list,
			       struct rb_root *root)
{
	int err;
	struct path realpath;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.dentry = dentry,
		.list = list,
		.root = root,
		.is_lowest = false,
	};
	int idx, next;

	for (idx = 0; idx != -1; idx = next) {
		next = ovl_path_next(idx, dentry, &realpath);
		rdd.is_upper = ovl_dentry_upper(dentry) == realpath.dentry;

		if (next != -1) {
			err = ovl_dir_read(&realpath, &rdd);
			if (err)
				break;
		} else {
			/*
			 * Insert lowest layer entries before upper ones, this
			 * allows offsets to be reasonably constant
			 */
			list_add(&rdd.middle, rdd.list);
			rdd.is_lowest = true;
			err = ovl_dir_read(&realpath, &rdd);
			list_del(&rdd.middle);
		}
	}
	return err;
}

static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
{
	struct list_head *p;
	loff_t off = 0;

	list_for_each(p, &od->cache->entries) {
		if (off >= pos)
			break;
		off++;
	}
	/* Cursor is safe since the cache is stable */
	od->cursor = p;
}

static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
{
	int res;
	struct ovl_dir_cache *cache;

	cache = ovl_dir_cache(d_inode(dentry));
	if (cache && ovl_dentry_version_get(dentry) == cache->version) {
		WARN_ON(!cache->refcount);
		cache->refcount++;
		return cache;
	}
	ovl_set_dir_cache(d_inode(dentry), NULL);

	cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
	if (!cache)
		return ERR_PTR(-ENOMEM);

	cache->refcount = 1;
	INIT_LIST_HEAD(&cache->entries);
	cache->root = RB_ROOT;

	res = ovl_dir_read_merged(dentry, &cache->entries, &cache->root);
	if (res) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
		return ERR_PTR(res);
	}

	cache->version = ovl_dentry_version_get(dentry);
	ovl_set_dir_cache(d_inode(dentry), cache);

	return cache;
}

/*
 * Set d_ino for upper entries. Non-upper entries should always report
 * the uppermost real inode ino and should not call this function.
 *
 * When not all layers are on the same fs, report the real ino also for upper.
 *
 * When all layers are on the same fs, and upper has a reference to
 * copy up origin, call vfs_getattr() on the overlay entry to make
 * sure that d_ino will be consistent with st_ino from stat(2).
 */
static int ovl_cache_update_ino(struct path *path, struct ovl_cache_entry *p)
{
	struct dentry *dir = path->dentry;
	struct dentry *this = NULL;
	enum ovl_path_type type;
	u64 ino = p->real_ino;
	int err = 0;

	if (!ovl_same_sb(dir->d_sb))
		goto out;

	if (p->name[0] == '.') {
		if (p->len == 1) {
			this = dget(dir);
			goto get;
		}
		if (p->len == 2 && p->name[1] == '.') {
			/* we shall not be moved */
			this = dget(dir->d_parent);
			goto get;
		}
	}
	this = lookup_one_len(p->name, dir, p->len);
	if (IS_ERR_OR_NULL(this) || !this->d_inode) {
		if (IS_ERR(this)) {
			err = PTR_ERR(this);
			this = NULL;
			goto fail;
		}
		goto out;
	}

get:
	type = ovl_path_type(this);
	if (OVL_TYPE_ORIGIN(type)) {
		struct kstat stat;
		struct path statpath = *path;

		statpath.dentry = this;
		err = vfs_getattr(&statpath, &stat, STATX_INO, 0);
		if (err)
			goto fail;

		WARN_ON_ONCE(dir->d_sb->s_dev != stat.dev);
		ino = stat.ino;
	}

out:
	p->ino = ino;
	dput(this);
	return err;

fail:
	pr_warn_ratelimited("overlay: failed to look up (%s) for ino (%i)\n",
			    p->name, err);
	goto out;
}

static int ovl_fill_plain(struct dir_context *ctx, const char *name,
			  int namelen, loff_t offset, u64 ino,
			  unsigned int d_type)
{
	struct ovl_cache_entry *p;
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	rdd->count++;
	p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
	if (p == NULL) {
		rdd->err = -ENOMEM;
		return -ENOMEM;
	}
	list_add_tail(&p->l_node, rdd->list);

	return 0;
}

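/*
 * Build the "impure" cache for a directory that contains copied up entries:
 * read the upper directory, recalculate the overlay inode number for each
 * entry, and keep only the entries whose overlay inode number differs from
 * the real one, indexed by name for ovl_fill_real() to translate during
 * direct iteration.
 */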
static int ovl_dir_read_impure(struct path *path, struct list_head *list,
			       struct rb_root *root)
{
	int err;
	struct path realpath;
	struct ovl_cache_entry *p, *n;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_plain,
		.list = list,
		.root = root,
	};

	INIT_LIST_HEAD(list);
	*root = RB_ROOT;
	ovl_path_upper(path->dentry, &realpath);

	err = ovl_dir_read(&realpath, &rdd);
	if (err)
		return err;

	list_for_each_entry_safe(p, n, list, l_node) {
		if (strcmp(p->name, ".") != 0 &&
		    strcmp(p->name, "..") != 0) {
			err = ovl_cache_update_ino(path, p);
			if (err)
				return err;
		}
		if (p->ino == p->real_ino) {
			list_del(&p->l_node);
			kfree(p);
		} else {
			struct rb_node **newp = &root->rb_node;
			struct rb_node *parent = NULL;

			if (WARN_ON(ovl_cache_entry_find_link(p->name, p->len,
							      &newp, &parent)))
				return -EIO;

			rb_link_node(&p->node, parent, newp);
			rb_insert_color(&p->node, root);
		}
	}
	return 0;
}

static struct ovl_dir_cache *ovl_cache_get_impure(struct path *path)
{
	int res;
	struct dentry *dentry = path->dentry;
	struct ovl_dir_cache *cache;

	cache = ovl_dir_cache(d_inode(dentry));
	if (cache && ovl_dentry_version_get(dentry) == cache->version)
		return cache;

	/* Impure cache is not refcounted, free it here */
	ovl_dir_cache_free(d_inode(dentry));
	ovl_set_dir_cache(d_inode(dentry), NULL);

	cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
	if (!cache)
		return ERR_PTR(-ENOMEM);

	res = ovl_dir_read_impure(path, &cache->entries, &cache->root);
	if (res) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
		return ERR_PTR(res);
	}
	if (list_empty(&cache->entries)) {
		/* Good opportunity to get rid of an unnecessary "impure" flag */
		ovl_do_removexattr(ovl_dentry_upper(dentry), OVL_XATTR_IMPURE);
		ovl_clear_flag(OVL_IMPURE, d_inode(dentry));
		kfree(cache);
		return NULL;
	}

	cache->version = ovl_dentry_version_get(dentry);
	ovl_set_dir_cache(d_inode(dentry), cache);

	return cache;
}

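/*
 * Direct iteration of the real directory with d_ino translation:
 * ovl_fill_real() wraps the caller's dir_context, remapping '..' to the
 * parent overlay directory's inode number and copied up entries (found in
 * the impure cache) to their copy up origin's inode number, so that d_ino
 * stays consistent with st_ino on a samefs overlay.
 */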
struct ovl_readdir_translate {
	struct dir_context *orig_ctx;
	struct ovl_dir_cache *cache;
	struct dir_context ctx;
	u64 parent_ino;
};

static int ovl_fill_real(struct dir_context *ctx, const char *name,
			 int namelen, loff_t offset, u64 ino,
			 unsigned int d_type)
{
	struct ovl_readdir_translate *rdt =
		container_of(ctx, struct ovl_readdir_translate, ctx);
	struct dir_context *orig_ctx = rdt->orig_ctx;

	if (rdt->parent_ino && strcmp(name, "..") == 0)
		ino = rdt->parent_ino;
	else if (rdt->cache) {
		struct ovl_cache_entry *p;

		p = ovl_cache_entry_find(&rdt->cache->root, name, namelen);
		if (p)
			ino = p->ino;
	}

	return orig_ctx->actor(orig_ctx, name, namelen, offset, ino, d_type);
}

static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
{
	int err;
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dir = file->f_path.dentry;
	struct ovl_readdir_translate rdt = {
		.ctx.actor = ovl_fill_real,
		.orig_ctx = ctx,
	};

	if (OVL_TYPE_MERGE(ovl_path_type(dir->d_parent))) {
		struct kstat stat;
		struct path statpath = file->f_path;

		statpath.dentry = dir->d_parent;
		err = vfs_getattr(&statpath, &stat, STATX_INO, 0);
		if (err)
			return err;

		WARN_ON_ONCE(dir->d_sb->s_dev != stat.dev);
		rdt.parent_ino = stat.ino;
	}

	if (ovl_test_flag(OVL_IMPURE, d_inode(dir))) {
		rdt.cache = ovl_cache_get_impure(&file->f_path);
		if (IS_ERR(rdt.cache))
			return PTR_ERR(rdt.cache);
	}

	return iterate_dir(od->realfile, &rdt.ctx);
}


static int ovl_iterate(struct file *file, struct dir_context *ctx)
{
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dentry = file->f_path.dentry;
	struct ovl_cache_entry *p;
	int err;

	if (!ctx->pos)
		ovl_dir_reset(file);

	if (od->is_real) {
		/*
		 * If parent is merge, then need to adjust d_ino for '..', if
		 * dir is impure then need to adjust d_ino for copied up
		 * entries.
		 */
		if (ovl_same_sb(dentry->d_sb) &&
		    (ovl_test_flag(OVL_IMPURE, d_inode(dentry)) ||
		     OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent)))) {
			return ovl_iterate_real(file, ctx);
		}
		return iterate_dir(od->realfile, ctx);
	}

	if (!od->cache) {
		struct ovl_dir_cache *cache;

		cache = ovl_cache_get(dentry);
		if (IS_ERR(cache))
			return PTR_ERR(cache);

		od->cache = cache;
		ovl_seek_cursor(od, ctx->pos);
	}

	while (od->cursor != &od->cache->entries) {
		p = list_entry(od->cursor, struct ovl_cache_entry, l_node);
		if (!p->is_whiteout) {
			if (!p->ino) {
				err = ovl_cache_update_ino(&file->f_path, p);
				if (err)
					return err;
			}
			if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
				break;
		}
		od->cursor = p->l_node.next;
		ctx->pos++;
	}
	return 0;
}

static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t res;
	struct ovl_dir_file *od = file->private_data;

	inode_lock(file_inode(file));
	if (!file->f_pos)
		ovl_dir_reset(file);

	if (od->is_real) {
		res = vfs_llseek(od->realfile, offset, origin);
		file->f_pos = od->realfile->f_pos;
	} else {
		res = -EINVAL;

		switch (origin) {
		case SEEK_CUR:
			offset += file->f_pos;
			break;
		case SEEK_SET:
			break;
		default:
			goto out_unlock;
		}
		if (offset < 0)
			goto out_unlock;

		if (offset != file->f_pos) {
			file->f_pos = offset;
			if (od->cache)
				ovl_seek_cursor(od, offset);
		}
		res = offset;
	}
out_unlock:
	inode_unlock(file_inode(file));

	return res;
}

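/*
 * If the directory was copied up after this file was opened, fsync must go
 * to the new upper directory.  The upper file is opened lazily and published
 * with smp_store_release(), pairing with lockless_dereference(), so that
 * concurrent callers either see NULL or a fully initialized file.
 */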
static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dentry = file->f_path.dentry;
	struct file *realfile = od->realfile;

	/*
	 * Need to check if we started out being a lower dir, but got copied up
	 */
	if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) {
		struct inode *inode = file_inode(file);

		realfile = lockless_dereference(od->upperfile);
		if (!realfile) {
			struct path upperpath;

			ovl_path_upper(dentry, &upperpath);
			realfile = ovl_path_open(&upperpath, O_RDONLY);

			inode_lock(inode);
			if (!od->upperfile) {
				if (IS_ERR(realfile)) {
					inode_unlock(inode);
					return PTR_ERR(realfile);
				}
				smp_store_release(&od->upperfile, realfile);
			} else {
				/* somebody has beaten us to it */
				if (!IS_ERR(realfile))
					fput(realfile);
				realfile = od->upperfile;
			}
			inode_unlock(inode);
		}
	}

	return vfs_fsync_range(realfile, start, end, datasync);
}

static int ovl_dir_release(struct inode *inode, struct file *file)
{
	struct ovl_dir_file *od = file->private_data;

	if (od->cache) {
		inode_lock(inode);
		ovl_cache_put(od, file->f_path.dentry);
		inode_unlock(inode);
	}
	fput(od->realfile);
	if (od->upperfile)
		fput(od->upperfile);
	kfree(od);

	return 0;
}

static int ovl_dir_open(struct inode *inode, struct file *file)
{
	struct path realpath;
	struct file *realfile;
	struct ovl_dir_file *od;
	enum ovl_path_type type;

	od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	type = ovl_path_real(file->f_path.dentry, &realpath);
	realfile = ovl_path_open(&realpath, file->f_flags);
	if (IS_ERR(realfile)) {
		kfree(od);
		return PTR_ERR(realfile);
	}
	od->realfile = realfile;
	od->is_real = ovl_dir_is_real(file->f_path.dentry);
	od->is_upper = OVL_TYPE_UPPER(type);
	file->private_data = od;

	return 0;
}

const struct file_operations ovl_dir_operations = {
	.read = generic_read_dir,
	.open = ovl_dir_open,
	.iterate = ovl_iterate,
	.llseek = ovl_dir_llseek,
	.fsync = ovl_dir_fsync,
	.release = ovl_dir_release,
};

int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list)
{
	int err;
	struct ovl_cache_entry *p;
	struct rb_root root = RB_ROOT;

	err = ovl_dir_read_merged(dentry, list, &root);
	if (err)
		return err;

	err = 0;

	list_for_each_entry(p, list, l_node) {
		if (p->is_whiteout)
			continue;

		if (p->name[0] == '.') {
			if (p->len == 1)
				continue;
			if (p->len == 2 && p->name[1] == '.')
				continue;
		}
		err = -ENOTEMPTY;
		break;
	}

	return err;
}

void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
{
	struct ovl_cache_entry *p;

	inode_lock_nested(upper->d_inode, I_MUTEX_CHILD);
	list_for_each_entry(p, list, l_node) {
		struct dentry *dentry;

		if (!p->is_whiteout)
			continue;

		dentry = lookup_one_len(p->name, upper, p->len);
		if (IS_ERR(dentry)) {
			pr_err("overlayfs: lookup '%s/%.*s' failed (%i)\n",
			       upper->d_name.name, p->len, p->name,
			       (int) PTR_ERR(dentry));
			continue;
		}
		if (dentry->d_inode)
			ovl_cleanup(upper->d_inode, dentry);
		dput(dentry);
	}
	inode_unlock(upper->d_inode);
}

static int ovl_check_d_type(struct dir_context *ctx, const char *name,
			    int namelen, loff_t offset, u64 ino,
			    unsigned int d_type)
{
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	/* Even if d_type is not supported, DT_DIR is returned for . and .. */
	if (!strncmp(name, ".", namelen) || !strncmp(name, "..", namelen))
		return 0;

	if (d_type != DT_UNKNOWN)
		rdd->d_type_supported = true;

	return 0;
}

/*
 * Returns 1 if d_type is supported, 0 not supported/unknown. Negative values
 * if error is encountered.
 */
int ovl_check_d_type_supported(struct path *realpath)
{
	int err;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_check_d_type,
		.d_type_supported = false,
	};

	err = ovl_dir_read(realpath, &rdd);
	if (err)
		return err;

	return rdd.d_type_supported;
}

static void ovl_workdir_cleanup_recurse(struct path *path, int level)
{
	int err;
	struct inode *dir = path->dentry->d_inode;
	LIST_HEAD(list);
	struct rb_root root = RB_ROOT;
	struct ovl_cache_entry *p;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.dentry = NULL,
		.list = &list,
		.root = &root,
		.is_lowest = false,
	};

	err = ovl_dir_read(path, &rdd);
	if (err)
		goto out;

	inode_lock_nested(dir, I_MUTEX_PARENT);
	list_for_each_entry(p, &list, l_node) {
		struct dentry *dentry;

		if (p->name[0] == '.') {
			if (p->len == 1)
				continue;
			if (p->len == 2 && p->name[1] == '.')
				continue;
		}
		dentry = lookup_one_len(p->name, path->dentry, p->len);
		if (IS_ERR(dentry))
			continue;
		if (dentry->d_inode)
			ovl_workdir_cleanup(dir, path->mnt, dentry, level);
		dput(dentry);
	}
	inode_unlock(dir);
out:
	ovl_cache_free(&list);
}

void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
			 struct dentry *dentry, int level)
{
	int err;

	if (!d_is_dir(dentry) || level > 1) {
		ovl_cleanup(dir, dentry);
		return;
	}

	err = ovl_do_rmdir(dir, dentry);
	if (err) {
		struct path path = { .mnt = mnt, .dentry = dentry };

		inode_unlock(dir);
		ovl_workdir_cleanup_recurse(&path, level + 1);
		inode_lock_nested(dir, I_MUTEX_PARENT);
		ovl_cleanup(dir, dentry);
	}
}

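/*
 * Walk the index directory at mount time and verify each index entry against
 * the lower layers; entries that turn out to be stale or orphaned are
 * cleaned up.
 */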
int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
			 struct path *lowerstack, unsigned int numlower)
{
	int err;
	struct dentry *index = NULL;
	struct inode *dir = dentry->d_inode;
	struct path path = { .mnt = mnt, .dentry = dentry };
	LIST_HEAD(list);
	struct rb_root root = RB_ROOT;
	struct ovl_cache_entry *p;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.dentry = NULL,
		.list = &list,
		.root = &root,
		.is_lowest = false,
	};

	err = ovl_dir_read(&path, &rdd);
	if (err)
		goto out;

	inode_lock_nested(dir, I_MUTEX_PARENT);
	list_for_each_entry(p, &list, l_node) {
		if (p->name[0] == '.') {
			if (p->len == 1)
				continue;
			if (p->len == 2 && p->name[1] == '.')
				continue;
		}
		index = lookup_one_len(p->name, dentry, p->len);
		if (IS_ERR(index)) {
			err = PTR_ERR(index);
			index = NULL;
			break;
		}
		err = ovl_verify_index(index, lowerstack, numlower);
		/* Cleanup stale and orphan index entries */
		if (err && (err == -ESTALE || err == -ENOENT))
			err = ovl_cleanup(dir, index);
		if (err)
			break;

		dput(index);
		index = NULL;
	}
	dput(index);
	inode_unlock(dir);
out:
	ovl_cache_free(&list);
	if (err)
		pr_err("overlayfs: failed index dir cleanup (%i)\n", err);
	return err;
}