#include "ceph_debug.h"

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/sched.h>

#include "super.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path. Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino). The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */

const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
        struct ceph_dentry_info *di;

        if (dentry->d_fsdata)
                return 0;

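        /* choose dentry ops based on where this dentry lives: the live
         * namespace, the .snap dir itself, or a snapshotted tree */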
        if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
                dentry->d_op = &ceph_dentry_ops;
        else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
                dentry->d_op = &ceph_snapdir_dentry_ops;
        else
                dentry->d_op = &ceph_snap_dentry_ops;

        di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS);
        if (!di)
                return -ENOMEM; /* oh well */

        spin_lock(&dentry->d_lock);
        if (dentry->d_fsdata) /* lost a race */
                goto out_unlock;
        di->dentry = dentry;
        di->lease_session = NULL;
        dentry->d_fsdata = di;
        dentry->d_time = jiffies;
        ceph_dentry_lru_add(dentry);
out_unlock:
        spin_unlock(&dentry->d_lock);
        return 0;
}



/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
        return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
        return p & 0xffffffff;
}
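/*
 * ceph_make_fpos() (defined elsewhere, used below) composes the inverse:
 * fpos = ((loff_t)frag << 32) | off.
 */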

/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache. We make this work by carefully ordering dentries on
 * d_u.d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * I_COMPLETE indicates we have all dentries in the dir. It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *filp,
                            void *dirent, filldir_t filldir)
{
        struct inode *inode = filp->f_dentry->d_inode;
        struct ceph_file_info *fi = filp->private_data;
        struct dentry *parent = filp->f_dentry;
        struct inode *dir = parent->d_inode;
        struct list_head *p;
        struct dentry *dentry, *last;
        struct ceph_dentry_info *di;
        int err = 0;

        /* claim ref on last dentry we returned */
        last = fi->dentry;
        fi->dentry = NULL;

        dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
             last);

        spin_lock(&dcache_lock);

        /* start at beginning? */
        if (filp->f_pos == 2 || (last &&
                                 filp->f_pos < ceph_dentry(last)->offset)) {
                if (list_empty(&parent->d_subdirs))
                        goto out_unlock;
                p = parent->d_subdirs.prev;
                dout(" initial p %p/%p\n", p->prev, p->next);
        } else {
                p = last->d_u.d_child.prev;
        }

more:
        dentry = list_entry(p, struct dentry, d_u.d_child);
        di = ceph_dentry(dentry);
        while (1) {
                dout(" p %p/%p d_subdirs %p/%p\n", p->prev, p->next,
                     parent->d_subdirs.prev, parent->d_subdirs.next);
                if (p == &parent->d_subdirs) {
                        fi->at_end = 1;
                        goto out_unlock;
                }
                if (!d_unhashed(dentry) && dentry->d_inode &&
                    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
                    filp->f_pos <= di->offset)
                        break;
                dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
                     dentry->d_name.len, dentry->d_name.name, di->offset,
                     filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
                     !dentry->d_inode ? " null" : "");
                p = p->prev;
                dentry = list_entry(p, struct dentry, d_u.d_child);
                di = ceph_dentry(dentry);
        }

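        /* take a ref and drop the locks; filldir may copy to userspace */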
        atomic_inc(&dentry->d_count);
        spin_unlock(&dcache_lock);
        spin_unlock(&inode->i_lock);

        dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
             dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
        filp->f_pos = di->offset;
        err = filldir(dirent, dentry->d_name.name,
                      dentry->d_name.len, di->offset,
                      dentry->d_inode->i_ino,
                      dentry->d_inode->i_mode >> 12);

        if (last) {
                if (err < 0) {
                        /* remember our position */
                        fi->dentry = last;
                        fi->next_offset = di->offset;
                } else {
                        dput(last);
                }
                last = NULL;
        }

        spin_lock(&inode->i_lock);
        spin_lock(&dcache_lock);

        if (err < 0)
                goto out_unlock;

        last = dentry;

        p = p->prev;
        filp->f_pos++;

        /* make sure a dentry wasn't dropped while we didn't have dcache_lock */
        if ((ceph_inode(dir)->i_ceph_flags & CEPH_I_COMPLETE))
                goto more;
        dout(" lost I_COMPLETE on %p; falling back to mds\n", dir);
        err = -EAGAIN;

out_unlock:
        spin_unlock(&dcache_lock);

        if (last) {
                spin_unlock(&inode->i_lock);
                dput(last);
                spin_lock(&inode->i_lock);
        }

        return err;
}

/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
                            int len)
{
        kfree(fi->last_name);
        fi->last_name = kmalloc(len+1, GFP_NOFS);
        if (!fi->last_name)
                return -ENOMEM;
        memcpy(fi->last_name, name, len);
        fi->last_name[len] = 0;
        dout("note_last_dentry '%s'\n", fi->last_name);
        return 0;
}

static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
        struct ceph_file_info *fi = filp->private_data;
        struct inode *inode = filp->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_client *client = ceph_inode_to_client(inode);
        struct ceph_mds_client *mdsc = &client->mdsc;
        unsigned frag = fpos_frag(filp->f_pos);
        int off = fpos_off(filp->f_pos);
        int err;
        u32 ftype;
        struct ceph_mds_reply_info_parsed *rinfo;
        const int max_entries = client->mount_args->max_readdir;

        dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
        if (fi->at_end)
                return 0;

        /* always start with . and .. */
        if (filp->f_pos == 0) {
                /* note dir version at start of readdir so we can tell
                 * if any dentries get dropped */
                fi->dir_release_count = ci->i_release_count;

                dout("readdir off 0 -> '.'\n");
                if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
                            inode->i_ino, inode->i_mode >> 12) < 0)
                        return 0;
                filp->f_pos = 1;
                off = 1;
        }
        if (filp->f_pos == 1) {
                dout("readdir off 1 -> '..'\n");
                if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
                            filp->f_dentry->d_parent->d_inode->i_ino,
                            inode->i_mode >> 12) < 0)
                        return 0;
                filp->f_pos = 2;
                off = 2;
        }

        /* can we use the dcache? */
        spin_lock(&inode->i_lock);
        if ((filp->f_pos == 2 || fi->dentry) &&
            !ceph_test_opt(client, NOASYNCREADDIR) &&
            (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
            __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
                err = __dcache_readdir(filp, dirent, filldir);
                if (err != -EAGAIN) {
                        spin_unlock(&inode->i_lock);
                        return err;
                }
        }
        spin_unlock(&inode->i_lock);
        if (fi->dentry) {
                err = note_last_dentry(fi, fi->dentry->d_name.name,
                                       fi->dentry->d_name.len);
                if (err)
                        return err;
                dput(fi->dentry);
                fi->dentry = NULL;
        }

        /* proceed with a normal readdir */

more:
        /* do we have the correct frag content buffered? */
        if (fi->frag != frag || fi->last_readdir == NULL) {
                struct ceph_mds_request *req;
                int op = ceph_snap(inode) == CEPH_SNAPDIR ?
                        CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

                /* discard old result, if any */
                if (fi->last_readdir)
                        ceph_mdsc_put_request(fi->last_readdir);

                /* requery frag tree, as the frag topology may have changed */
                frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);

                dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
                     ceph_vinop(inode), frag, fi->last_name);
                req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
                if (IS_ERR(req))
                        return PTR_ERR(req);
                req->r_inode = igrab(inode);
                req->r_dentry = dget(filp->f_dentry);
                /* hints to request -> mds selection code */
                req->r_direct_mode = USE_AUTH_MDS;
                req->r_direct_hash = ceph_frag_value(frag);
                req->r_direct_is_hash = true;
                req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
                req->r_readdir_offset = fi->next_offset;
                req->r_args.readdir.frag = cpu_to_le32(frag);
                req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
                req->r_num_caps = max_entries;
                err = ceph_mdsc_do_request(mdsc, NULL, req);
                if (err < 0) {
                        ceph_mdsc_put_request(req);
                        return err;
                }
                dout("readdir got and parsed readdir result=%d"
                     " on frag %x, end=%d, complete=%d\n", err, frag,
                     (int)req->r_reply_info.dir_end,
                     (int)req->r_reply_info.dir_complete);

                if (!req->r_did_prepopulate) {
                        dout("readdir !did_prepopulate");
                        fi->dir_release_count--; /* preclude I_COMPLETE */
                }

                /* note next offset and last dentry name */
                fi->offset = fi->next_offset;
                fi->last_readdir = req;

                if (req->r_reply_info.dir_end) {
                        kfree(fi->last_name);
                        fi->last_name = NULL;
                        fi->next_offset = 0;
                } else {
                        rinfo = &req->r_reply_info;
                        err = note_last_dentry(fi,
                                       rinfo->dir_dname[rinfo->dir_nr-1],
                                       rinfo->dir_dname_len[rinfo->dir_nr-1]);
                        if (err)
                                return err;
                        fi->next_offset += rinfo->dir_nr;
                }
        }

        rinfo = &fi->last_readdir->r_reply_info;
        dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
             rinfo->dir_nr, off, fi->offset);
        while (off - fi->offset >= 0 && off - fi->offset < rinfo->dir_nr) {
                u64 pos = ceph_make_fpos(frag, off);
                struct ceph_mds_reply_inode *in =
                        rinfo->dir_in[off - fi->offset].in;
                dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
                     off, off - fi->offset, rinfo->dir_nr, pos,
                     rinfo->dir_dname_len[off - fi->offset],
                     rinfo->dir_dname[off - fi->offset], in);
                BUG_ON(!in);
                ftype = le32_to_cpu(in->mode) >> 12;
                if (filldir(dirent,
                            rinfo->dir_dname[off - fi->offset],
                            rinfo->dir_dname_len[off - fi->offset],
                            pos,
                            le64_to_cpu(in->ino),
                            ftype) < 0) {
                        dout("filldir stopping us...\n");
                        return 0;
                }
                off++;
                filp->f_pos = pos + 1;
        }

        if (fi->last_name) {
                ceph_mdsc_put_request(fi->last_readdir);
                fi->last_readdir = NULL;
                goto more;
        }

        /* more frags? */
        if (!ceph_frag_is_rightmost(frag)) {
                frag = ceph_frag_next(frag);
                off = 0;
                filp->f_pos = ceph_make_fpos(frag, off);
                dout("readdir next frag is %x\n", frag);
                goto more;
        }
        fi->at_end = 1;

        /*
         * if dir_release_count still matches the dir, no dentries
         * were released during the whole readdir, and we should have
         * the complete dir contents in our cache.
         */
        spin_lock(&inode->i_lock);
        if (ci->i_release_count == fi->dir_release_count) {
                dout(" marking %p complete\n", inode);
                ci->i_ceph_flags |= CEPH_I_COMPLETE;
                ci->i_max_offset = filp->f_pos;
        }
        spin_unlock(&inode->i_lock);

        dout("readdir %p filp %p done.\n", inode, filp);
        return 0;
}

static void reset_readdir(struct ceph_file_info *fi)
{
        if (fi->last_readdir) {
                ceph_mdsc_put_request(fi->last_readdir);
                fi->last_readdir = NULL;
        }
        kfree(fi->last_name);
        fi->next_offset = 2; /* compensate for . and .. */
        if (fi->dentry) {
                dput(fi->dentry);
                fi->dentry = NULL;
        }
        fi->at_end = 0;
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin)
{
        struct ceph_file_info *fi = file->private_data;
        struct inode *inode = file->f_mapping->host;
        loff_t old_offset = offset;
        loff_t retval;

        mutex_lock(&inode->i_mutex);
        switch (origin) {
        case SEEK_END:
                offset += inode->i_size + 2; /* FIXME */
                break;
        case SEEK_CUR:
                offset += file->f_pos;
        }
        retval = -EINVAL;
        if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
                if (offset != file->f_pos) {
                        file->f_pos = offset;
                        file->f_version = 0;
                        fi->at_end = 0;
                }
                retval = offset;

                /*
                 * discard buffered readdir content on seekdir(0), or
                 * seek to new frag, or seek prior to current chunk.
                 */
                if (offset == 0 ||
                    fpos_frag(offset) != fpos_frag(old_offset) ||
                    fpos_off(offset) < fi->offset) {
                        dout("dir_llseek dropping %p content\n", file);
                        reset_readdir(fi);
                }

                /* bump dir_release_count if we did a forward seek */
                if (offset > old_offset)
                        fi->dir_release_count--;
        }
        mutex_unlock(&inode->i_mutex);
        return retval;
}

/*
 * Process result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
                                  struct dentry *dentry, int err)
{
        struct ceph_client *client = ceph_client(dentry->d_sb);
        struct inode *parent = dentry->d_parent->d_inode;

        /* .snap dir? */
        if (err == -ENOENT &&
            ceph_vino(parent).ino != CEPH_INO_ROOT && /* no .snap in root dir */
            strcmp(dentry->d_name.name,
                   client->mount_args->snapdir_name) == 0) {
                struct inode *inode = ceph_get_snapdir(parent);
                dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
                     dentry, dentry->d_name.len, dentry->d_name.name, inode);
                d_add(dentry, inode);
                err = 0;
        }

        if (err == -ENOENT) {
                /* no trace? */
                err = 0;
                if (!req->r_reply_info.head->is_dentry) {
                        dout("ENOENT and no trace, dentry %p inode %p\n",
                             dentry, dentry->d_inode);
                        if (dentry->d_inode) {
                                d_drop(dentry);
                                err = -ENOENT;
                        } else {
                                d_add(dentry, NULL);
                        }
                }
        }
        if (err)
                dentry = ERR_PTR(err);
        else if (dentry != req->r_dentry)
                dentry = dget(req->r_dentry); /* we got spliced */
        else
                dentry = NULL;
        return dentry;
}

/*
 * Look up a single dir entry. If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
                                  struct nameidata *nd)
{
        struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = &client->mdsc;
        struct ceph_mds_request *req;
        int op;
        int err;

        dout("lookup %p dentry %p '%.*s'\n",
             dir, dentry, dentry->d_name.len, dentry->d_name.name);

        if (dentry->d_name.len > NAME_MAX)
                return ERR_PTR(-ENAMETOOLONG);

        err = ceph_init_dentry(dentry);
        if (err < 0)
                return ERR_PTR(err);

        /* open (but not create!) intent? */
        if (nd &&
            (nd->flags & LOOKUP_OPEN) &&
            (nd->flags & LOOKUP_CONTINUE) == 0 && /* only open last component */
            !(nd->intent.open.flags & O_CREAT)) {
                int mode = nd->intent.open.create_mode & ~current->fs->umask;
                return ceph_lookup_open(dir, dentry, nd, mode, 1);
        }

        /* can we conclude ENOENT locally? */
        if (dentry->d_inode == NULL) {
                struct ceph_inode_info *ci = ceph_inode(dir);
                struct ceph_dentry_info *di = ceph_dentry(dentry);

                spin_lock(&dir->i_lock);
                dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
                if (strncmp(dentry->d_name.name,
                            client->mount_args->snapdir_name,
                            dentry->d_name.len) &&
                    (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
                    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
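                        /* give this negative dentry a readdir offset just
                         * past the current end of the directory */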
                        di->offset = ci->i_max_offset++;
                        spin_unlock(&dir->i_lock);
                        dout(" dir %p complete, -ENOENT\n", dir);
                        d_add(dentry, NULL);
                        di->lease_shared_gen = ci->i_shared_gen;
                        return NULL;
                }
                spin_unlock(&dir->i_lock);
        }

        op = ceph_snap(dir) == CEPH_SNAPDIR ?
                CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
        req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
        if (IS_ERR(req))
                return ERR_PTR(PTR_ERR(req));
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        /* we only need inode linkage */
        req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
        req->r_locked_dir = dir;
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        dentry = ceph_finish_lookup(req, dentry, err);
        ceph_mdsc_put_request(req); /* will dput(dentry) */
        dout("lookup result=%p\n", dentry);
        return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
        struct dentry *result = ceph_lookup(dir, dentry, NULL);

        if (result && !IS_ERR(result)) {
                /*
                 * We created the item, then did a lookup, and found
                 * it was already linked to another inode we already
                 * had in our cache (and thus got spliced). Link our
                 * dentry to that inode, but don't hash it, just in
                 * case the VFS wants to dereference it.
                 */
                BUG_ON(!result->d_inode);
                d_instantiate(dentry, result->d_inode);
                return 0;
        }
        return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
                      int mode, dev_t rdev)
{
        struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = &client->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        dout("mknod in dir %p dentry %p mode 0%o rdev %d\n",
             dir, dentry, mode, rdev);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                d_drop(dentry);
                return PTR_ERR(req);
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_locked_dir = dir;
        req->r_args.mknod.mode = cpu_to_le32(mode);
        req->r_args.mknod.rdev = cpu_to_le32(rdev);
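        /* release our dentry lease / dir FILE_SHARED cap along with the
         * request, unless we also hold FILE_EXCL (r_dentry_drop /
         * r_dentry_unless) */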
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        ceph_mdsc_put_request(req);
        if (err)
                d_drop(dentry);
        return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, int mode,
                       struct nameidata *nd)
{
        dout("create in dir %p dentry %p name '%.*s'\n",
             dir, dentry, dentry->d_name.len, dentry->d_name.name);

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        if (nd) {
                BUG_ON((nd->flags & LOOKUP_OPEN) == 0);
                dentry = ceph_lookup_open(dir, dentry, nd, mode, 0);
                /* hrm, what should i do here if we get aliased? */
                if (IS_ERR(dentry))
                        return PTR_ERR(dentry);
                return 0;
        }

        /* fall back to mknod */
        return ceph_mknod(dir, dentry, (mode & ~S_IFMT) | S_IFREG, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
                        const char *dest)
{
        struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = &client->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                d_drop(dentry);
                return PTR_ERR(req);
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_path2 = kstrdup(dest, GFP_NOFS);
        req->r_locked_dir = dir;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        ceph_mdsc_put_request(req);
        if (err)
                d_drop(dentry);
        return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
        struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = &client->mdsc;
        struct ceph_mds_request *req;
        int err = -EROFS;
        int op;

        if (ceph_snap(dir) == CEPH_SNAPDIR) {
                /* mkdir .snap/foo is a MKSNAP */
                op = CEPH_MDS_OP_MKSNAP;
                dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
                     dentry->d_name.len, dentry->d_name.name, dentry);
        } else if (ceph_snap(dir) == CEPH_NOSNAP) {
                dout("mkdir dir %p dn %p mode 0%o\n", dir, dentry, mode);
                op = CEPH_MDS_OP_MKDIR;
        } else {
                goto out;
        }
        req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }

        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_locked_dir = dir;
        req->r_args.mkdir.mode = cpu_to_le32(mode);
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        ceph_mdsc_put_request(req);
out:
        if (err < 0)
                d_drop(dentry);
        return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
                     struct dentry *dentry)
{
        struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = &client->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        dout("link in dir %p old_dentry %p dentry %p\n", dir,
             old_dentry, dentry);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                d_drop(dentry);
                return PTR_ERR(req);
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */
        req->r_locked_dir = dir;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (err)
                d_drop(dentry);
        else if (!req->r_reply_info.head->is_dentry)
                d_instantiate(dentry, igrab(old_dentry->d_inode));
        ceph_mdsc_put_request(req);
        return err;
}

/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps. If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

        spin_lock(&inode->i_lock);
        if (inode->i_nlink == 1) {
                drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
                ci->i_ceph_flags |= CEPH_I_NODELAY;
        }
        spin_unlock(&inode->i_lock);
        return drop;
}

/*
 * rmdir and unlink differ only in the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
        struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = &client->mdsc;
        struct inode *inode = dentry->d_inode;
        struct ceph_mds_request *req;
        int err = -EROFS;
        int op;

        if (ceph_snap(dir) == CEPH_SNAPDIR) {
                /* rmdir .snap/foo is RMSNAP */
                dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
                     dentry->d_name.name, dentry);
                op = CEPH_MDS_OP_RMSNAP;
        } else if (ceph_snap(dir) == CEPH_NOSNAP) {
                dout("unlink/rmdir dir %p dn %p inode %p\n",
                     dir, dentry, inode);
                op = ((dentry->d_inode->i_mode & S_IFMT) == S_IFDIR) ?
                        CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
        } else
                goto out;
        req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_locked_dir = dir;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        req->r_inode_drop = drop_caps_for_unlink(inode);
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                d_delete(dentry);
        ceph_mdsc_put_request(req);
out:
        return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
                       struct inode *new_dir, struct dentry *new_dentry)
{
        struct ceph_client *client = ceph_sb_to_client(old_dir->i_sb);
        struct ceph_mds_client *mdsc = &client->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(old_dir) != ceph_snap(new_dir))
                return -EXDEV;
        if (ceph_snap(old_dir) != CEPH_NOSNAP ||
            ceph_snap(new_dir) != CEPH_NOSNAP)
                return -EROFS;
        dout("rename dir %p dentry %p to dir %p dentry %p\n",
             old_dir, old_dentry, new_dir, new_dentry);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
        if (IS_ERR(req))
                return PTR_ERR(req);
        req->r_dentry = dget(new_dentry);
        req->r_num_caps = 2;
        req->r_old_dentry = dget(old_dentry);
        req->r_locked_dir = new_dir;
        req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        /* release LINK_RDCACHE on source inode (mds will lock it) */
        req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
        if (new_dentry->d_inode)
                req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
        err = ceph_mdsc_do_request(mdsc, old_dir, req);
        if (!err && !req->r_reply_info.head->is_dentry) {
                /*
                 * Normally d_move() is done by fill_trace (called by
                 * do_request, above). If there is no trace, we need
                 * to do it here.
                 */
                d_move(old_dentry, new_dentry);
        }
        ceph_mdsc_put_request(req);
        return err;
}


/*
 * Check if dentry lease is valid. If not, delete the lease. Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
        struct ceph_dentry_info *di;
        struct ceph_mds_session *s;
        int valid = 0;
        u32 gen;
        unsigned long ttl;
        struct ceph_mds_session *session = NULL;
        struct inode *dir = NULL;
        u32 seq = 0;

        spin_lock(&dentry->d_lock);
        di = ceph_dentry(dentry);
        if (di && di->lease_session) {
                s = di->lease_session;
                spin_lock(&s->s_cap_lock);
                gen = s->s_cap_gen;
                ttl = s->s_cap_ttl;
                spin_unlock(&s->s_cap_lock);

                if (di->lease_gen == gen &&
                    time_before(jiffies, dentry->d_time) &&
                    time_before(jiffies, ttl)) {
                        valid = 1;
                        if (di->lease_renew_after &&
                            time_after(jiffies, di->lease_renew_after)) {
                                /* we should renew */
                                dir = dentry->d_parent->d_inode;
                                session = ceph_get_mds_session(s);
                                seq = di->lease_seq;
                                di->lease_renew_after = 0;
                                di->lease_renew_from = jiffies;
                        }
                } else {
                        __ceph_mdsc_drop_dentry_lease(dentry);
                }
        }
        spin_unlock(&dentry->d_lock);

        if (session) {
                ceph_mdsc_lease_send_msg(session, dir, dentry,
                                         CEPH_MDS_LEASE_RENEW, seq);
                ceph_put_mds_session(session);
        }
        dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
        return valid;
}

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
        struct ceph_inode_info *ci = ceph_inode(dir);
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        int valid = 0;

        spin_lock(&dir->i_lock);
        if (ci->i_shared_gen == di->lease_shared_gen)
                valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
        spin_unlock(&dir->i_lock);
        dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
             dir, (unsigned)ci->i_shared_gen, dentry,
             (unsigned)di->lease_shared_gen, valid);
        return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
        struct inode *dir = dentry->d_parent->d_inode;

        dout("d_revalidate %p '%.*s' inode %p\n", dentry,
             dentry->d_name.len, dentry->d_name.name, dentry->d_inode);

        /* always trust cached snapped dentries and the snapdir dentry */
        if (ceph_snap(dir) != CEPH_NOSNAP) {
                dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
                     dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
                goto out_touch;
        }
        if (dentry->d_inode && ceph_snap(dentry->d_inode) == CEPH_SNAPDIR)
                goto out_touch;

        if (dentry_lease_is_valid(dentry) ||
            dir_lease_is_valid(dir, dentry))
                goto out_touch;

        dout("d_revalidate %p invalid\n", dentry);
        d_drop(dentry);
        return 0;
out_touch:
        ceph_dentry_lru_touch(dentry);
        return 1;
}

/*
 * When a dentry is released, clear the dir I_COMPLETE if it was part
 * of the current dir gen.
 */
static void ceph_dentry_release(struct dentry *dentry)
{
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        struct inode *parent_inode = dentry->d_parent->d_inode;

        if (parent_inode) {
                struct ceph_inode_info *ci = ceph_inode(parent_inode);

                spin_lock(&parent_inode->i_lock);
                if (ci->i_shared_gen == di->lease_shared_gen) {
                        dout(" clearing %p complete (d_release)\n",
                             parent_inode);
                        ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
                        ci->i_release_count++;
                }
                spin_unlock(&parent_inode->i_lock);
        }
        if (di) {
                ceph_dentry_lru_del(dentry);
                if (di->lease_session)
                        ceph_put_mds_session(di->lease_session);
                kmem_cache_free(ceph_dentry_cachep, di);
                dentry->d_fsdata = NULL;
        }
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
                                     struct nameidata *nd)
{
        /*
         * Eventually, we'll want to revalidate snapped metadata
         * too... probably...
         */
        return 1;
}



/*
 * read() on a dir. This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
                             loff_t *ppos)
{
        struct ceph_file_info *cf = file->private_data;
        struct inode *inode = file->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        int left;

        if (!ceph_test_opt(ceph_client(inode->i_sb), DIRSTAT))
                return -EISDIR;

        if (!cf->dir_info) {
                cf->dir_info = kmalloc(1024, GFP_NOFS);
                if (!cf->dir_info)
                        return -ENOMEM;
                cf->dir_info_len =
                        sprintf(cf->dir_info,
                                "entries: %20lld\n"
                                " files: %20lld\n"
                                " subdirs: %20lld\n"
                                "rentries: %20lld\n"
                                " rfiles: %20lld\n"
                                " rsubdirs: %20lld\n"
                                "rbytes: %20lld\n"
                                "rctime: %10ld.%09ld\n",
                                ci->i_files + ci->i_subdirs,
                                ci->i_files,
                                ci->i_subdirs,
                                ci->i_rfiles + ci->i_rsubdirs,
                                ci->i_rfiles,
                                ci->i_rsubdirs,
                                ci->i_rbytes,
                                (long)ci->i_rctime.tv_sec,
                                (long)ci->i_rctime.tv_nsec);
        }

        if (*ppos >= cf->dir_info_len)
                return 0;
        size = min_t(unsigned, size, cf->dir_info_len-*ppos);
        left = copy_to_user(buf, cf->dir_info + *ppos, size);
        if (left == size)
                return -EFAULT;
        *ppos += (size - left);
        return size - left;
}

/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, struct dentry *dentry,
                          int datasync)
{
        struct inode *inode = dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct list_head *head = &ci->i_unsafe_dirops;
        struct ceph_mds_request *req;
        u64 last_tid;
        int ret = 0;

        dout("dir_fsync %p\n", inode);
        spin_lock(&ci->i_unsafe_lock);
        if (list_empty(head))
                goto out;

        req = list_entry(head->prev,
                         struct ceph_mds_request, r_unsafe_dir_item);
        last_tid = req->r_tid;

        do {
                ceph_mdsc_get_request(req);
                spin_unlock(&ci->i_unsafe_lock);
                dout("dir_fsync %p wait on tid %llu (until %llu)\n",
                     inode, req->r_tid, last_tid);
                if (req->r_timeout) {
                        ret = wait_for_completion_timeout(
                                &req->r_safe_completion, req->r_timeout);
                        if (ret > 0)
                                ret = 0;
                        else if (ret == 0)
                                ret = -EIO; /* timed out */
                } else {
                        wait_for_completion(&req->r_safe_completion);
                }
                spin_lock(&ci->i_unsafe_lock);
                ceph_mdsc_put_request(req);

                if (ret || list_empty(head))
                        break;
                req = list_entry(head->next,
                                 struct ceph_mds_request, r_unsafe_dir_item);
        } while (req->r_tid < last_tid);
out:
        spin_unlock(&ci->i_unsafe_lock);
        return ret;
}

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
        struct ceph_dentry_info *di = ceph_dentry(dn);
        struct ceph_mds_client *mdsc;
        dout("dentry_lru_add %p %p\t%.*s\n",
             di, dn, dn->d_name.len, dn->d_name.name);

        if (di) {
                mdsc = &ceph_client(dn->d_sb)->mdsc;
                spin_lock(&mdsc->dentry_lru_lock);
                list_add_tail(&di->lru, &mdsc->dentry_lru);
                mdsc->num_dentry++;
                spin_unlock(&mdsc->dentry_lru_lock);
        }
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
        struct ceph_dentry_info *di = ceph_dentry(dn);
        struct ceph_mds_client *mdsc;
        dout("dentry_lru_touch %p %p\t%.*s\n",
             di, dn, dn->d_name.len, dn->d_name.name);

        if (di) {
                mdsc = &ceph_client(dn->d_sb)->mdsc;
                spin_lock(&mdsc->dentry_lru_lock);
                list_move_tail(&di->lru, &mdsc->dentry_lru);
                spin_unlock(&mdsc->dentry_lru_lock);
        }
}

void ceph_dentry_lru_del(struct dentry *dn)
{
        struct ceph_dentry_info *di = ceph_dentry(dn);
        struct ceph_mds_client *mdsc;

        dout("dentry_lru_del %p %p\t%.*s\n",
             di, dn, dn->d_name.len, dn->d_name.name);
        if (di) {
                mdsc = &ceph_client(dn->d_sb)->mdsc;
                spin_lock(&mdsc->dentry_lru_lock);
                list_del_init(&di->lru);
                mdsc->num_dentry--;
                spin_unlock(&mdsc->dentry_lru_lock);
        }
}

const struct file_operations ceph_dir_fops = {
        .read = ceph_read_dir,
        .readdir = ceph_readdir,
        .llseek = ceph_dir_llseek,
        .open = ceph_open,
        .release = ceph_release,
        .unlocked_ioctl = ceph_ioctl,
        .fsync = ceph_dir_fsync,
};

const struct inode_operations ceph_dir_iops = {
        .lookup = ceph_lookup,
        .permission = ceph_permission,
        .getattr = ceph_getattr,
        .setattr = ceph_setattr,
        .setxattr = ceph_setxattr,
        .getxattr = ceph_getxattr,
        .listxattr = ceph_listxattr,
        .removexattr = ceph_removexattr,
        .mknod = ceph_mknod,
        .symlink = ceph_symlink,
        .mkdir = ceph_mkdir,
        .link = ceph_link,
        .unlink = ceph_unlink,
        .rmdir = ceph_unlink,
        .rename = ceph_rename,
        .create = ceph_create,
};

struct dentry_operations ceph_dentry_ops = {
        .d_revalidate = ceph_d_revalidate,
        .d_release = ceph_dentry_release,
};

struct dentry_operations ceph_snapdir_dentry_ops = {
        .d_revalidate = ceph_snapdir_d_revalidate,
};

struct dentry_operations ceph_snap_dentry_ops = {
};