#include <linux/ceph/ceph_debug.h>

#include <linux/sort.h>
#include <linux/slab.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/decode.h>

/*
 * Snapshots in ceph are driven in large part by cooperation from the
 * client.  In contrast to local file systems or file servers that
 * implement snapshots at a single point in the system, ceph's
 * distributed access to storage requires clients to help decide
 * whether a write logically occurs before or after a recently created
 * snapshot.
 *
 * This provides a perfect instantaneous client-wide snapshot.  Between
 * clients, however, snapshots may appear to be applied at slightly
 * different points in time, depending on delays in delivering the
 * snapshot notification.
 *
 * Snapshots are _not_ file system-wide.  Instead, each snapshot
 * applies to the subdirectory nested beneath some directory.  This
 * effectively divides the hierarchy into multiple "realms," where all
 * of the files contained by each realm share the same set of
 * snapshots.  An individual realm's snap set contains snapshots
 * explicitly created on that realm, as well as any snaps in its
 * parent's snap set _after_ the point at which the parent became its
 * parent (due to, say, a rename).  Similarly, snaps from prior parents
 * are included for the intervals during which they were the parent.
 *
 * The client is spared most of this detail, fortunately... it need
 * only maintain a hierarchy of realms reflecting the current
 * parent/child realm relationship, and, for each realm, an explicit
 * list of snaps inherited from prior parents.
 *
 * A snap_realm struct is maintained for every realm containing an
 * inode with an open cap in the system.  (The needed snap realm
 * information is provided by the MDS whenever a cap is issued, i.e.,
 * on open.)  A 'seq' version number is used to ensure that as realm
 * parameters change (new snapshot, new parent, etc.) the client's
 * realm hierarchy is updated.
 *
 * The realm hierarchy drives the generation of a 'snap context' for
 * each realm, which simply lists the resulting set of snaps for the
 * realm.  This is attached to any writes sent to OSDs.
 */
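/*
 * A small sketch to illustrate (the inode numbers and snap ids are
 * hypothetical):
 *
 *	realm 0x1:     snaps = [8, 3]
 *	  realm 0x100: snaps = [12], prior_parent_snaps = [2],
 *	               parent_since = 5
 *
 * The snap context for realm 0x100 then contains its own snap (12),
 * its parent's snaps since it became the parent (8, but not 3), and
 * its prior parents' snaps (2): snaps = [12, 8, 2], in descending
 * order.
 */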
/*
 * Unfortunately error handling is a bit mixed here.  If we get a snap
 * update, but don't have enough memory to update our realm hierarchy,
 * it's not clear what we can do about it (besides complaining to the
 * console).
 */


/*
 * increase ref count for the realm
 *
 * caller must hold snap_rwsem for write.
 */
void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
			 struct ceph_snap_realm *realm)
{
	dout("get_realm %p %d -> %d\n", realm,
	     atomic_read(&realm->nref), atomic_read(&realm->nref)+1);
	/*
	 * since we _only_ increment realm refs or empty the empty
	 * list with snap_rwsem held, adjusting the empty list here is
	 * safe.  we do need to protect against concurrent empty list
	 * additions, however.
	 */
	if (atomic_read(&realm->nref) == 0) {
		spin_lock(&mdsc->snap_empty_lock);
		list_del_init(&realm->empty_item);
		spin_unlock(&mdsc->snap_empty_lock);
	}

	atomic_inc(&realm->nref);
}

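/*
 * Realms are kept in an rbtree (mdsc->snap_realms) keyed by the inode
 * number of the directory at each realm's root.
 */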
static void __insert_snap_realm(struct rb_root *root,
				struct ceph_snap_realm *new)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_snap_realm *r = NULL;

	while (*p) {
		parent = *p;
		r = rb_entry(parent, struct ceph_snap_realm, node);
		if (new->ino < r->ino)
			p = &(*p)->rb_left;
		else if (new->ino > r->ino)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
}

/*
 * create and get the realm rooted at @ino and bump its ref count.
 *
 * caller must hold snap_rwsem for write.
 */
static struct ceph_snap_realm *ceph_create_snap_realm(
	struct ceph_mds_client *mdsc,
	u64 ino)
{
	struct ceph_snap_realm *realm;

	realm = kzalloc(sizeof(*realm), GFP_NOFS);
	if (!realm)
		return ERR_PTR(-ENOMEM);

	atomic_set(&realm->nref, 0);    /* tree does not take a ref */
	realm->ino = ino;
	INIT_LIST_HEAD(&realm->children);
	INIT_LIST_HEAD(&realm->child_item);
	INIT_LIST_HEAD(&realm->empty_item);
	INIT_LIST_HEAD(&realm->dirty_item);
	INIT_LIST_HEAD(&realm->inodes_with_caps);
	spin_lock_init(&realm->inodes_with_caps_lock);
	__insert_snap_realm(&mdsc->snap_realms, realm);
	dout("create_snap_realm %llx %p\n", realm->ino, realm);
	return realm;
}

/*
 * lookup the realm rooted at @ino.
 *
 * caller must hold snap_rwsem for write.
 */
struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
					       u64 ino)
{
	struct rb_node *n = mdsc->snap_realms.rb_node;
	struct ceph_snap_realm *r;

	while (n) {
		r = rb_entry(n, struct ceph_snap_realm, node);
		if (ino < r->ino)
			n = n->rb_left;
		else if (ino > r->ino)
			n = n->rb_right;
		else {
			dout("lookup_snap_realm %llx %p\n", r->ino, r);
			return r;
		}
	}
	return NULL;
}

static void __put_snap_realm(struct ceph_mds_client *mdsc,
			     struct ceph_snap_realm *realm);

/*
 * called with snap_rwsem (write)
 */
static void __destroy_snap_realm(struct ceph_mds_client *mdsc,
				 struct ceph_snap_realm *realm)
{
	dout("__destroy_snap_realm %p %llx\n", realm, realm->ino);

	rb_erase(&realm->node, &mdsc->snap_realms);

	if (realm->parent) {
		list_del_init(&realm->child_item);
		__put_snap_realm(mdsc, realm->parent);
	}

	kfree(realm->prior_parent_snaps);
	kfree(realm->snaps);
	ceph_put_snap_context(realm->cached_context);
	kfree(realm);
}

/*
 * caller holds snap_rwsem (write)
 */
static void __put_snap_realm(struct ceph_mds_client *mdsc,
			     struct ceph_snap_realm *realm)
{
	dout("__put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
	     atomic_read(&realm->nref), atomic_read(&realm->nref)-1);
	if (atomic_dec_and_test(&realm->nref))
		__destroy_snap_realm(mdsc, realm);
}

/*
 * caller needn't hold any locks
 */
void ceph_put_snap_realm(struct ceph_mds_client *mdsc,
			 struct ceph_snap_realm *realm)
{
	dout("put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
	     atomic_read(&realm->nref), atomic_read(&realm->nref)-1);
	if (!atomic_dec_and_test(&realm->nref))
		return;

	if (down_write_trylock(&mdsc->snap_rwsem)) {
		__destroy_snap_realm(mdsc, realm);
		up_write(&mdsc->snap_rwsem);
	} else {
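		/*
		 * We could not take snap_rwsem without blocking, so
		 * defer the destruction: park the dead realm on the
		 * snap_empty list and let __cleanup_empty_realms()
		 * reap it later, under snap_rwsem.
		 */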
		spin_lock(&mdsc->snap_empty_lock);
		list_add(&realm->empty_item, &mdsc->snap_empty);
		spin_unlock(&mdsc->snap_empty_lock);
	}
}

/*
 * Clean up any realms whose ref counts have dropped to zero.  Note
 * that this does not include realms that were created but not yet
 * used.
 *
 * Called under snap_rwsem (write)
 */
static void __cleanup_empty_realms(struct ceph_mds_client *mdsc)
{
	struct ceph_snap_realm *realm;

	spin_lock(&mdsc->snap_empty_lock);
	while (!list_empty(&mdsc->snap_empty)) {
		realm = list_first_entry(&mdsc->snap_empty,
					 struct ceph_snap_realm, empty_item);
		list_del(&realm->empty_item);
		spin_unlock(&mdsc->snap_empty_lock);
		__destroy_snap_realm(mdsc, realm);
		spin_lock(&mdsc->snap_empty_lock);
	}
	spin_unlock(&mdsc->snap_empty_lock);
}

void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc)
{
	down_write(&mdsc->snap_rwsem);
	__cleanup_empty_realms(mdsc);
	up_write(&mdsc->snap_rwsem);
}

/*
 * adjust the parent realm of a given @realm.  adjust the child list,
 * parent pointers, and ref counts appropriately.
 *
 * returns 1 if the parent was changed, 0 if unchanged, <0 on error.
 *
 * caller must hold snap_rwsem for write.
 */
static int adjust_snap_realm_parent(struct ceph_mds_client *mdsc,
				    struct ceph_snap_realm *realm,
				    u64 parentino)
{
	struct ceph_snap_realm *parent;

	if (realm->parent_ino == parentino)
		return 0;

	parent = ceph_lookup_snap_realm(mdsc, parentino);
	if (!parent) {
		parent = ceph_create_snap_realm(mdsc, parentino);
		if (IS_ERR(parent))
			return PTR_ERR(parent);
	}
	dout("adjust_snap_realm_parent %llx %p: %llx %p -> %llx %p\n",
	     realm->ino, realm, realm->parent_ino, realm->parent,
	     parentino, parent);
	if (realm->parent) {
		list_del_init(&realm->child_item);
		ceph_put_snap_realm(mdsc, realm->parent);
	}
	realm->parent_ino = parentino;
	realm->parent = parent;
	ceph_get_snap_realm(mdsc, parent);
	list_add(&realm->child_item, &parent->children);
	return 1;
}


static int cmpu64_rev(const void *a, const void *b)
{
	if (*(u64 *)a < *(u64 *)b)
		return 1;
	if (*(u64 *)a > *(u64 *)b)
		return -1;
	return 0;
}

/*
 * build the snap context for a given realm.
 */
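/*
 * Worked example, using the same hypothetical numbers as the sketch at
 * the top of this file: with a parent context of { seq 8, snaps [8, 3] },
 * parent_since = 5, our own snaps [12] and prior_parent_snaps [2], the
 * result is { seq 12, snaps [12, 8, 2] }: parent snaps older than
 * parent_since are excluded, and the vector is kept in descending order
 * (hence the reversed comparator cmpu64_rev above).
 */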
static int build_snap_context(struct ceph_snap_realm *realm)
{
	struct ceph_snap_realm *parent = realm->parent;
	struct ceph_snap_context *snapc;
	int err = 0;
	u32 num = realm->num_prior_parent_snaps + realm->num_snaps;

	/*
	 * build parent context, if it hasn't been built.
	 * conservatively estimate that all parent snaps might be
	 * included by us.
	 */
	if (parent) {
		if (!parent->cached_context) {
			err = build_snap_context(parent);
			if (err)
				goto fail;
		}
		num += parent->cached_context->num_snaps;
	}

	/* do i actually need to update?  not if my context seq
	   matches realm seq, and my parent's does too.  (this works
	   because rebuild_snap_realms() works _downward_ in the
	   hierarchy after each update.) */
	if (realm->cached_context &&
	    realm->cached_context->seq == realm->seq &&
	    (!parent ||
	     realm->cached_context->seq >= parent->cached_context->seq)) {
		dout("build_snap_context %llx %p: %p seq %lld (%u snaps)"
		     " (unchanged)\n",
		     realm->ino, realm, realm->cached_context,
		     realm->cached_context->seq,
		     (unsigned int) realm->cached_context->num_snaps);
		return 0;
	}

	/* alloc new snap context */
	err = -ENOMEM;
	if (num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64))
		goto fail;
	snapc = kzalloc(sizeof(*snapc) + num*sizeof(u64), GFP_NOFS);
	if (!snapc)
		goto fail;
	atomic_set(&snapc->nref, 1);

	/* build (reverse sorted) snap vector */
	num = 0;
	snapc->seq = realm->seq;
	if (parent) {
		u32 i;

		/* include any of parent's snaps occurring _after_ my
		   parent became my parent */
		for (i = 0; i < parent->cached_context->num_snaps; i++)
			if (parent->cached_context->snaps[i] >=
			    realm->parent_since)
				snapc->snaps[num++] =
					parent->cached_context->snaps[i];
		if (parent->cached_context->seq > snapc->seq)
			snapc->seq = parent->cached_context->seq;
	}
	memcpy(snapc->snaps + num, realm->snaps,
	       sizeof(u64)*realm->num_snaps);
	num += realm->num_snaps;
	memcpy(snapc->snaps + num, realm->prior_parent_snaps,
	       sizeof(u64)*realm->num_prior_parent_snaps);
	num += realm->num_prior_parent_snaps;

	sort(snapc->snaps, num, sizeof(u64), cmpu64_rev, NULL);
	snapc->num_snaps = num;
	dout("build_snap_context %llx %p: %p seq %lld (%u snaps)\n",
	     realm->ino, realm, snapc, snapc->seq,
	     (unsigned int) snapc->num_snaps);

	if (realm->cached_context)
		ceph_put_snap_context(realm->cached_context);
	realm->cached_context = snapc;
	return 0;

fail:
	/*
	 * if we fail, clear old (incorrect) cached_context... hopefully
	 * we'll have better luck building it later
	 */
	if (realm->cached_context) {
		ceph_put_snap_context(realm->cached_context);
		realm->cached_context = NULL;
	}
	pr_err("build_snap_context %llx %p fail %d\n", realm->ino,
	       realm, err);
	return err;
}

/*
 * rebuild snap context for the given realm and all of its children.
 */
static void rebuild_snap_realms(struct ceph_snap_realm *realm)
{
	struct ceph_snap_realm *child;

	dout("rebuild_snap_realms %llx %p\n", realm->ino, realm);
	build_snap_context(realm);

	list_for_each_entry(child, &realm->children, child_item)
		rebuild_snap_realms(child);
}


/*
 * helper to allocate and decode an array of snapids.  free prior
 * instance, if any.
 */
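/*
 * Note: @src points into the raw message payload, which is
 * little-endian and not necessarily aligned, hence the
 * get_unaligned_le64() below.
 */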
static int dup_array(u64 **dst, __le64 *src, u32 num)
{
	u32 i;

	kfree(*dst);
	if (num) {
		*dst = kcalloc(num, sizeof(u64), GFP_NOFS);
		if (!*dst)
			return -ENOMEM;
		for (i = 0; i < num; i++)
			(*dst)[i] = get_unaligned_le64(src + i);
	} else {
		*dst = NULL;
	}
	return 0;
}


/*
 * When a snapshot is applied, the size/mtime inode metadata is queued
 * in a ceph_cap_snap (one for each snapshot) until writeback
 * completes and the metadata can be flushed back to the MDS.
 *
 * However, if a (sync) write is currently in progress when we apply
 * the snapshot, we have to wait until the write succeeds or fails
 * (and a final size/mtime is known).  In this case we set
 * cap_snap->writing = 1, and the cap_snap is said to be "pending."
 * When the write finishes, we __ceph_finish_cap_snap().
 *
 * Caller must hold snap_rwsem for read (i.e., the realm topology won't
 * change).
 */
void ceph_queue_cap_snap(struct ceph_inode_info *ci)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_cap_snap *capsnap;
	int used, dirty;

	capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
	if (!capsnap) {
		pr_err("ENOMEM allocating ceph_cap_snap on %p\n", inode);
		return;
	}

	spin_lock(&ci->i_ceph_lock);
	used = __ceph_caps_used(ci);
	dirty = __ceph_caps_dirty(ci);

	/*
	 * If there is a write in progress, treat that as a dirty Fw,
	 * even though it hasn't completed yet; by the time we finish
	 * up this capsnap it will be.
	 */
	if (used & CEPH_CAP_FILE_WR)
		dirty |= CEPH_CAP_FILE_WR;

	if (__ceph_have_pending_cap_snap(ci)) {
		/* there is no point in queuing multiple "pending" cap_snaps,
		   as no new writes are allowed to start when pending, so any
		   writes in progress now were started before the previous
		   cap_snap.  lucky us. */
		dout("queue_cap_snap %p already pending\n", inode);
		kfree(capsnap);
	} else if (dirty & (CEPH_CAP_AUTH_EXCL|CEPH_CAP_XATTR_EXCL|
			    CEPH_CAP_FILE_EXCL|CEPH_CAP_FILE_WR)) {
		struct ceph_snap_context *snapc = ci->i_head_snapc;

		/*
		 * if we are a sync write, we may need to go to the snaprealm
		 * to get the current snapc.
		 */
		if (!snapc)
			snapc = ci->i_snap_realm->cached_context;

		dout("queue_cap_snap %p cap_snap %p queuing under %p %s\n",
		     inode, capsnap, snapc, ceph_cap_string(dirty));
		ihold(inode);

		atomic_set(&capsnap->nref, 1);
		capsnap->ci = ci;
		INIT_LIST_HEAD(&capsnap->ci_item);
		INIT_LIST_HEAD(&capsnap->flushing_item);

		capsnap->follows = snapc->seq;
		capsnap->issued = __ceph_caps_issued(ci, NULL);
		capsnap->dirty = dirty;

		capsnap->mode = inode->i_mode;
		capsnap->uid = inode->i_uid;
		capsnap->gid = inode->i_gid;

		if (dirty & CEPH_CAP_XATTR_EXCL) {
			__ceph_build_xattrs_blob(ci);
			capsnap->xattr_blob =
				ceph_buffer_get(ci->i_xattrs.blob);
			capsnap->xattr_version = ci->i_xattrs.version;
		} else {
			capsnap->xattr_blob = NULL;
			capsnap->xattr_version = 0;
		}

		/* dirty page count moved from _head to this cap_snap;
		   all subsequent page dirties occur _after_ this
		   snapshot. */
		capsnap->dirty_pages = ci->i_wrbuffer_ref_head;
		ci->i_wrbuffer_ref_head = 0;
		capsnap->context = snapc;
		ci->i_head_snapc =
			ceph_get_snap_context(ci->i_snap_realm->cached_context);
		dout(" new snapc is %p\n", ci->i_head_snapc);
		list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);

		if (used & CEPH_CAP_FILE_WR) {
			dout("queue_cap_snap %p cap_snap %p snapc %p"
			     " seq %llu used WR, now pending\n", inode,
			     capsnap, snapc, snapc->seq);
			capsnap->writing = 1;
		} else {
			/* note mtime, size NOW. */
			__ceph_finish_cap_snap(ci, capsnap);
		}
	} else {
		dout("queue_cap_snap %p nothing dirty|writing\n", inode);
		kfree(capsnap);
	}

	spin_unlock(&ci->i_ceph_lock);
}

/*
 * Finalize the size, mtime for a cap_snap... that is, settle on final
 * values to be used for the snapshot, to be flushed back to the mds.
 *
 * If capsnap can now be flushed, add to snap_flush list, and return 1.
 *
 * Caller must hold i_ceph_lock.
 */
int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
			   struct ceph_cap_snap *capsnap)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;

	BUG_ON(capsnap->writing);
	capsnap->size = inode->i_size;
	capsnap->mtime = inode->i_mtime;
	capsnap->atime = inode->i_atime;
	capsnap->ctime = inode->i_ctime;
	capsnap->time_warp_seq = ci->i_time_warp_seq;
	if (capsnap->dirty_pages) {
		dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu "
		     "still has %d dirty pages\n", inode, capsnap,
		     capsnap->context, capsnap->context->seq,
		     ceph_cap_string(capsnap->dirty), capsnap->size,
		     capsnap->dirty_pages);
		return 0;
	}
	dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu\n",
	     inode, capsnap, capsnap->context,
	     capsnap->context->seq, ceph_cap_string(capsnap->dirty),
	     capsnap->size);

	spin_lock(&mdsc->snap_flush_lock);
	list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
	spin_unlock(&mdsc->snap_flush_lock);
	return 1;  /* caller may want to ceph_flush_snaps */
}

/*
 * Queue cap_snaps for snap writeback for this realm and its children.
 * Called under snap_rwsem, so realm topology won't change.
 */
static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
{
	struct ceph_inode_info *ci;
	struct inode *lastinode = NULL;
	struct ceph_snap_realm *child;

	dout("queue_realm_cap_snaps %p %llx inodes\n", realm, realm->ino);

	spin_lock(&realm->inodes_with_caps_lock);
	list_for_each_entry(ci, &realm->inodes_with_caps,
			    i_snap_realm_item) {
		struct inode *inode = igrab(&ci->vfs_inode);
		if (!inode)
			continue;
		spin_unlock(&realm->inodes_with_caps_lock);
		if (lastinode)
			iput(lastinode);
		lastinode = inode;
		ceph_queue_cap_snap(ci);
		spin_lock(&realm->inodes_with_caps_lock);
	}
	spin_unlock(&realm->inodes_with_caps_lock);
	if (lastinode)
		iput(lastinode);

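	/*
	 * Splice our children into the dirty realm list right after
	 * us: since our caller (ceph_update_snap_trace) is walking
	 * that list, they will be processed next, with no need to
	 * recurse here.
	 */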
	list_for_each_entry(child, &realm->children, child_item) {
		dout("queue_realm_cap_snaps %p %llx queue child %p %llx\n",
		     realm, realm->ino, child, child->ino);
		list_del_init(&child->dirty_item);
		list_add(&child->dirty_item, &realm->dirty_item);
	}

	list_del_init(&realm->dirty_item);
	dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino);
}

/*
 * Parse and apply a snapblob "snap trace" from the MDS.  This specifies
 * the snap realm parameters from a given realm and all of its ancestors,
 * up to the root.
 *
 * Caller must hold snap_rwsem for write.
 */
int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
			   void *p, void *e, bool deletion)
{
	struct ceph_mds_snap_realm *ri;    /* encoded */
	__le64 *snaps;                     /* encoded */
	__le64 *prior_parent_snaps;        /* encoded */
	struct ceph_snap_realm *realm;
	int invalidate = 0;
	int err = -ENOMEM;
	LIST_HEAD(dirty_realms);

	dout("update_snap_trace deletion=%d\n", deletion);
more:
	ceph_decode_need(&p, e, sizeof(*ri), bad);
	ri = p;
	p += sizeof(*ri);
	ceph_decode_need(&p, e, sizeof(u64)*(le32_to_cpu(ri->num_snaps) +
			    le32_to_cpu(ri->num_prior_parent_snaps)), bad);
	snaps = p;
	p += sizeof(u64) * le32_to_cpu(ri->num_snaps);
	prior_parent_snaps = p;
	p += sizeof(u64) * le32_to_cpu(ri->num_prior_parent_snaps);

	realm = ceph_lookup_snap_realm(mdsc, le64_to_cpu(ri->ino));
	if (!realm) {
		realm = ceph_create_snap_realm(mdsc, le64_to_cpu(ri->ino));
		if (IS_ERR(realm)) {
			err = PTR_ERR(realm);
			goto fail;
		}
	}

	/* ensure the parent is correct */
	err = adjust_snap_realm_parent(mdsc, realm, le64_to_cpu(ri->parent));
	if (err < 0)
		goto fail;
	invalidate += err;

	if (le64_to_cpu(ri->seq) > realm->seq) {
		dout("update_snap_trace updating %llx %p %lld -> %lld\n",
		     realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
		/* update realm parameters, snap lists */
		realm->seq = le64_to_cpu(ri->seq);
		realm->created = le64_to_cpu(ri->created);
		realm->parent_since = le64_to_cpu(ri->parent_since);

		realm->num_snaps = le32_to_cpu(ri->num_snaps);
		err = dup_array(&realm->snaps, snaps, realm->num_snaps);
		if (err < 0)
			goto fail;

		realm->num_prior_parent_snaps =
			le32_to_cpu(ri->num_prior_parent_snaps);
		err = dup_array(&realm->prior_parent_snaps, prior_parent_snaps,
				realm->num_prior_parent_snaps);
		if (err < 0)
			goto fail;

		/* queue realm for cap_snap creation */
		list_add(&realm->dirty_item, &dirty_realms);

		invalidate = 1;
	} else if (!realm->cached_context) {
		dout("update_snap_trace %llx %p seq %lld new\n",
		     realm->ino, realm, realm->seq);
		invalidate = 1;
	} else {
		dout("update_snap_trace %llx %p seq %lld unchanged\n",
		     realm->ino, realm, realm->seq);
	}

	dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino,
	     realm, invalidate, p, e);

	if (p < e)
		goto more;

	/* invalidate when we reach the _end_ (root) of the trace */
	if (invalidate)
		rebuild_snap_realms(realm);

	/*
	 * queue cap snaps _after_ we've built the new snap contexts,
	 * so that i_head_snapc can be set appropriately.
	 */
	while (!list_empty(&dirty_realms)) {
		realm = list_first_entry(&dirty_realms, struct ceph_snap_realm,
					 dirty_item);
		queue_realm_cap_snaps(realm);
	}

	__cleanup_empty_realms(mdsc);
	return 0;

bad:
	err = -EINVAL;
fail:
	pr_err("update_snap_trace error %d\n", err);
	return err;
}


/*
 * Send any cap_snaps that are queued for flush.  Try to carry
 * s_mutex across multiple snap flushes to avoid locking overhead.
 *
 * Caller holds no locks.
 */
static void flush_snaps(struct ceph_mds_client *mdsc)
{
	struct ceph_inode_info *ci;
	struct inode *inode;
	struct ceph_mds_session *session = NULL;

	dout("flush_snaps\n");
	spin_lock(&mdsc->snap_flush_lock);
	while (!list_empty(&mdsc->snap_flush_list)) {
		ci = list_first_entry(&mdsc->snap_flush_list,
				struct ceph_inode_info, i_snap_flush_item);
		inode = &ci->vfs_inode;
		ihold(inode);
		spin_unlock(&mdsc->snap_flush_lock);
		spin_lock(&ci->i_ceph_lock);
		__ceph_flush_snaps(ci, &session, 0);
		spin_unlock(&ci->i_ceph_lock);
		iput(inode);
		spin_lock(&mdsc->snap_flush_lock);
	}
	spin_unlock(&mdsc->snap_flush_lock);

	if (session) {
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
	}
	dout("flush_snaps done\n");
}


/*
 * Handle a snap notification from the MDS.
 *
 * This can take two basic forms: the simplest is just a snap creation
 * or deletion notification on an existing realm.  This should update the
 * realm and its children.
 *
 * The more difficult case is realm creation, due to snap creation at a
 * new point in the file hierarchy, or due to a rename that moves a file or
 * directory into another realm.
 */
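/*
 * Rough message layout, following the ceph_mds_snap_head (see the
 * decode logic below):
 *
 *	__le64 split_inos[num_split_inos]     \ present only when op
 *	__le64 split_realms[num_split_realms] / is CEPH_SNAP_OP_SPLIT
 *	snap trace: encoded ceph_mds_snap_realm records, each followed
 *	            by its snap and prior_parent_snap arrays, handled
 *	            by ceph_update_snap_trace()
 */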
void ceph_handle_snap(struct ceph_mds_client *mdsc,
		      struct ceph_mds_session *session,
		      struct ceph_msg *msg)
{
	struct super_block *sb = mdsc->fsc->sb;
	int mds = session->s_mds;
	u64 split;
	int op;
	int trace_len;
	struct ceph_snap_realm *realm = NULL;
	void *p = msg->front.iov_base;
	void *e = p + msg->front.iov_len;
	struct ceph_mds_snap_head *h;
	int num_split_inos, num_split_realms;
	__le64 *split_inos = NULL, *split_realms = NULL;
	int i;
	int locked_rwsem = 0;

	/* decode */
	if (msg->front.iov_len < sizeof(*h))
		goto bad;
	h = p;
	op = le32_to_cpu(h->op);
	split = le64_to_cpu(h->split);   /* non-zero if we are splitting an
					  * existing realm */
	num_split_inos = le32_to_cpu(h->num_split_inos);
	num_split_realms = le32_to_cpu(h->num_split_realms);
	trace_len = le32_to_cpu(h->trace_len);
	p += sizeof(*h);

	dout("handle_snap from mds%d op %s split %llx tracelen %d\n", mds,
	     ceph_snap_op_name(op), split, trace_len);

	mutex_lock(&session->s_mutex);
	session->s_seq++;
	mutex_unlock(&session->s_mutex);

	down_write(&mdsc->snap_rwsem);
	locked_rwsem = 1;

	if (op == CEPH_SNAP_OP_SPLIT) {
		struct ceph_mds_snap_realm *ri;

		/*
		 * A "split" breaks part of an existing realm off into
		 * a new realm.  The MDS provides a list of inodes
		 * (with caps) and child realms that belong to the new
		 * child.
		 */
		split_inos = p;
		p += sizeof(u64) * num_split_inos;
		split_realms = p;
		p += sizeof(u64) * num_split_realms;
		ceph_decode_need(&p, e, sizeof(*ri), bad);
		/* we will peek at realm info here, but will _not_
		 * advance p, as the realm update will occur below in
		 * ceph_update_snap_trace. */
		ri = p;

		realm = ceph_lookup_snap_realm(mdsc, split);
		if (!realm) {
			realm = ceph_create_snap_realm(mdsc, split);
			if (IS_ERR(realm))
				goto out;
		}
		ceph_get_snap_realm(mdsc, realm);

		dout("splitting snap_realm %llx %p\n", realm->ino, realm);
		for (i = 0; i < num_split_inos; i++) {
			struct ceph_vino vino = {
				.ino = le64_to_cpu(split_inos[i]),
				.snap = CEPH_NOSNAP,
			};
			struct inode *inode = ceph_find_inode(sb, vino);
			struct ceph_inode_info *ci;
			struct ceph_snap_realm *oldrealm;

			if (!inode)
				continue;
			ci = ceph_inode(inode);

			spin_lock(&ci->i_ceph_lock);
			if (!ci->i_snap_realm)
				goto skip_inode;
			/*
			 * If this inode belongs to a realm that was
			 * created after our new realm, we experienced
			 * a race (due to another split notification
			 * arriving from a different MDS).  So skip
			 * this inode.
			 */
			if (ci->i_snap_realm->created >
			    le64_to_cpu(ri->created)) {
				dout(" leaving %p in newer realm %llx %p\n",
				     inode, ci->i_snap_realm->ino,
				     ci->i_snap_realm);
				goto skip_inode;
			}
			dout(" will move %p to split realm %llx %p\n",
			     inode, realm->ino, realm);
			/*
			 * Move the inode to the new realm
			 */
			spin_lock(&realm->inodes_with_caps_lock);
			list_del_init(&ci->i_snap_realm_item);
			list_add(&ci->i_snap_realm_item,
				 &realm->inodes_with_caps);
			oldrealm = ci->i_snap_realm;
			ci->i_snap_realm = realm;
			spin_unlock(&realm->inodes_with_caps_lock);
			spin_unlock(&ci->i_ceph_lock);

			ceph_get_snap_realm(mdsc, realm);
			ceph_put_snap_realm(mdsc, oldrealm);

			iput(inode);
			continue;

skip_inode:
			spin_unlock(&ci->i_ceph_lock);
			iput(inode);
		}

		/* we may have taken some of the old realm's children. */
		for (i = 0; i < num_split_realms; i++) {
			struct ceph_snap_realm *child =
				ceph_lookup_snap_realm(mdsc,
					   le64_to_cpu(split_realms[i]));
			if (!child)
				continue;
			adjust_snap_realm_parent(mdsc, child, realm->ino);
		}
	}

	/*
	 * update using the provided snap trace. if we are deleting a
	 * snap, we can avoid queueing cap_snaps.
	 */
	ceph_update_snap_trace(mdsc, p, e,
			       op == CEPH_SNAP_OP_DESTROY);

	if (op == CEPH_SNAP_OP_SPLIT)
		/* we took a reference when we created the realm, above */
		ceph_put_snap_realm(mdsc, realm);

	__cleanup_empty_realms(mdsc);

	up_write(&mdsc->snap_rwsem);

	flush_snaps(mdsc);
	return;

bad:
	pr_err("corrupt snap message from mds%d\n", mds);
	ceph_msg_dump(msg);
out:
	if (locked_rwsem)
		up_write(&mdsc->snap_rwsem);
	return;
}