/*
 * cpfile.c - NILFS checkpoint file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "cpfile.h"


static inline unsigned long
nilfs_cpfile_checkpoints_per_block(const struct inode *cpfile)
{
	return NILFS_MDT(cpfile)->mi_entries_per_block;
}

/* block number from the beginning of the file */
static unsigned long
nilfs_cpfile_get_blkoff(const struct inode *cpfile, __u64 cno)
{
	__u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;
	do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
	return (unsigned long)tcno;
}

/* offset in block */
static unsigned long
nilfs_cpfile_get_offset(const struct inode *cpfile, __u64 cno)
{
	__u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;
	return do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
}

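/*
 * Number of checkpoint entries that can be scanned starting at @curr
 * without leaving the block containing @curr and without reaching @max.
 */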
static unsigned long
nilfs_cpfile_checkpoints_in_block(const struct inode *cpfile,
				  __u64 curr,
				  __u64 max)
{
	return min_t(__u64,
		     nilfs_cpfile_checkpoints_per_block(cpfile) -
		     nilfs_cpfile_get_offset(cpfile, curr),
		     max - curr);
}

static inline int nilfs_cpfile_is_in_first(const struct inode *cpfile,
					   __u64 cno)
{
	return nilfs_cpfile_get_blkoff(cpfile, cno) == 0;
}

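/*
 * The first checkpoint entry of each checkpoint block (except the first
 * block, which also holds the cpfile header) keeps the number of valid
 * checkpoints in that block in its cp_checkpoints_count field; the two
 * helpers below adjust that counter.
 */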
static unsigned int
nilfs_cpfile_block_add_valid_checkpoints(const struct inode *cpfile,
					 struct buffer_head *bh,
					 void *kaddr,
					 unsigned int n)
{
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	unsigned int count;

	count = le32_to_cpu(cp->cp_checkpoints_count) + n;
	cp->cp_checkpoints_count = cpu_to_le32(count);
	return count;
}

static unsigned int
nilfs_cpfile_block_sub_valid_checkpoints(const struct inode *cpfile,
					 struct buffer_head *bh,
					 void *kaddr,
					 unsigned int n)
{
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	unsigned int count;

	WARN_ON(le32_to_cpu(cp->cp_checkpoints_count) < n);
	count = le32_to_cpu(cp->cp_checkpoints_count) - n;
	cp->cp_checkpoints_count = cpu_to_le32(count);
	return count;
}

static inline struct nilfs_cpfile_header *
nilfs_cpfile_block_get_header(const struct inode *cpfile,
			      struct buffer_head *bh,
			      void *kaddr)
{
	return kaddr + bh_offset(bh);
}

static struct nilfs_checkpoint *
nilfs_cpfile_block_get_checkpoint(const struct inode *cpfile, __u64 cno,
				  struct buffer_head *bh,
				  void *kaddr)
{
	return kaddr + bh_offset(bh) + nilfs_cpfile_get_offset(cpfile, cno) *
		NILFS_MDT(cpfile)->mi_entry_size;
}

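/* Mark every checkpoint entry of a newly allocated block as invalid. */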
static void nilfs_cpfile_block_init(struct inode *cpfile,
				    struct buffer_head *bh,
				    void *kaddr)
{
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	int n = nilfs_cpfile_checkpoints_per_block(cpfile);

	while (n-- > 0) {
		nilfs_checkpoint_set_invalid(cp);
		cp = (void *)cp + cpsz;
	}
}

static inline int nilfs_cpfile_get_header_block(struct inode *cpfile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(cpfile, 0, 0, NULL, bhp);
}

static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile,
						    __u64 cno,
						    int create,
						    struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(cpfile,
				   nilfs_cpfile_get_blkoff(cpfile, cno),
				   create, nilfs_cpfile_block_init, bhp);
}

static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile,
							__u64 cno)
{
	return nilfs_mdt_delete_block(cpfile,
				      nilfs_cpfile_get_blkoff(cpfile, cno));
}

/**
 * nilfs_cpfile_get_checkpoint - get a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @create: create flag
 * @cpp: pointer to a checkpoint
 * @bhp: pointer to a buffer head
 *
 * Description: nilfs_cpfile_get_checkpoint() acquires the checkpoint
 * specified by @cno. A new checkpoint will be created if @cno is the current
 * checkpoint number and @create is nonzero.
 *
 * Return Value: On success, 0 is returned, and the checkpoint and the
 * buffer head of the buffer on which the checkpoint is located are stored in
 * the place pointed by @cpp and @bhp, respectively. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 *
 * %-EINVAL - invalid checkpoint.
 */
int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
				__u64 cno,
				int create,
				struct nilfs_checkpoint **cpp,
				struct buffer_head **bhp)
{
	struct buffer_head *header_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	void *kaddr;
	int ret;

	if (unlikely(cno < 1 || cno > nilfs_mdt_cno(cpfile) ||
		     (cno < nilfs_mdt_cno(cpfile) && create)))
		return -EINVAL;

	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_sem;
	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, create, &cp_bh);
	if (ret < 0)
		goto out_header;
	kaddr = kmap(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		if (!create) {
			kunmap(cp_bh->b_page);
			brelse(cp_bh);
			ret = -ENOENT;
			goto out_header;
		}
		/* a newly-created checkpoint */
		nilfs_checkpoint_clear_invalid(cp);
		if (!nilfs_cpfile_is_in_first(cpfile, cno))
			nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh,
								 kaddr, 1);
		mark_buffer_dirty(cp_bh);

		kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
						       kaddr);
		le64_add_cpu(&header->ch_ncheckpoints, 1);
		kunmap_atomic(kaddr, KM_USER0);
		mark_buffer_dirty(header_bh);
		nilfs_mdt_mark_dirty(cpfile);
	}

	if (cpp != NULL)
		*cpp = cp;
	*bhp = cp_bh;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_put_checkpoint - put a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @bh: buffer head
 *
 * Description: nilfs_cpfile_put_checkpoint() releases the checkpoint
 * specified by @cno. @bh must be the buffer head which has been returned by
 * a previous call to nilfs_cpfile_get_checkpoint() with @cno.
 */
void nilfs_cpfile_put_checkpoint(struct inode *cpfile, __u64 cno,
				 struct buffer_head *bh)
{
	kunmap(bh->b_page);
	brelse(bh);
}
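
/*
 * Typical use of the get/put pair above (illustrative sketch only):
 *
 *	struct nilfs_checkpoint *cp;
 *	struct buffer_head *bh;
 *	int err;
 *
 *	err = nilfs_cpfile_get_checkpoint(cpfile, cno, 1, &cp, &bh);
 *	if (!err) {
 *		... examine or fill in *cp ...
 *		nilfs_cpfile_put_checkpoint(cpfile, cno, bh);
 *	}
 */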

/**
 * nilfs_cpfile_delete_checkpoints - delete checkpoints
 * @cpfile: inode of checkpoint file
 * @start: start checkpoint number
 * @end: end checkpoint number
 *
 * Description: nilfs_cpfile_delete_checkpoints() deletes the checkpoints in
 * the period from @start to @end, excluding @end itself. The checkpoints
 * which have been already deleted are ignored.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - invalid checkpoints.
 */
int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
				    __u64 start,
				    __u64 end)
{
	struct buffer_head *header_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	__u64 cno;
	void *kaddr;
	unsigned long tnicps;
	int ret, ncps, nicps, count, i;

	if (unlikely(start == 0 || start > end)) {
		printk(KERN_ERR "%s: invalid range of checkpoint numbers: "
		       "[%llu, %llu)\n", __func__,
		       (unsigned long long)start, (unsigned long long)end);
		return -EINVAL;
	}

	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_sem;
	tnicps = 0;

	for (cno = start; cno < end; cno += ncps) {
		ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, end);
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				break;
			/* skip hole */
			ret = 0;
			continue;
		}

		kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
		cp = nilfs_cpfile_block_get_checkpoint(
			cpfile, cno, cp_bh, kaddr);
		nicps = 0;
		for (i = 0; i < ncps; i++, cp = (void *)cp + cpsz) {
			WARN_ON(nilfs_checkpoint_snapshot(cp));
			if (!nilfs_checkpoint_invalid(cp)) {
				nilfs_checkpoint_set_invalid(cp);
				nicps++;
			}
		}
		if (nicps > 0) {
			tnicps += nicps;
			mark_buffer_dirty(cp_bh);
			nilfs_mdt_mark_dirty(cpfile);
			if (!nilfs_cpfile_is_in_first(cpfile, cno)) {
				count =
				  nilfs_cpfile_block_sub_valid_checkpoints(
						cpfile, cp_bh, kaddr, nicps);
				if (count == 0) {
					/* make hole */
					kunmap_atomic(kaddr, KM_USER0);
					brelse(cp_bh);
					ret =
					  nilfs_cpfile_delete_checkpoint_block(
								   cpfile, cno);
					if (ret == 0)
						continue;
					printk(KERN_ERR
					       "%s: cannot delete block\n",
					       __func__);
					break;
				}
			}
		}

		kunmap_atomic(kaddr, KM_USER0);
		brelse(cp_bh);
	}

	if (tnicps > 0) {
		kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
						       kaddr);
		le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
		mark_buffer_dirty(header_bh);
		nilfs_mdt_mark_dirty(cpfile);
		kunmap_atomic(kaddr, KM_USER0);
	}

	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

static void nilfs_cpfile_checkpoint_to_cpinfo(struct inode *cpfile,
					      struct nilfs_checkpoint *cp,
					      struct nilfs_cpinfo *ci)
{
	ci->ci_flags = le32_to_cpu(cp->cp_flags);
	ci->ci_cno = le64_to_cpu(cp->cp_cno);
	ci->ci_create = le64_to_cpu(cp->cp_create);
	ci->ci_nblk_inc = le64_to_cpu(cp->cp_nblk_inc);
	ci->ci_inodes_count = le64_to_cpu(cp->cp_inodes_count);
	ci->ci_blocks_count = le64_to_cpu(cp->cp_blocks_count);
	ci->ci_next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
}

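/*
 * Scan valid checkpoints from *cnop towards the current checkpoint number,
 * copying at most @nci entries of @cisz bytes each into @buf; on return,
 * *cnop points one past the last checkpoint reported.
 */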
static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
					  void *buf, unsigned cisz, size_t nci)
{
	struct nilfs_checkpoint *cp;
	struct nilfs_cpinfo *ci = buf;
	struct buffer_head *bh;
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	__u64 cur_cno = nilfs_mdt_cno(cpfile), cno = *cnop;
	void *kaddr;
	int n, ret;
	int ncps, i;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_read(&NILFS_MDT(cpfile)->mi_sem);

	for (n = 0; cno < cur_cno && n < nci; cno += ncps) {
		ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, cur_cno);
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			continue; /* skip hole */
		}

		kaddr = kmap_atomic(bh->b_page, KM_USER0);
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
		for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) {
			if (!nilfs_checkpoint_invalid(cp)) {
				nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp,
								  ci);
				ci = (void *)ci + cisz;
				n++;
			}
		}
		kunmap_atomic(kaddr, KM_USER0);
		brelse(bh);
	}

	ret = n;
	if (n > 0) {
		ci = (void *)ci - cisz;
		*cnop = ci->ci_cno + 1;
	}

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

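/*
 * Walk the snapshot list instead of scanning every checkpoint: start from
 * the list head in the cpfile header when *cnop is 0, otherwise continue
 * from *cnop, and hand back ~0 in *cnop once the end of the list is reached.
 */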
static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
					  void *buf, unsigned cisz, size_t nci)
{
	struct buffer_head *bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_cpinfo *ci = buf;
	__u64 curr = *cnop, next;
	unsigned long curr_blkoff, next_blkoff;
	void *kaddr;
	int n = 0, ret;

	down_read(&NILFS_MDT(cpfile)->mi_sem);

	if (curr == 0) {
		ret = nilfs_cpfile_get_header_block(cpfile, &bh);
		if (ret < 0)
			goto out;
		kaddr = kmap_atomic(bh->b_page, KM_USER0);
		header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
		curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);
		kunmap_atomic(kaddr, KM_USER0);
		brelse(bh);
		if (curr == 0) {
			ret = 0;
			goto out;
		}
	} else if (unlikely(curr == ~(__u64)0)) {
		ret = 0;
		goto out;
	}

	curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr);
	ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh);
	if (unlikely(ret < 0)) {
		if (ret == -ENOENT)
			ret = 0; /* No snapshots (started from a hole block) */
		goto out;
	}
	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	while (n < nci) {
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr);
		curr = ~(__u64)0; /* Terminator */
		if (unlikely(nilfs_checkpoint_invalid(cp) ||
			     !nilfs_checkpoint_snapshot(cp)))
			break;
		nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, ci);
		ci = (void *)ci + cisz;
		n++;
		next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
		if (next == 0)
			break; /* reach end of the snapshot list */

		next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next);
		if (curr_blkoff != next_blkoff) {
			kunmap_atomic(kaddr, KM_USER0);
			brelse(bh);
			ret = nilfs_cpfile_get_checkpoint_block(cpfile, next,
								0, &bh);
			if (unlikely(ret < 0)) {
				WARN_ON(ret == -ENOENT);
				goto out;
			}
			kaddr = kmap_atomic(bh->b_page, KM_USER0);
		}
		curr = next;
		curr_blkoff = next_blkoff;
	}
	kunmap_atomic(kaddr, KM_USER0);
	brelse(bh);
	*cnop = curr;
	ret = n;

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_get_cpinfo - get information on checkpoints
 * @cpfile: inode of checkpoint file
 * @cnop: place to pass in the checkpoint number to start from and to
 *	receive the next checkpoint number on return
 * @mode: listing mode (NILFS_CHECKPOINT or NILFS_SNAPSHOT)
 * @buf: buffer to store checkpoint information
 * @cisz: byte size of one checkpoint info item in @buf
 * @nci: maximum number of checkpoint info items to retrieve
 */

ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode,
				void *buf, unsigned cisz, size_t nci)
{
	switch (mode) {
	case NILFS_CHECKPOINT:
		return nilfs_cpfile_do_get_cpinfo(cpfile, cnop, buf, cisz, nci);
	case NILFS_SNAPSHOT:
		return nilfs_cpfile_do_get_ssinfo(cpfile, cnop, buf, cisz, nci);
	default:
		return -EINVAL;
	}
}

/**
 * nilfs_cpfile_delete_checkpoint - delete a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number to delete
 */
int nilfs_cpfile_delete_checkpoint(struct inode *cpfile, __u64 cno)
{
	struct nilfs_cpinfo ci;
	__u64 tcno = cno;
	ssize_t nci;

	nci = nilfs_cpfile_do_get_cpinfo(cpfile, &tcno, &ci, sizeof(ci), 1);
	if (nci < 0)
		return nci;
	else if (nci == 0 || ci.ci_cno != cno)
		return -ENOENT;
	else if (nilfs_cpinfo_snapshot(&ci))
		return -EBUSY;

	return nilfs_cpfile_delete_checkpoints(cpfile, cno, cno + 1);
}

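/*
 * Snapshots are chained in a doubly linked list kept in ascending
 * checkpoint-number order and anchored at ch_snapshot_list in the cpfile
 * header; a @cno of 0 below denotes the header anchor rather than a real
 * checkpoint.
 */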
static struct nilfs_snapshot_list *
nilfs_cpfile_block_get_snapshot_list(const struct inode *cpfile,
				     __u64 cno,
				     struct buffer_head *bh,
				     void *kaddr)
{
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;

	if (cno != 0) {
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
		list = &cp->cp_snapshot_list;
	} else {
		header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
		list = &header->ch_snapshot_list;
	}
	return list;
}

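/*
 * Turn checkpoint @cno into a snapshot: walk backwards from the list tail
 * (the header's ssl_prev) until an entry with a smaller checkpoint number
 * is found, then link @cno between that entry and its successor.
 */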
static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *curr_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 curr, prev;
	unsigned long curr_blkoff, prev_blkoff;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}
	if (nilfs_checkpoint_snapshot(cp)) {
		ret = 0;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}
	kunmap_atomic(kaddr, KM_USER0);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	list = &header->ch_snapshot_list;
	curr_bh = header_bh;
	get_bh(curr_bh);
	curr = 0;
	curr_blkoff = 0;
	prev = le64_to_cpu(list->ssl_prev);
	while (prev > cno) {
		prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev);
		curr = prev;
		if (curr_blkoff != prev_blkoff) {
			kunmap_atomic(kaddr, KM_USER0);
			brelse(curr_bh);
			ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr,
								0, &curr_bh);
			if (ret < 0)
				goto out_header;
			kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
		}
		curr_blkoff = prev_blkoff;
		cp = nilfs_cpfile_block_get_checkpoint(
			cpfile, curr, curr_bh, kaddr);
		list = &cp->cp_snapshot_list;
		prev = le64_to_cpu(list->ssl_prev);
	}
	kunmap_atomic(kaddr, KM_USER0);

	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_curr;
	} else {
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

	kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, curr, curr_bh, kaddr);
	list->ssl_prev = cpu_to_le64(cno);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev);
	nilfs_checkpoint_set_snapshot(cp);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(cno);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, 1);
	kunmap_atomic(kaddr, KM_USER0);

	mark_buffer_dirty(prev_bh);
	mark_buffer_dirty(curr_bh);
	mark_buffer_dirty(cp_bh);
	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

 out_curr:
	brelse(curr_bh);

 out_header:
	brelse(header_bh);

 out_cp:
	brelse(cp_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

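/*
 * Remove checkpoint @cno from the snapshot list and clear its snapshot
 * flag; this is a no-op (returning 0) if @cno is not currently a snapshot.
 */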
static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *next_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 next, prev;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}
	if (!nilfs_checkpoint_snapshot(cp)) {
		ret = 0;
		kunmap_atomic(kaddr, KM_USER0);
		goto out_cp;
	}

	list = &cp->cp_snapshot_list;
	next = le64_to_cpu(list->ssl_next);
	prev = le64_to_cpu(list->ssl_prev);
	kunmap_atomic(kaddr, KM_USER0);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	if (next != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, 0,
							&next_bh);
		if (ret < 0)
			goto out_header;
	} else {
		next_bh = header_bh;
		get_bh(next_bh);
	}
	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_next;
	} else {
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

	kaddr = kmap_atomic(next_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, next, next_bh, kaddr);
	list->ssl_prev = cpu_to_le64(prev);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(next);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(0);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0);
	nilfs_checkpoint_clear_snapshot(cp);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, -1);
	kunmap_atomic(kaddr, KM_USER0);

	mark_buffer_dirty(next_bh);
	mark_buffer_dirty(prev_bh);
	mark_buffer_dirty(cp_bh);
	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

 out_next:
	brelse(next_bh);

 out_header:
	brelse(header_bh);

 out_cp:
	brelse(cp_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_is_snapshot - determine whether a checkpoint is a snapshot
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 *
 * Description: nilfs_cpfile_is_snapshot() tests whether the checkpoint
 * specified by @cno has been marked as a snapshot.
 *
 * Return Value: On success, 1 is returned if the checkpoint specified by
 * @cno is a snapshot, or 0 if not. On error, one of the following negative
 * error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 */
int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *bh;
	struct nilfs_checkpoint *cp;
	void *kaddr;
	int ret;

	/*
	 * CP number is invalid if it's zero or larger than the
	 * largest existing one.
	 */
	if (cno == 0 || cno >= nilfs_mdt_cno(cpfile))
		return -ENOENT;
	down_read(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
	if (ret < 0)
		goto out;
	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
	if (nilfs_checkpoint_invalid(cp))
		ret = -ENOENT;
	else
		ret = nilfs_checkpoint_snapshot(cp);
	kunmap_atomic(kaddr, KM_USER0);
	brelse(bh);

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_change_cpmode - change checkpoint mode
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @mode: mode of checkpoint
 *
 * Description: nilfs_cpfile_change_cpmode() changes the mode of the
 * checkpoint specified by @cno. The mode @mode is NILFS_CHECKPOINT or
 * NILFS_SNAPSHOT.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 */
int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
{
	int ret;

	switch (mode) {
	case NILFS_CHECKPOINT:
		if (nilfs_checkpoint_is_mounted(cpfile->i_sb, cno))
			/*
			 * Current implementation does not have to protect
			 * plain read-only mounts since they are exclusive
			 * with a read/write mount and are protected from the
			 * cleaner.
			 */
			ret = -EBUSY;
		else
			ret = nilfs_cpfile_clear_snapshot(cpfile, cno);
		return ret;
	case NILFS_SNAPSHOT:
		return nilfs_cpfile_set_snapshot(cpfile, cno);
	default:
		return -EINVAL;
	}
}

/**
 * nilfs_cpfile_get_stat - get checkpoint statistics
 * @cpfile: inode of checkpoint file
 * @cpstat: pointer to a structure of checkpoint statistics
 *
 * Description: nilfs_cpfile_get_stat() returns information about checkpoints.
 *
 * Return Value: On success, 0 is returned, and checkpoints information is
 * stored in the place pointed by @cpstat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
{
	struct buffer_head *bh;
	struct nilfs_cpfile_header *header;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
	cpstat->cs_cno = nilfs_mdt_cno(cpfile);
	cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints);
	cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots);
	kunmap_atomic(kaddr, KM_USER0);
	brelse(bh);

 out_sem:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_read - read or get cpfile inode
 * @sb: super block instance
 * @cpsize: size of a checkpoint entry
 * @raw_inode: on-disk cpfile inode
 * @inodep: buffer to store the inode
 */
int nilfs_cpfile_read(struct super_block *sb, size_t cpsize,
		      struct nilfs_inode *raw_inode, struct inode **inodep)
{
	struct inode *cpfile;
	int err;

	cpfile = nilfs_iget_locked(sb, NULL, NILFS_CPFILE_INO);
	if (unlikely(!cpfile))
		return -ENOMEM;
	if (!(cpfile->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(cpfile, NILFS_MDT_GFP, 0);
	if (err)
		goto failed;

	nilfs_mdt_set_entry_size(cpfile, cpsize,
				 sizeof(struct nilfs_cpfile_header));

	err = nilfs_read_inode_common(cpfile, raw_inode);
	if (err)
		goto failed;

	unlock_new_inode(cpfile);
 out:
	*inodep = cpfile;
	return 0;
 failed:
	iget_failed(cpfile);
	return err;
}