// SPDX-License-Identifier: GPL-2.0+
/*
 * cpfile.c - NILFS checkpoint file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Koji Sato.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include "mdt.h"
#include "cpfile.h"


static inline unsigned long
nilfs_cpfile_checkpoints_per_block(const struct inode *cpfile)
{
	return NILFS_MDT(cpfile)->mi_entries_per_block;
}

/* block number from the beginning of the file */
static unsigned long
nilfs_cpfile_get_blkoff(const struct inode *cpfile, __u64 cno)
{
	__u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;

	do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
	return (unsigned long)tcno;
}

/* offset in block */
static unsigned long
nilfs_cpfile_get_offset(const struct inode *cpfile, __u64 cno)
{
	__u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;

	return do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
}
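/*
 * Worked example (the numbers are illustrative only, not taken from any
 * particular filesystem): with mi_entries_per_block == 8 and
 * mi_first_entry_offset == 2, checkpoint number 1 lives in block 0 at
 * slot 2 (the header occupies the leading slots of block 0), while
 * checkpoint number 7 maps to tcno = 7 + 2 - 1 = 8, i.e. block 8 / 8 = 1,
 * slot 8 % 8 = 0.
 */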

static __u64 nilfs_cpfile_first_checkpoint_in_block(const struct inode *cpfile,
						    unsigned long blkoff)
{
	return (__u64)nilfs_cpfile_checkpoints_per_block(cpfile) * blkoff
		+ 1 - NILFS_MDT(cpfile)->mi_first_entry_offset;
}

static unsigned long
nilfs_cpfile_checkpoints_in_block(const struct inode *cpfile,
				  __u64 curr,
				  __u64 max)
{
	return min_t(__u64,
		     nilfs_cpfile_checkpoints_per_block(cpfile) -
		     nilfs_cpfile_get_offset(cpfile, curr),
		     max - curr);
}

static inline int nilfs_cpfile_is_in_first(const struct inode *cpfile,
					   __u64 cno)
{
	return nilfs_cpfile_get_blkoff(cpfile, cno) == 0;
}
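
/*
 * In every block except block 0, the cp_checkpoints_count field of the
 * block's first checkpoint slot is reused as a counter of valid
 * checkpoints in that block; the two helpers below maintain it.  Block 0
 * is skipped (callers test nilfs_cpfile_is_in_first() first) because it
 * holds the cpfile header and is never released as a hole, so no counter
 * is needed there.
 */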

static unsigned int
nilfs_cpfile_block_add_valid_checkpoints(const struct inode *cpfile,
					 struct buffer_head *bh,
					 void *kaddr,
					 unsigned int n)
{
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	unsigned int count;

	count = le32_to_cpu(cp->cp_checkpoints_count) + n;
	cp->cp_checkpoints_count = cpu_to_le32(count);
	return count;
}

static unsigned int
nilfs_cpfile_block_sub_valid_checkpoints(const struct inode *cpfile,
					 struct buffer_head *bh,
					 void *kaddr,
					 unsigned int n)
{
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	unsigned int count;

	WARN_ON(le32_to_cpu(cp->cp_checkpoints_count) < n);
	count = le32_to_cpu(cp->cp_checkpoints_count) - n;
	cp->cp_checkpoints_count = cpu_to_le32(count);
	return count;
}

static inline struct nilfs_cpfile_header *
nilfs_cpfile_block_get_header(const struct inode *cpfile,
			      struct buffer_head *bh,
			      void *kaddr)
{
	return kaddr + bh_offset(bh);
}

static struct nilfs_checkpoint *
nilfs_cpfile_block_get_checkpoint(const struct inode *cpfile, __u64 cno,
				  struct buffer_head *bh,
				  void *kaddr)
{
	return kaddr + bh_offset(bh) + nilfs_cpfile_get_offset(cpfile, cno) *
		NILFS_MDT(cpfile)->mi_entry_size;
}

static void nilfs_cpfile_block_init(struct inode *cpfile,
				    struct buffer_head *bh,
				    void *kaddr)
{
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	int n = nilfs_cpfile_checkpoints_per_block(cpfile);

	while (n-- > 0) {
		nilfs_checkpoint_set_invalid(cp);
		cp = (void *)cp + cpsz;
	}
}

static inline int nilfs_cpfile_get_header_block(struct inode *cpfile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(cpfile, 0, 0, NULL, bhp);
}

static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile,
						     __u64 cno,
						     int create,
						     struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(cpfile,
				   nilfs_cpfile_get_blkoff(cpfile, cno),
				   create, nilfs_cpfile_block_init, bhp);
}

/**
 * nilfs_cpfile_find_checkpoint_block - find and get a buffer on cpfile
 * @cpfile: inode of cpfile
 * @start_cno: start checkpoint number (inclusive)
 * @end_cno: end checkpoint number (inclusive)
 * @cnop: place to store the next checkpoint number
 * @bhp: place to store a pointer to buffer_head struct
 *
 * Return Value: On success, it returns 0. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-EIO - I/O error
 *
 * %-ENOENT - no block exists in the range.
 */
static int nilfs_cpfile_find_checkpoint_block(struct inode *cpfile,
					      __u64 start_cno, __u64 end_cno,
					      __u64 *cnop,
					      struct buffer_head **bhp)
{
	unsigned long start, end, blkoff;
	int ret;

	if (unlikely(start_cno > end_cno))
		return -ENOENT;

	start = nilfs_cpfile_get_blkoff(cpfile, start_cno);
	end = nilfs_cpfile_get_blkoff(cpfile, end_cno);

	ret = nilfs_mdt_find_block(cpfile, start, end, &blkoff, bhp);
	if (!ret)
		*cnop = (blkoff == start) ? start_cno :
			nilfs_cpfile_first_checkpoint_in_block(cpfile, blkoff);
	return ret;
}

static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile,
							__u64 cno)
{
	return nilfs_mdt_delete_block(cpfile,
				      nilfs_cpfile_get_blkoff(cpfile, cno));
}

/**
 * nilfs_cpfile_get_checkpoint - get a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @create: create flag
 * @cpp: pointer to a checkpoint
 * @bhp: pointer to a buffer head
 *
 * Description: nilfs_cpfile_get_checkpoint() acquires the checkpoint
 * specified by @cno. A new checkpoint will be created if @cno is the current
 * checkpoint number and @create is nonzero.
 *
 * Return Value: On success, 0 is returned, and the checkpoint and the
 * buffer head of the buffer on which the checkpoint is located are stored in
 * the places pointed to by @cpp and @bhp, respectively. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 *
 * %-EINVAL - invalid checkpoint.
 */
int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
				__u64 cno,
				int create,
				struct nilfs_checkpoint **cpp,
				struct buffer_head **bhp)
{
	struct buffer_head *header_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	void *kaddr;
	int ret;

	if (unlikely(cno < 1 || cno > nilfs_mdt_cno(cpfile) ||
		     (cno < nilfs_mdt_cno(cpfile) && create)))
		return -EINVAL;

	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_sem;
	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, create, &cp_bh);
	if (ret < 0)
		goto out_header;
	kaddr = kmap(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		if (!create) {
			kunmap(cp_bh->b_page);
			brelse(cp_bh);
			ret = -ENOENT;
			goto out_header;
		}
		/* a newly-created checkpoint */
		nilfs_checkpoint_clear_invalid(cp);
		if (!nilfs_cpfile_is_in_first(cpfile, cno))
			nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh,
								 kaddr, 1);
		mark_buffer_dirty(cp_bh);

		kaddr = kmap_atomic(header_bh->b_page);
		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
						       kaddr);
		le64_add_cpu(&header->ch_ncheckpoints, 1);
		kunmap_atomic(kaddr);
		mark_buffer_dirty(header_bh);
		nilfs_mdt_mark_dirty(cpfile);
	}

	if (cpp != NULL)
		*cpp = cp;
	*bhp = cp_bh;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_put_checkpoint - put a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @bh: buffer head
 *
 * Description: nilfs_cpfile_put_checkpoint() releases the checkpoint
 * specified by @cno. @bh must be the buffer head which has been returned by
 * a previous call to nilfs_cpfile_get_checkpoint() with @cno.
 */
void nilfs_cpfile_put_checkpoint(struct inode *cpfile, __u64 cno,
				 struct buffer_head *bh)
{
	kunmap(bh->b_page);
	brelse(bh);
}
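
/*
 * Usage sketch (illustrative only; error handling trimmed and the caller's
 * variables are assumed): nilfs_cpfile_get_checkpoint() and
 * nilfs_cpfile_put_checkpoint() are used as a pair so that the kmap() and
 * the buffer reference taken by the former are always released:
 *
 *	struct nilfs_checkpoint *cp;
 *	struct buffer_head *bh;
 *
 *	if (!nilfs_cpfile_get_checkpoint(cpfile, cno, 1, &cp, &bh)) {
 *		cp->cp_create = cpu_to_le64(ktime_get_real_seconds());
 *		nilfs_cpfile_put_checkpoint(cpfile, cno, bh);
 *	}
 */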

/**
 * nilfs_cpfile_delete_checkpoints - delete checkpoints
 * @cpfile: inode of checkpoint file
 * @start: start checkpoint number
 * @end: end checkpoint number
 *
 * Description: nilfs_cpfile_delete_checkpoints() deletes the checkpoints in
 * the period from @start to @end, excluding @end itself. Checkpoints which
 * have already been deleted are ignored.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - invalid checkpoints.
 *
 * %-EBUSY - snapshots exist in the range (they are left untouched).
 */
int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
				    __u64 start,
				    __u64 end)
{
	struct buffer_head *header_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	__u64 cno;
	void *kaddr;
	unsigned long tnicps;
	int ret, ncps, nicps, nss, count, i;

	if (unlikely(start == 0 || start > end)) {
		nilfs_msg(cpfile->i_sb, KERN_ERR,
			  "cannot delete checkpoints: invalid range [%llu, %llu)",
			  (unsigned long long)start, (unsigned long long)end);
		return -EINVAL;
	}

	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_sem;
	tnicps = 0;
	nss = 0;

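	/*
	 * Scan the range block by block.  Requested checkpoints are marked
	 * invalid; snapshots are left untouched and only counted in nss so
	 * that -EBUSY can be reported below.  A block whose valid-checkpoint
	 * count drops to zero is punched out as a hole.
	 */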
	for (cno = start; cno < end; cno += ncps) {
		ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, end);
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				break;
			/* skip hole */
			ret = 0;
			continue;
		}

		kaddr = kmap_atomic(cp_bh->b_page);
		cp = nilfs_cpfile_block_get_checkpoint(
			cpfile, cno, cp_bh, kaddr);
		nicps = 0;
		for (i = 0; i < ncps; i++, cp = (void *)cp + cpsz) {
			if (nilfs_checkpoint_snapshot(cp)) {
				nss++;
			} else if (!nilfs_checkpoint_invalid(cp)) {
				nilfs_checkpoint_set_invalid(cp);
				nicps++;
			}
		}
		if (nicps > 0) {
			tnicps += nicps;
			mark_buffer_dirty(cp_bh);
			nilfs_mdt_mark_dirty(cpfile);
			if (!nilfs_cpfile_is_in_first(cpfile, cno)) {
				count =
				  nilfs_cpfile_block_sub_valid_checkpoints(
						cpfile, cp_bh, kaddr, nicps);
				if (count == 0) {
					/* make hole */
					kunmap_atomic(kaddr);
					brelse(cp_bh);
					ret =
					  nilfs_cpfile_delete_checkpoint_block(
								   cpfile, cno);
					if (ret == 0)
						continue;
					nilfs_msg(cpfile->i_sb, KERN_ERR,
						  "error %d deleting checkpoint block",
						  ret);
					break;
				}
			}
		}

		kunmap_atomic(kaddr);
		brelse(cp_bh);
	}

	if (tnicps > 0) {
		kaddr = kmap_atomic(header_bh->b_page);
		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
						       kaddr);
		le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
		mark_buffer_dirty(header_bh);
		nilfs_mdt_mark_dirty(cpfile);
		kunmap_atomic(kaddr);
	}

	brelse(header_bh);
	if (nss > 0)
		ret = -EBUSY;

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

static void nilfs_cpfile_checkpoint_to_cpinfo(struct inode *cpfile,
					      struct nilfs_checkpoint *cp,
					      struct nilfs_cpinfo *ci)
{
	ci->ci_flags = le32_to_cpu(cp->cp_flags);
	ci->ci_cno = le64_to_cpu(cp->cp_cno);
	ci->ci_create = le64_to_cpu(cp->cp_create);
	ci->ci_nblk_inc = le64_to_cpu(cp->cp_nblk_inc);
	ci->ci_inodes_count = le64_to_cpu(cp->cp_inodes_count);
	ci->ci_blocks_count = le64_to_cpu(cp->cp_blocks_count);
	ci->ci_next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
}

static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
					  void *buf, unsigned int cisz,
					  size_t nci)
{
	struct nilfs_checkpoint *cp;
	struct nilfs_cpinfo *ci = buf;
	struct buffer_head *bh;
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	__u64 cur_cno = nilfs_mdt_cno(cpfile), cno = *cnop;
	void *kaddr;
	int n, ret;
	int ncps, i;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_read(&NILFS_MDT(cpfile)->mi_sem);

	for (n = 0; n < nci; cno += ncps) {
		ret = nilfs_cpfile_find_checkpoint_block(
			cpfile, cno, cur_cno - 1, &cno, &bh);
		if (ret < 0) {
			if (likely(ret == -ENOENT))
				break;
			goto out;
		}
		ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, cur_cno);

		kaddr = kmap_atomic(bh->b_page);
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
		for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) {
			if (!nilfs_checkpoint_invalid(cp)) {
				nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp,
								  ci);
				ci = (void *)ci + cisz;
				n++;
			}
		}
		kunmap_atomic(kaddr);
		brelse(bh);
	}

	ret = n;
	if (n > 0) {
		ci = (void *)ci - cisz;
		*cnop = ci->ci_cno + 1;
	}

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
					  void *buf, unsigned int cisz,
					  size_t nci)
{
	struct buffer_head *bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_cpinfo *ci = buf;
	__u64 curr = *cnop, next;
	unsigned long curr_blkoff, next_blkoff;
	void *kaddr;
	int n = 0, ret;

	down_read(&NILFS_MDT(cpfile)->mi_sem);

	if (curr == 0) {
		ret = nilfs_cpfile_get_header_block(cpfile, &bh);
		if (ret < 0)
			goto out;
		kaddr = kmap_atomic(bh->b_page);
		header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
		curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);
		kunmap_atomic(kaddr);
		brelse(bh);
		if (curr == 0) {
			ret = 0;
			goto out;
		}
	} else if (unlikely(curr == ~(__u64)0)) {
		ret = 0;
		goto out;
	}

	curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr);
	ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh);
	if (unlikely(ret < 0)) {
		if (ret == -ENOENT)
			ret = 0; /* No snapshots (started from a hole block) */
		goto out;
	}
	kaddr = kmap_atomic(bh->b_page);
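	/*
	 * Follow the cp_snapshot_list.ssl_next links, copying one
	 * nilfs_cpinfo entry per snapshot, until either nci entries have
	 * been produced or the end of the snapshot list is reached.
	 * *cnop ends up at the next snapshot to read, or at ~(__u64)0
	 * once the list has been exhausted.
	 */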
	while (n < nci) {
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr);
		curr = ~(__u64)0; /* Terminator */
		if (unlikely(nilfs_checkpoint_invalid(cp) ||
			     !nilfs_checkpoint_snapshot(cp)))
			break;
		nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, ci);
		ci = (void *)ci + cisz;
		n++;
		next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
		if (next == 0)
			break; /* reach end of the snapshot list */

		next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next);
		if (curr_blkoff != next_blkoff) {
			kunmap_atomic(kaddr);
			brelse(bh);
			ret = nilfs_cpfile_get_checkpoint_block(cpfile, next,
								0, &bh);
			if (unlikely(ret < 0)) {
				WARN_ON(ret == -ENOENT);
				goto out;
			}
			kaddr = kmap_atomic(bh->b_page);
		}
		curr = next;
		curr_blkoff = next_blkoff;
	}
	kunmap_atomic(kaddr);
	brelse(bh);
	*cnop = curr;
	ret = n;

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_get_cpinfo - get information on checkpoints
 * @cpfile: inode of checkpoint file
 * @cnop: place to pass in the starting checkpoint number and to store the
 *        next checkpoint number to read
 * @mode: NILFS_CHECKPOINT to list ordinary checkpoints, or NILFS_SNAPSHOT
 *        to list snapshots only
 * @buf: buffer to receive the checkpoint information
 * @cisz: size of a single entry in @buf
 * @nci: maximum number of entries to store in @buf
 *
 * Return Value: On success, the number of entries stored in @buf is
 * returned. On error, a negative error code is returned.
 */
ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode,
				void *buf, unsigned int cisz, size_t nci)
{
	switch (mode) {
	case NILFS_CHECKPOINT:
		return nilfs_cpfile_do_get_cpinfo(cpfile, cnop, buf, cisz, nci);
	case NILFS_SNAPSHOT:
		return nilfs_cpfile_do_get_ssinfo(cpfile, cnop, buf, cisz, nci);
	default:
		return -EINVAL;
	}
}
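
/*
 * Iteration sketch (illustrative only; process() is a stand-in for whatever
 * the caller does with each batch): listing interfaces such as the
 * NILFS_IOCTL_GET_CPINFO ioctl call nilfs_cpfile_get_cpinfo() repeatedly,
 * feeding back the cursor left in *cnop by the previous call:
 *
 *	struct nilfs_cpinfo info[16];
 *	__u64 cno = 1;
 *	ssize_t n;
 *
 *	while ((n = nilfs_cpfile_get_cpinfo(cpfile, &cno, NILFS_CHECKPOINT,
 *					    info, sizeof(info[0]),
 *					    ARRAY_SIZE(info))) > 0)
 *		process(info, n);
 */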

/**
 * nilfs_cpfile_delete_checkpoint - delete a single checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number to delete
 *
 * Return Value: On success, 0 is returned. On error, a negative error code
 * is returned: %-ENOENT if the checkpoint does not exist, %-EBUSY if it is
 * a snapshot, or another negative error code on I/O or memory failure.
 */
int nilfs_cpfile_delete_checkpoint(struct inode *cpfile, __u64 cno)
{
	struct nilfs_cpinfo ci;
	__u64 tcno = cno;
	ssize_t nci;

	nci = nilfs_cpfile_do_get_cpinfo(cpfile, &tcno, &ci, sizeof(ci), 1);
	if (nci < 0)
		return nci;
	else if (nci == 0 || ci.ci_cno != cno)
		return -ENOENT;
	else if (nilfs_cpinfo_snapshot(&ci))
		return -EBUSY;

	return nilfs_cpfile_delete_checkpoints(cpfile, cno, cno + 1);
}

static struct nilfs_snapshot_list *
nilfs_cpfile_block_get_snapshot_list(const struct inode *cpfile,
				     __u64 cno,
				     struct buffer_head *bh,
				     void *kaddr)
{
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;

	if (cno != 0) {
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
		list = &cp->cp_snapshot_list;
	} else {
		header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
		list = &header->ch_snapshot_list;
	}
	return list;
}

static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *curr_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 curr, prev;
	unsigned long curr_blkoff, prev_blkoff;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr);
		goto out_cp;
	}
	if (nilfs_checkpoint_snapshot(cp)) {
		ret = 0;
		kunmap_atomic(kaddr);
		goto out_cp;
	}
	kunmap_atomic(kaddr);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	kaddr = kmap_atomic(header_bh->b_page);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	list = &header->ch_snapshot_list;
	curr_bh = header_bh;
	get_bh(curr_bh);
	curr = 0;
	curr_blkoff = 0;
	prev = le64_to_cpu(list->ssl_prev);
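	/*
	 * The snapshot list is kept sorted by checkpoint number.  Walk it
	 * backwards from the header (largest snapshot number first) through
	 * the ssl_prev links until the entry that should precede cno is
	 * found; the new snapshot is then linked in between prev and curr
	 * below.
	 */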
	while (prev > cno) {
		prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev);
		curr = prev;
		if (curr_blkoff != prev_blkoff) {
			kunmap_atomic(kaddr);
			brelse(curr_bh);
			ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr,
								0, &curr_bh);
			if (ret < 0)
				goto out_header;
			kaddr = kmap_atomic(curr_bh->b_page);
		}
		curr_blkoff = prev_blkoff;
		cp = nilfs_cpfile_block_get_checkpoint(
			cpfile, curr, curr_bh, kaddr);
		list = &cp->cp_snapshot_list;
		prev = le64_to_cpu(list->ssl_prev);
	}
	kunmap_atomic(kaddr);

	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_curr;
	} else {
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

	kaddr = kmap_atomic(curr_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, curr, curr_bh, kaddr);
	list->ssl_prev = cpu_to_le64(cno);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev);
	nilfs_checkpoint_set_snapshot(cp);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(prev_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(cno);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(header_bh->b_page);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, 1);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(prev_bh);
	mark_buffer_dirty(curr_bh);
	mark_buffer_dirty(cp_bh);
	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

 out_curr:
	brelse(curr_bh);

 out_header:
	brelse(header_bh);

 out_cp:
	brelse(cp_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *next_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 next, prev;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr);
		goto out_cp;
	}
	if (!nilfs_checkpoint_snapshot(cp)) {
		ret = 0;
		kunmap_atomic(kaddr);
		goto out_cp;
	}

	list = &cp->cp_snapshot_list;
	next = le64_to_cpu(list->ssl_next);
	prev = le64_to_cpu(list->ssl_prev);
	kunmap_atomic(kaddr);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	if (next != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, 0,
							&next_bh);
		if (ret < 0)
			goto out_header;
	} else {
		next_bh = header_bh;
		get_bh(next_bh);
	}
	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_next;
	} else {
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

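	/*
	 * Unlink the snapshot from the doubly linked snapshot list: make its
	 * neighbours (or the list head in the header) point at each other,
	 * clear the entry's own links, and drop its snapshot flag.
	 */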
	kaddr = kmap_atomic(next_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, next, next_bh, kaddr);
	list->ssl_prev = cpu_to_le64(prev);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(prev_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(next);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(0);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0);
	nilfs_checkpoint_clear_snapshot(cp);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(header_bh->b_page);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, -1);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(next_bh);
	mark_buffer_dirty(prev_bh);
	mark_buffer_dirty(cp_bh);
	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

 out_next:
	brelse(next_bh);

 out_header:
	brelse(header_bh);

 out_cp:
	brelse(cp_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_is_snapshot - test whether a checkpoint is a snapshot
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 *
 * Description: nilfs_cpfile_is_snapshot() tests whether the checkpoint
 * specified by @cno is a snapshot.
 *
 * Return Value: On success, 1 is returned if the checkpoint specified by
 * @cno is a snapshot, or 0 if not. On error, one of the following negative
 * error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 */
int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *bh;
	struct nilfs_checkpoint *cp;
	void *kaddr;
	int ret;

	/*
	 * CP number is invalid if it's zero or larger than the
	 * largest existing one.
	 */
	if (cno == 0 || cno >= nilfs_mdt_cno(cpfile))
		return -ENOENT;
	down_read(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
	if (ret < 0)
		goto out;
	kaddr = kmap_atomic(bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
	if (nilfs_checkpoint_invalid(cp))
		ret = -ENOENT;
	else
		ret = nilfs_checkpoint_snapshot(cp);
	kunmap_atomic(kaddr);
	brelse(bh);

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_change_cpmode - change checkpoint mode
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @mode: mode of checkpoint
 *
 * Description: nilfs_cpfile_change_cpmode() changes the mode of the
 * checkpoint specified by @cno. The mode @mode is NILFS_CHECKPOINT or
 * NILFS_SNAPSHOT.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 */
int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
{
	int ret;

	switch (mode) {
	case NILFS_CHECKPOINT:
		if (nilfs_checkpoint_is_mounted(cpfile->i_sb, cno))
			/*
			 * Current implementation does not have to protect
			 * plain read-only mounts since they are exclusive
			 * with a read/write mount and are protected from the
			 * cleaner.
			 */
			ret = -EBUSY;
		else
			ret = nilfs_cpfile_clear_snapshot(cpfile, cno);
		return ret;
	case NILFS_SNAPSHOT:
		return nilfs_cpfile_set_snapshot(cpfile, cno);
	default:
		return -EINVAL;
	}
}

/**
 * nilfs_cpfile_get_stat - get checkpoint statistics
 * @cpfile: inode of checkpoint file
 * @cpstat: pointer to a structure of checkpoint statistics
 *
 * Description: nilfs_cpfile_get_stat() returns information about checkpoints.
 *
 * Return Value: On success, 0 is returned, and checkpoint information is
 * stored in the place pointed to by @cpstat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
{
	struct buffer_head *bh;
	struct nilfs_cpfile_header *header;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(bh->b_page);
	header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
	cpstat->cs_cno = nilfs_mdt_cno(cpfile);
	cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints);
	cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots);
	kunmap_atomic(kaddr);
	brelse(bh);

 out_sem:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_read - read or get cpfile inode
 * @sb: super block instance
 * @cpsize: size of a checkpoint entry
 * @raw_inode: on-disk cpfile inode
 * @inodep: buffer to store the inode
 */
int nilfs_cpfile_read(struct super_block *sb, size_t cpsize,
		      struct nilfs_inode *raw_inode, struct inode **inodep)
{
	struct inode *cpfile;
	int err;

	if (cpsize > sb->s_blocksize) {
		nilfs_msg(sb, KERN_ERR,
			  "too large checkpoint size: %zu bytes", cpsize);
		return -EINVAL;
	} else if (cpsize < NILFS_MIN_CHECKPOINT_SIZE) {
		nilfs_msg(sb, KERN_ERR,
			  "too small checkpoint size: %zu bytes", cpsize);
		return -EINVAL;
	}

	cpfile = nilfs_iget_locked(sb, NULL, NILFS_CPFILE_INO);
	if (unlikely(!cpfile))
		return -ENOMEM;
	if (!(cpfile->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(cpfile, NILFS_MDT_GFP, 0);
	if (err)
		goto failed;

	nilfs_mdt_set_entry_size(cpfile, cpsize,
				 sizeof(struct nilfs_cpfile_header));

	err = nilfs_read_inode_common(cpfile, raw_inode);
	if (err)
		goto failed;

	unlock_new_inode(cpfile);
 out:
	*inodep = cpfile;
	return 0;
 failed:
	iget_failed(cpfile);
	return err;
}