blob: da127325fdaa26dd0ff4fb3d7af2a5009c9b6f15 [file] [log] [blame]
Koji Sato6c98cd42009-04-06 19:01:32 -07001/*
2 * sufile.c - NILFS segment usage file.
3 *
4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 *
20 * Written by Koji Sato <koji@osrg.net>.
 * Revised by Ryusuke Konishi <ryusuke@osrg.net>.
Koji Sato6c98cd42009-04-06 19:01:32 -070022 */
23
24#include <linux/kernel.h>
25#include <linux/fs.h>
26#include <linux/string.h>
27#include <linux/buffer_head.h>
28#include <linux/errno.h>
29#include <linux/nilfs2_fs.h>
30#include "mdt.h"
31#include "sufile.h"
32
33
/*
 * nilfs_sufile_segment_usages_per_block - number of segment usage entries
 * that fit in one sufile block (precomputed in the mdt info).
 */
static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
	return NILFS_MDT(sufile)->mi_entries_per_block;
}
39
40static unsigned long
41nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
42{
43 __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
44 do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
45 return (unsigned long)t;
46}
47
48static unsigned long
49nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
50{
51 __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
52 return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
53}
54
55static unsigned long
56nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
57 __u64 max)
58{
59 return min_t(unsigned long,
60 nilfs_sufile_segment_usages_per_block(sufile) -
61 nilfs_sufile_get_offset(sufile, curr),
62 max - curr + 1);
63}
64
/*
 * nilfs_sufile_block_get_header - map the on-disk sufile header inside a
 * kmapped page.  @kaddr is the mapped page address; bh_offset() locates
 * the buffer within that page.
 */
static inline struct nilfs_sufile_header *
nilfs_sufile_block_get_header(const struct inode *sufile,
			      struct buffer_head *bh,
			      void *kaddr)
{
	void *buf = kaddr + bh_offset(bh);

	return buf;
}
72
73static struct nilfs_segment_usage *
74nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
75 struct buffer_head *bh, void *kaddr)
76{
77 return kaddr + bh_offset(bh) +
78 nilfs_sufile_get_offset(sufile, segnum) *
79 NILFS_MDT(sufile)->mi_entry_size;
80}
81
82static inline int nilfs_sufile_get_header_block(struct inode *sufile,
83 struct buffer_head **bhp)
84{
85 return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
86}
87
88static inline int
89nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
90 int create, struct buffer_head **bhp)
91{
92 return nilfs_mdt_get_block(sufile,
93 nilfs_sufile_get_blkoff(sufile, segnum),
94 create, NULL, bhp);
95}
96
/*
 * nilfs_sufile_mod_counter - apply deltas to the clean/dirty segment
 * counters kept in the sufile header.
 *
 * @ncleanadd and @ndirtyadd are added modulo 2^64, so a negative delta is
 * passed as its two's-complement (e.g. (u64)-1).  The header buffer is
 * marked dirty so the update reaches disk on the next metadata writeback.
 * Callers in this file invoke it with mi_sem held for writing.
 */
static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
				     u64 ncleanadd, u64 ndirtyadd)
{
	struct nilfs_sufile_header *header;
	void *kaddr;

	/* atomic mapping: no sleeping until the matching kunmap_atomic() */
	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = kaddr + bh_offset(header_bh);
	le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
	le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(header_bh);
}
111
/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: creation flag
 * @ndone: place to store number of modified segments on @segnumv
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
 * against the given array of segments.  The @dofunc is called with
 * buffers of a header block and the sufile block in which the target
 * segment usage entry is contained.  If @ndone is given, the number
 * of successfully modified segments from the head is stored in the
 * place @ndone points to.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in hole block (may be returned if
 * @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
			 int create, size_t *ndone,
			 void (*dofunc)(struct inode *, __u64,
					struct buffer_head *,
					struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	unsigned long blkoff, prev_blkoff;
	__u64 *seg;
	size_t nerr = 0, n = 0;
	int ret = 0;

	if (unlikely(nsegs == 0))
		goto out;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	/*
	 * Validate all segment numbers up front so that no entry is
	 * modified at all when any of them is out of range.
	 */
	for (seg = segnumv; seg < segnumv + nsegs; seg++) {
		if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
			printk(KERN_WARNING
			       "%s: invalid segment number: %llu\n", __func__,
			       (unsigned long long)*seg);
			nerr++;
		}
	}
	if (nerr > 0) {
		ret = -EINVAL;
		goto out_sem;
	}

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	seg = segnumv;
	blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
	ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		dofunc(sufile, *seg, header_bh, bh);

		if (++seg >= segnumv + nsegs)
			break;
		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
		/*
		 * Reuse the mapped buffer while consecutive segment
		 * numbers fall into the same sufile block.
		 */
		if (blkoff == prev_blkoff)
			continue;

		/* get different block */
		brelse(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_header;
	}
	brelse(bh);

 out_header:
	/*
	 * @seg points just past the last entry processed, so the count
	 * reported through @ndone is correct on both success and error.
	 */
	n = seg - segnumv;
	brelse(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
 out:
	if (ndone)
		*ndone = n;
	return ret;
}
207
/**
 * nilfs_sufile_update - modify a single segment usage
 * @sufile: inode of segment usage file
 * @segnum: number of the segment whose usage entry is modified
 * @create: whether a hole block containing the entry may be created
 * @dofunc: primitive operation applied to the usage entry
 *
 * Single-segment counterpart of nilfs_sufile_updatev(): calls @dofunc
 * once with the header buffer and the sufile block holding @segnum's
 * entry, under mi_sem held for writing.
 *
 * Return Value: zero on success, or a negative error code
 * (%-EINVAL for an out-of-range @segnum, otherwise whatever the block
 * lookup returned, e.g. %-EIO, %-ENOMEM, or %-ENOENT for a hole when
 * @create is zero).
 */
int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
			void (*dofunc)(struct inode *, __u64,
				       struct buffer_head *,
				       struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	int ret;

	if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
		printk(KERN_WARNING "%s: invalid segment number: %llu\n",
		       __func__, (unsigned long long)segnum);
		return -EINVAL;
	}
	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
	if (!ret) {
		dofunc(sufile, segnum, header_bh, bh);
		brelse(bh);
	}
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
238
/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed by @segnump. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
	struct buffer_head *header_bh, *su_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_segment_usage *su;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	__u64 segnum, maxsegnum, last_alloc;
	void *kaddr;
	unsigned long nsegments, ncleansegs, nsus;
	int ret, i, j;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
	ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	last_alloc = le64_to_cpu(header->sh_last_alloc);
	kunmap_atomic(kaddr, KM_USER0);

	/*
	 * Scan for a clean segment starting just after the last
	 * allocation, wrapping around to segment 0 and stopping at
	 * last_alloc so every segment is examined at most once.
	 */
	nsegments = nilfs_sufile_get_nsegments(sufile);
	segnum = last_alloc + 1;
	maxsegnum = nsegments - 1;
	for (i = 0; i < nsegments; i += nsus) {
		if (segnum >= nsegments) {
			/* wrap around */
			segnum = 0;
			maxsegnum = last_alloc;
		}
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
							   &su_bh);
		if (ret < 0)
			goto out_header;
		kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);

		/* number of candidate entries in this block */
		nsus = nilfs_sufile_segment_usages_in_block(
			sufile, segnum, maxsegnum);
		for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
			if (!nilfs_segment_usage_clean(su))
				continue;
			/* found a clean segment */
			nilfs_segment_usage_set_dirty(su);
			/*
			 * Drop the entry-block mapping before mapping the
			 * header block; only one KM_USER0 slot is held at
			 * a time.
			 */
			kunmap_atomic(kaddr, KM_USER0);

			kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
			header = nilfs_sufile_block_get_header(
				sufile, header_bh, kaddr);
			le64_add_cpu(&header->sh_ncleansegs, -1);
			le64_add_cpu(&header->sh_ndirtysegs, 1);
			header->sh_last_alloc = cpu_to_le64(segnum);
			kunmap_atomic(kaddr, KM_USER0);

			nilfs_mdt_mark_buffer_dirty(header_bh);
			nilfs_mdt_mark_buffer_dirty(su_bh);
			nilfs_mdt_mark_dirty(sufile);
			brelse(su_bh);
			*segnump = segnum;
			goto out_header;
		}

		kunmap_atomic(kaddr, KM_USER0);
		brelse(su_bh);
	}

	/* no segments left */
	ret = -ENOSPC;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
334
/*
 * nilfs_sufile_do_cancel_free - cancel freeing of a segment
 *
 * dofunc-style primitive (see nilfs_sufile_update()): turns a clean
 * segment usage back into a dirty one and moves one count from the
 * clean to the dirty counter in the header.  If the segment is not
 * clean, a warning is printed and nothing is changed.
 */
void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
				 struct buffer_head *header_bh,
				 struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (unlikely(!nilfs_segment_usage_clean(su))) {
		printk(KERN_WARNING "%s: segment %llu must be clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	nilfs_segment_usage_set_dirty(su);
	kunmap_atomic(kaddr, KM_USER0);

	/* one segment leaves the clean pool and enters the dirty pool */
	nilfs_sufile_mod_counter(header_bh, -1, 1);
	nilfs_mdt_mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
357
/*
 * nilfs_sufile_do_scrap - make a segment garbage
 *
 * dofunc-style primitive: resets the usage entry of @segnum to a bare
 * dirty state (no last-modified time, zero blocks, DIRTY flag only) and
 * fixes up the header counters according to the previous state.  A
 * no-op if the entry is already in exactly that state.
 */
void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
			   struct buffer_head *header_bh,
			   struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int clean, dirty;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	/* already scrapped: dirty flag only and zero blocks */
	if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
	    su->su_nblocks == cpu_to_le32(0)) {
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	/* remember the old state for the counter adjustment below */
	clean = nilfs_segment_usage_clean(su);
	dirty = nilfs_segment_usage_dirty(su);

	/* make the segment garbage */
	su->su_lastmod = cpu_to_le64(0);
	su->su_nblocks = cpu_to_le32(0);
	su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
	kunmap_atomic(kaddr, KM_USER0);

	/* was clean: clean count -1; was not dirty yet: dirty count +1 */
	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
	nilfs_mdt_mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
386
/*
 * nilfs_sufile_do_free - free a segment
 *
 * dofunc-style primitive: marks the usage entry of @segnum clean and
 * adjusts the header counters (clean +1; dirty -1 if the entry was
 * dirty).  If the segment is already clean, a warning is printed and
 * nothing is changed.
 */
void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
			  struct buffer_head *header_bh,
			  struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int sudirty;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_clean(su)) {
		printk(KERN_WARNING "%s: segment %llu is already clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	/* a freed segment is expected to be dirty and not in error state */
	WARN_ON(nilfs_segment_usage_error(su));
	WARN_ON(!nilfs_segment_usage_dirty(su));

	sudirty = nilfs_segment_usage_dirty(su);
	nilfs_segment_usage_set_clean(su);
	kunmap_atomic(kaddr, KM_USER0);
	nilfs_mdt_mark_buffer_dirty(su_bh);

	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
	nilfs_mdt_mark_dirty(sufile);
}
414
/**
 * nilfs_sufile_get_segment_usage - get a segment usage
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @sup: pointer to segment usage
 * @bhp: pointer to buffer head
 *
 * Description: nilfs_sufile_get_segment_usage() acquires the segment usage
 * specified by @segnum.  The page holding the entry stays kmapped and the
 * buffer head stays referenced until the caller releases them with
 * nilfs_sufile_put_segment_usage().
 *
 * Return Value: On success, 0 is returned, and the segment usage and the
 * buffer head of the buffer on which the segment usage is located are stored
 * in the place pointed by @sup and @bhp, respectively. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid segment usage number.
 */
int nilfs_sufile_get_segment_usage(struct inode *sufile, __u64 segnum,
				   struct nilfs_segment_usage **sup,
				   struct buffer_head **bhp)
{
	struct buffer_head *bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret;

	/* segnum is 0 origin */
	if (segnum >= nilfs_sufile_get_nsegments(sufile))
		return -EINVAL;
	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1, &bh);
	if (ret < 0)
		goto out_sem;
	/*
	 * Sleeping kmap (not kmap_atomic): the mapping outlives this
	 * function and is undone by nilfs_sufile_put_segment_usage().
	 */
	kaddr = kmap(bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		kunmap(bh->b_page);
		brelse(bh);
		ret = -EINVAL;
		goto out_sem;
	}

	if (sup != NULL)
		*sup = su;
	*bhp = bh;

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
469
/**
 * nilfs_sufile_put_segment_usage - put a segment usage
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @bh: buffer head
 *
 * Description: nilfs_sufile_put_segment_usage() releases the segment usage
 * specified by @segnum. @bh must be the buffer head which have been returned
 * by a previous call to nilfs_sufile_get_segment_usage() with @segnum.
 */
void nilfs_sufile_put_segment_usage(struct inode *sufile, __u64 segnum,
				    struct buffer_head *bh)
{
	/* undo the kmap() taken by nilfs_sufile_get_segment_usage() ... */
	kunmap(bh->b_page);
	/* ... then drop the buffer reference */
	brelse(bh);
}
486
/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage: total/clean/dirty segment counts from the sufile header, plus
 * checkpoint times and the protection sequence number taken from the
 * the_nilfs object.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed by @sustat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
	sustat->ss_ctime = nilfs->ns_ctime;
	sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
	/* ns_prot_seq is guarded by its own spinlock */
	spin_lock(&nilfs->ns_last_segment_lock);
	sustat->ss_prot_seq = nilfs->ns_prot_seq;
	spin_unlock(&nilfs->ns_last_segment_lock);
	kunmap_atomic(kaddr, KM_USER0);
	brelse(header_bh);

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
534
535/**
536 * nilfs_sufile_get_ncleansegs - get the number of clean segments
537 * @sufile: inode of segment usage file
538 * @nsegsp: pointer to the number of clean segments
539 *
540 * Description: nilfs_sufile_get_ncleansegs() acquires the number of clean
541 * segments.
542 *
543 * Return Value: On success, 0 is returned and the number of clean segments is
544 * stored in the place pointed by @nsegsp. On error, one of the following
545 * negative error codes is returned.
546 *
547 * %-EIO - I/O error.
548 *
549 * %-ENOMEM - Insufficient amount of memory available.
550 */
551int nilfs_sufile_get_ncleansegs(struct inode *sufile, unsigned long *nsegsp)
552{
553 struct nilfs_sustat sustat;
554 int ret;
555
556 ret = nilfs_sufile_get_stat(sufile, &sustat);
557 if (ret == 0)
558 *nsegsp = sustat.ss_ncleansegs;
559 return ret;
560}
561
/*
 * nilfs_sufile_do_set_error - mark a segment's usage entry as erroneous
 *
 * dofunc-style primitive: sets the error flag on @segnum's usage entry;
 * if the segment was clean, the clean counter is decremented.  A no-op
 * if the error flag is already set.
 */
void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
			       struct buffer_head *header_bh,
			       struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int suclean;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	suclean = nilfs_segment_usage_clean(su);
	nilfs_segment_usage_set_error(su);
	kunmap_atomic(kaddr, KM_USER0);

	/* an erroneous segment no longer counts as clean */
	if (suclean)
		nilfs_sufile_mod_counter(header_bh, -1, 0);
	nilfs_mdt_mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
585
/**
 * nilfs_sufile_get_suinfo - fetch segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @si: array of suinfo
 * @nsi: size of suinfo array
 *
 * Description: copies usage information for up to @nsi segments starting
 * at @segnum into @si.  Segments whose sufile block is a hole are
 * reported as all-zero entries.  The ACTIVE flag is recomputed from the
 * current state of the filesystem rather than taken from disk.
 *
 * Return Value: On success, the number of entries stored in @si is
 * returned.  On error, one of the following negative error codes is
 * returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum,
				struct nilfs_suinfo *si, size_t nsi)
{
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs;
	void *kaddr;
	unsigned long nsegs, segusages_per_block;
	ssize_t n;
	int ret, i, j;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	/* clamp the request to the end of the segment space */
	nsegs = min_t(unsigned long,
		      nilfs_sufile_get_nsegments(sufile) - segnum,
		      nsi);
	for (i = 0; i < nsegs; i += n, segnum += n) {
		/* process at most one sufile block per iteration */
		n = min_t(unsigned long,
			  segusages_per_block -
				  nilfs_sufile_get_offset(sufile, segnum),
			  nsegs - i);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			/* hole */
			memset(&si[i], 0, sizeof(struct nilfs_suinfo) * n);
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		for (j = 0; j < n; j++, su = (void *)su + susz) {
			si[i + j].sui_lastmod = le64_to_cpu(su->su_lastmod);
			si[i + j].sui_nblocks = le32_to_cpu(su->su_nblocks);
			/* strip the on-disk ACTIVE bit and recompute it */
			si[i + j].sui_flags = le32_to_cpu(su->su_flags) &
				~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
			if (nilfs_segment_is_active(nilfs, segnum + j))
				si[i + j].sui_flags |=
					(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
		}
		kunmap_atomic(kaddr, KM_USER0);
		brelse(su_bh);
	}
	ret = nsegs;

 out:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}