/*
 * sufile.c - NILFS segment usage file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 * Revised by Ryusuke Konishi <ryusuke@osrg.net>.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "sufile.h"


static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
        return NILFS_MDT(sufile)->mi_entries_per_block;
}

static unsigned long
nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
{
        __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
        do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
        return (unsigned long)t;
}

static unsigned long
nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
{
        __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
        return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
}
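
/*
 * Illustrative arithmetic (not part of the original file): the two helpers
 * above split a segment number into a sufile block offset and an in-block
 * index.  Assuming, for the sake of example, 256 segment usage entries per
 * block and mi_first_entry_offset = 1 (room reserved for the sufile
 * header), segment number 300 maps to
 *
 *	blkoff = (300 + 1) / 256 = 1	(second sufile block)
 *	offset = (300 + 1) % 256 = 45	(46th entry in that block)
 *
 * so nilfs_sufile_block_get_segment_usage() below would return the address
 * kaddr + bh_offset(bh) + 45 * mi_entry_size within that block.  The
 * concrete numbers depend on the block size and entry size of the volume.
 */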

static unsigned long
nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
                                     __u64 max)
{
        return min_t(unsigned long,
                     nilfs_sufile_segment_usages_per_block(sufile) -
                     nilfs_sufile_get_offset(sufile, curr),
                     max - curr + 1);
}

static struct nilfs_segment_usage *
nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
                                     struct buffer_head *bh, void *kaddr)
{
        return kaddr + bh_offset(bh) +
                nilfs_sufile_get_offset(sufile, segnum) *
                NILFS_MDT(sufile)->mi_entry_size;
}

static inline int nilfs_sufile_get_header_block(struct inode *sufile,
                                                struct buffer_head **bhp)
{
        return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}

static inline int
nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
                                     int create, struct buffer_head **bhp)
{
        return nilfs_mdt_get_block(sufile,
                                   nilfs_sufile_get_blkoff(sufile, segnum),
                                   create, NULL, bhp);
}

static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
                                     u64 ncleanadd, u64 ndirtyadd)
{
        struct nilfs_sufile_header *header;
        void *kaddr;

        kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
        header = kaddr + bh_offset(header_bh);
        le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
        le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
        kunmap_atomic(kaddr, KM_USER0);

        nilfs_mdt_mark_buffer_dirty(header_bh);
}

/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: creation flag
 * @ndone: place to store number of modified segments on @segnumv
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
 * against the given array of segments. The @dofunc is called with
 * buffers of a header block and the sufile block in which the target
 * segment usage entry is contained. If @ndone is given, the number
 * of successfully modified segments from the head is stored in the
 * place @ndone points to.
 *
 * Return Value: On success, zero is returned. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in a hole block (may be returned if
 * @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
                         int create, size_t *ndone,
                         void (*dofunc)(struct inode *, __u64,
                                        struct buffer_head *,
                                        struct buffer_head *))
{
        struct buffer_head *header_bh, *bh;
        unsigned long blkoff, prev_blkoff;
        __u64 *seg;
        size_t nerr = 0, n = 0;
        int ret = 0;

        if (unlikely(nsegs == 0))
                goto out;

        down_write(&NILFS_MDT(sufile)->mi_sem);
        for (seg = segnumv; seg < segnumv + nsegs; seg++) {
                if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
                        printk(KERN_WARNING
                               "%s: invalid segment number: %llu\n", __func__,
                               (unsigned long long)*seg);
                        nerr++;
                }
        }
        if (nerr > 0) {
                ret = -EINVAL;
                goto out_sem;
        }

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;

        seg = segnumv;
        blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
        ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
        if (ret < 0)
                goto out_header;

        for (;;) {
                dofunc(sufile, *seg, header_bh, bh);

                if (++seg >= segnumv + nsegs)
                        break;
                prev_blkoff = blkoff;
                blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
                if (blkoff == prev_blkoff)
                        continue;

                /* get different block */
                brelse(bh);
                ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
                if (unlikely(ret < 0))
                        goto out_header;
        }
        brelse(bh);

 out_header:
        n = seg - segnumv;
        brelse(header_bh);
 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
 out:
        if (ndone)
                *ndone = n;
        return ret;
}
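
/*
 * Illustrative usage sketch (not part of the original file): a caller that
 * wants to free a whole batch of segments can hand nilfs_sufile_do_free()
 * (defined later in this file) to the helper above; the in-tree
 * nilfs_sufile_freev() wrapper in sufile.h is built roughly this way.
 * The function name below is hypothetical.
 *
 *	static int nilfs_example_free_segments(struct inode *sufile,
 *					       __u64 *segnumv, size_t nsegs)
 *	{
 *		size_t ndone;
 *
 *		return nilfs_sufile_updatev(sufile, segnumv, nsegs, 0, &ndone,
 *					    nilfs_sufile_do_free);
 *	}
 */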

int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
                        void (*dofunc)(struct inode *, __u64,
                                       struct buffer_head *,
                                       struct buffer_head *))
{
        struct buffer_head *header_bh, *bh;
        int ret;

        if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
                printk(KERN_WARNING "%s: invalid segment number: %llu\n",
                       __func__, (unsigned long long)segnum);
                return -EINVAL;
        }
        down_write(&NILFS_MDT(sufile)->mi_sem);

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;

        ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
        if (!ret) {
                dofunc(sufile, segnum, header_bh, bh);
                brelse(bh);
        }
        brelse(header_bh);

 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}
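
/*
 * Illustrative usage sketch (not part of the original file): single-segment
 * state changes are built by pairing nilfs_sufile_update() with one of the
 * nilfs_sufile_do_*() primitives defined below; for instance, marking one
 * segment as unusable looks roughly like
 *
 *	err = nilfs_sufile_update(sufile, segnum, 0,
 *				  nilfs_sufile_do_set_error);
 *
 * With @create set to 0 the call returns -ENOENT rather than instantiating
 * a new sufile block when the segment usage entry lives in a hole.
 */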

/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed to by @segnump. On error,
 * one of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
        struct buffer_head *header_bh, *su_bh;
        struct nilfs_sufile_header *header;
        struct nilfs_segment_usage *su;
        size_t susz = NILFS_MDT(sufile)->mi_entry_size;
        __u64 segnum, maxsegnum, last_alloc;
        void *kaddr;
        unsigned long nsegments, ncleansegs, nsus;
        int ret, i, j;

        down_write(&NILFS_MDT(sufile)->mi_sem);

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;
        kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
        header = kaddr + bh_offset(header_bh);
        ncleansegs = le64_to_cpu(header->sh_ncleansegs);
        last_alloc = le64_to_cpu(header->sh_last_alloc);
        kunmap_atomic(kaddr, KM_USER0);

        nsegments = nilfs_sufile_get_nsegments(sufile);
        segnum = last_alloc + 1;
        maxsegnum = nsegments - 1;
        for (i = 0; i < nsegments; i += nsus) {
                if (segnum >= nsegments) {
                        /* wrap around */
                        segnum = 0;
                        maxsegnum = last_alloc;
                }
                ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
                                                           &su_bh);
                if (ret < 0)
                        goto out_header;
                kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
                su = nilfs_sufile_block_get_segment_usage(
                        sufile, segnum, su_bh, kaddr);

                nsus = nilfs_sufile_segment_usages_in_block(
                        sufile, segnum, maxsegnum);
                for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
                        if (!nilfs_segment_usage_clean(su))
                                continue;
                        /* found a clean segment */
                        nilfs_segment_usage_set_dirty(su);
                        kunmap_atomic(kaddr, KM_USER0);

                        kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
                        header = kaddr + bh_offset(header_bh);
                        le64_add_cpu(&header->sh_ncleansegs, -1);
                        le64_add_cpu(&header->sh_ndirtysegs, 1);
                        header->sh_last_alloc = cpu_to_le64(segnum);
                        kunmap_atomic(kaddr, KM_USER0);

                        nilfs_mdt_mark_buffer_dirty(header_bh);
                        nilfs_mdt_mark_buffer_dirty(su_bh);
                        nilfs_mdt_mark_dirty(sufile);
                        brelse(su_bh);
                        *segnump = segnum;
                        goto out_header;
                }

                kunmap_atomic(kaddr, KM_USER0);
                brelse(su_bh);
        }

        /* no segments left */
        ret = -ENOSPC;

 out_header:
        brelse(header_bh);

 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}
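
/*
 * Illustrative usage sketch (not part of the original file): the allocator
 * returns the chosen segment number through @segnump and accounts the
 * segment as dirty.  A caller that ends up not writing to the segment can
 * hand it back to the clean pool with the free primitive defined below.
 * "segment_turned_out_unneeded" is a placeholder condition, not real code.
 *
 *	__u64 segnum;
 *	int err;
 *
 *	err = nilfs_sufile_alloc(sufile, &segnum);
 *	if (err < 0)
 *		return err;
 *	if (segment_turned_out_unneeded)
 *		nilfs_sufile_update(sufile, segnum, 0, nilfs_sufile_do_free);
 */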

void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
                                 struct buffer_head *header_bh,
                                 struct buffer_head *su_bh)
{
        struct nilfs_segment_usage *su;
        void *kaddr;

        kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (unlikely(!nilfs_segment_usage_clean(su))) {
                printk(KERN_WARNING "%s: segment %llu must be clean\n",
                       __func__, (unsigned long long)segnum);
                kunmap_atomic(kaddr, KM_USER0);
                return;
        }
        nilfs_segment_usage_set_dirty(su);
        kunmap_atomic(kaddr, KM_USER0);

        nilfs_sufile_mod_counter(header_bh, -1, 1);
        nilfs_mdt_mark_buffer_dirty(su_bh);
        nilfs_mdt_mark_dirty(sufile);
}

void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
                           struct buffer_head *header_bh,
                           struct buffer_head *su_bh)
{
        struct nilfs_segment_usage *su;
        void *kaddr;
        int clean, dirty;

        kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
            su->su_nblocks == cpu_to_le32(0)) {
                kunmap_atomic(kaddr, KM_USER0);
                return;
        }
        clean = nilfs_segment_usage_clean(su);
        dirty = nilfs_segment_usage_dirty(su);

        /* make the segment garbage */
        su->su_lastmod = cpu_to_le64(0);
        su->su_nblocks = cpu_to_le32(0);
        su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
        kunmap_atomic(kaddr, KM_USER0);

        nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
        nilfs_mdt_mark_buffer_dirty(su_bh);
        nilfs_mdt_mark_dirty(sufile);
}

void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
                          struct buffer_head *header_bh,
                          struct buffer_head *su_bh)
{
        struct nilfs_segment_usage *su;
        void *kaddr;
        int sudirty;

        kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (nilfs_segment_usage_clean(su)) {
                printk(KERN_WARNING "%s: segment %llu is already clean\n",
                       __func__, (unsigned long long)segnum);
                kunmap_atomic(kaddr, KM_USER0);
                return;
        }
        WARN_ON(nilfs_segment_usage_error(su));
        WARN_ON(!nilfs_segment_usage_dirty(su));

        sudirty = nilfs_segment_usage_dirty(su);
        nilfs_segment_usage_set_clean(su);
        kunmap_atomic(kaddr, KM_USER0);
        nilfs_mdt_mark_buffer_dirty(su_bh);

        nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
        nilfs_mdt_mark_dirty(sufile);
}

/**
 * nilfs_sufile_get_segment_usage - get a segment usage
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @sup: pointer to segment usage
 * @bhp: pointer to buffer head
 *
 * Description: nilfs_sufile_get_segment_usage() acquires the segment usage
 * specified by @segnum.
 *
 * Return Value: On success, 0 is returned, and the segment usage and the
 * buffer head of the buffer on which the segment usage is located are stored
 * in the places pointed to by @sup and @bhp, respectively. On error, one of
 * the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid segment usage number.
 */
int nilfs_sufile_get_segment_usage(struct inode *sufile, __u64 segnum,
                                   struct nilfs_segment_usage **sup,
                                   struct buffer_head **bhp)
{
        struct buffer_head *bh;
        struct nilfs_segment_usage *su;
        void *kaddr;
        int ret;

        /* segnum is 0 origin */
        if (segnum >= nilfs_sufile_get_nsegments(sufile))
                return -EINVAL;
        down_write(&NILFS_MDT(sufile)->mi_sem);
        ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1, &bh);
        if (ret < 0)
                goto out_sem;
        kaddr = kmap(bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
        if (nilfs_segment_usage_error(su)) {
                kunmap(bh->b_page);
                brelse(bh);
                ret = -EINVAL;
                goto out_sem;
        }

        if (sup != NULL)
                *sup = su;
        *bhp = bh;

 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}

/**
 * nilfs_sufile_put_segment_usage - put a segment usage
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @bh: buffer head
 *
 * Description: nilfs_sufile_put_segment_usage() releases the segment usage
 * specified by @segnum. @bh must be the buffer head which has been returned
 * by a previous call to nilfs_sufile_get_segment_usage() with @segnum.
 */
void nilfs_sufile_put_segment_usage(struct inode *sufile, __u64 segnum,
                                    struct buffer_head *bh)
{
        kunmap(bh->b_page);
        brelse(bh);
}
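
/*
 * Illustrative usage sketch (not part of the original file): the get/put
 * pair above brackets direct edits of a single on-disk entry; the buffer
 * head returned by the get call must be handed back through the put call
 * once the caller is done with the mapped entry.  The timestamp update is
 * only an example of such an edit.
 *
 *	struct nilfs_segment_usage *su;
 *	struct buffer_head *bh;
 *	int err;
 *
 *	err = nilfs_sufile_get_segment_usage(sufile, segnum, &su, &bh);
 *	if (err < 0)
 *		return err;
 *	su->su_lastmod = cpu_to_le64(get_seconds());
 *	nilfs_mdt_mark_buffer_dirty(bh);
 *	nilfs_mdt_mark_dirty(sufile);
 *	nilfs_sufile_put_segment_usage(sufile, segnum, bh);
 */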

/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed to by @sustat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
        struct buffer_head *header_bh;
        struct nilfs_sufile_header *header;
        struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs;
        void *kaddr;
        int ret;

        down_read(&NILFS_MDT(sufile)->mi_sem);

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;

        kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
        header = kaddr + bh_offset(header_bh);
        sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
        sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
        sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
        sustat->ss_ctime = nilfs->ns_ctime;
        sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
        spin_lock(&nilfs->ns_last_segment_lock);
        sustat->ss_prot_seq = nilfs->ns_prot_seq;
        spin_unlock(&nilfs->ns_last_segment_lock);
        kunmap_atomic(kaddr, KM_USER0);
        brelse(header_bh);

 out_sem:
        up_read(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}
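
/*
 * Illustrative usage sketch (not part of the original file): callers such
 * as the sustat ioctl path read the whole statistics block in one call.
 * NILFS_EXAMPLE_MIN_CLEAN_SEGS is a hypothetical threshold, not a real
 * constant.
 *
 *	struct nilfs_sustat sustat;
 *	int err;
 *
 *	err = nilfs_sufile_get_stat(sufile, &sustat);
 *	if (err < 0)
 *		return err;
 *	if (sustat.ss_ncleansegs < NILFS_EXAMPLE_MIN_CLEAN_SEGS)
 *		printk(KERN_INFO "nilfs: clean segments running low\n");
 */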

/**
 * nilfs_sufile_get_ncleansegs - get the number of clean segments
 * @sufile: inode of segment usage file
 * @nsegsp: pointer to the number of clean segments
 *
 * Description: nilfs_sufile_get_ncleansegs() acquires the number of clean
 * segments.
 *
 * Return Value: On success, 0 is returned and the number of clean segments is
 * stored in the place pointed to by @nsegsp. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_ncleansegs(struct inode *sufile, unsigned long *nsegsp)
{
        struct nilfs_sustat sustat;
        int ret;

        ret = nilfs_sufile_get_stat(sufile, &sustat);
        if (ret == 0)
                *nsegsp = sustat.ss_ncleansegs;
        return ret;
}
552
Ryusuke Konishia7030182009-04-05 18:24:11 +0900553void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
554 struct buffer_head *header_bh,
555 struct buffer_head *su_bh)
Koji Sato6c98cd42009-04-06 19:01:32 -0700556{
Koji Sato6c98cd42009-04-06 19:01:32 -0700557 struct nilfs_segment_usage *su;
Koji Sato6c98cd42009-04-06 19:01:32 -0700558 void *kaddr;
Ryusuke Konishia7030182009-04-05 18:24:11 +0900559 int suclean;
Koji Sato6c98cd42009-04-06 19:01:32 -0700560
561 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
562 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
563 if (nilfs_segment_usage_error(su)) {
564 kunmap_atomic(kaddr, KM_USER0);
Ryusuke Konishia7030182009-04-05 18:24:11 +0900565 return;
Koji Sato6c98cd42009-04-06 19:01:32 -0700566 }
Ryusuke Konishi88072fa2009-04-05 15:03:16 +0900567 suclean = nilfs_segment_usage_clean(su);
Koji Sato6c98cd42009-04-06 19:01:32 -0700568 nilfs_segment_usage_set_error(su);
569 kunmap_atomic(kaddr, KM_USER0);
Koji Sato6c98cd42009-04-06 19:01:32 -0700570
Ryusuke Konishia7030182009-04-05 18:24:11 +0900571 if (suclean)
572 nilfs_sufile_mod_counter(header_bh, -1, 0);
Koji Sato6c98cd42009-04-06 19:01:32 -0700573 nilfs_mdt_mark_buffer_dirty(su_bh);
574 nilfs_mdt_mark_dirty(sufile);
Koji Sato6c98cd42009-04-06 19:01:32 -0700575}

/**
 * nilfs_sufile_get_suinfo - get segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @buf: array of suinfo
 * @sisz: byte size of suinfo
 * @nsi: size of suinfo array
 *
 * Description: nilfs_sufile_get_suinfo() copies segment usage information
 * for up to @nsi segments, starting at @segnum, into the @buf array of
 * @sisz-byte entries.
 *
 * Return Value: On success, the number of segment usage entries stored in
 * @buf is returned. On error, one of the following negative error codes is
 * returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
                                unsigned sisz, size_t nsi)
{
        struct buffer_head *su_bh;
        struct nilfs_segment_usage *su;
        struct nilfs_suinfo *si = buf;
        size_t susz = NILFS_MDT(sufile)->mi_entry_size;
        struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs;
        void *kaddr;
        unsigned long nsegs, segusages_per_block;
        ssize_t n;
        int ret, i, j;

        down_read(&NILFS_MDT(sufile)->mi_sem);

        segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
        nsegs = min_t(unsigned long,
                      nilfs_sufile_get_nsegments(sufile) - segnum,
                      nsi);
        for (i = 0; i < nsegs; i += n, segnum += n) {
                n = min_t(unsigned long,
                          segusages_per_block -
                                  nilfs_sufile_get_offset(sufile, segnum),
                          nsegs - i);
                ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
                                                           &su_bh);
                if (ret < 0) {
                        if (ret != -ENOENT)
                                goto out;
                        /* hole */
                        memset(si, 0, sisz * n);
                        si = (void *)si + sisz * n;
                        continue;
                }

                kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
                su = nilfs_sufile_block_get_segment_usage(
                        sufile, segnum, su_bh, kaddr);
                for (j = 0; j < n;
                     j++, su = (void *)su + susz, si = (void *)si + sisz) {
                        si->sui_lastmod = le64_to_cpu(su->su_lastmod);
                        si->sui_nblocks = le32_to_cpu(su->su_nblocks);
                        si->sui_flags = le32_to_cpu(su->su_flags) &
                                ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
                        if (nilfs_segment_is_active(nilfs, segnum + j))
                                si->sui_flags |=
                                        (1UL << NILFS_SEGMENT_USAGE_ACTIVE);
                }
                kunmap_atomic(kaddr, KM_USER0);
                brelse(su_bh);
        }
        ret = nsegs;

 out:
        up_read(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}
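
/*
 * Illustrative usage sketch (not part of the original file): usage records
 * are read in batches, so a caller can walk the whole device with a small
 * on-stack array and inspect si[0..n-1] after each call.  The batch size of
 * 16 is arbitrary.
 *
 *	struct nilfs_suinfo si[16];
 *	__u64 segnum = 0;
 *	ssize_t n;
 *
 *	while ((n = nilfs_sufile_get_suinfo(sufile, segnum, si, sizeof(si[0]),
 *					    ARRAY_SIZE(si))) > 0)
 *		segnum += n;
 *	if (n < 0)
 *		return n;
 */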

/**
 * nilfs_sufile_read - read sufile inode
 * @sufile: sufile inode
 * @raw_inode: on-disk sufile inode
 */
int nilfs_sufile_read(struct inode *sufile, struct nilfs_inode *raw_inode)
{
        return nilfs_read_inode_common(sufile, raw_inode);
}

/**
 * nilfs_sufile_new - create sufile
 * @nilfs: nilfs object
 * @susize: size of a segment usage entry
 */
struct inode *nilfs_sufile_new(struct the_nilfs *nilfs, size_t susize)
{
        struct inode *sufile;

        sufile = nilfs_mdt_new(nilfs, NULL, NILFS_SUFILE_INO, 0);
        if (sufile)
                nilfs_mdt_set_entry_size(sufile, susize,
                                         sizeof(struct nilfs_sufile_header));
        return sufile;
}
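
/*
 * Illustrative usage sketch (not part of the original file): the segment
 * usage entry size comes from the on-disk super block, so mount-time code
 * would create the sufile along the lines of
 *
 *	sufile = nilfs_sufile_new(nilfs,
 *				  le16_to_cpu(sbp->s_segment_usage_size));
 *	if (!sufile)
 *		return -ENOMEM;
 *
 * where @sbp points to a struct nilfs_super_block.
 */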