/*
 * sufile.c - NILFS segment usage file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "sufile.h"


static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
	return NILFS_MDT(sufile)->mi_entries_per_block;
}

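/*
 * Segment usage entries are packed into the blocks that follow the sufile
 * header area.  The two helpers below translate a segment number into the
 * block offset within the sufile and into the entry index inside that
 * block, taking the first-entry offset of the metadata file into account.
 */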
static unsigned long
nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
	do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
	return (unsigned long)t;
}

static unsigned long
nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
	return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
}

static unsigned long
nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
				     __u64 max)
{
	return min_t(unsigned long,
		     nilfs_sufile_segment_usages_per_block(sufile) -
		     nilfs_sufile_get_offset(sufile, curr),
		     max - curr + 1);
}

static inline struct nilfs_sufile_header *
nilfs_sufile_block_get_header(const struct inode *sufile,
			      struct buffer_head *bh,
			      void *kaddr)
{
	return kaddr + bh_offset(bh);
}

static struct nilfs_segment_usage *
nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
				     struct buffer_head *bh, void *kaddr)
{
	return kaddr + bh_offset(bh) +
		nilfs_sufile_get_offset(sufile, segnum) *
		NILFS_MDT(sufile)->mi_entry_size;
}

static inline int nilfs_sufile_get_header_block(struct inode *sufile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}

static inline int
nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
				     int create, struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile,
				   nilfs_sufile_get_blkoff(sufile, segnum),
				   create, NULL, bhp);
}

/**
 * nilfs_sufile_alloc - allocate a clean segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to a variable to store the allocated segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed by @segnump. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
	struct buffer_head *header_bh, *su_bh;
	struct the_nilfs *nilfs;
	struct nilfs_sufile_header *header;
	struct nilfs_segment_usage *su;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	__u64 segnum, maxsegnum, last_alloc;
	void *kaddr;
	unsigned long nsegments, ncleansegs, nsus;
	int ret, i, j;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	nilfs = NILFS_MDT(sufile)->mi_nilfs;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
	ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	last_alloc = le64_to_cpu(header->sh_last_alloc);
	kunmap_atomic(kaddr, KM_USER0);

	nsegments = nilfs_sufile_get_nsegments(sufile);
	segnum = last_alloc + 1;
	maxsegnum = nsegments - 1;
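	/*
	 * Scan for a clean segment starting just after the last allocated
	 * one, wrapping around to segment 0 once the end of the device is
	 * reached, so that allocation proceeds in a round-robin fashion.
	 */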
	for (i = 0; i < nsegments; i += nsus) {
		if (segnum >= nsegments) {
			/* wrap around */
			segnum = 0;
			maxsegnum = last_alloc;
		}
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
							   &su_bh);
		if (ret < 0)
			goto out_header;
		kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);

		nsus = nilfs_sufile_segment_usages_in_block(
			sufile, segnum, maxsegnum);
		for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
			if (!nilfs_segment_usage_clean(su))
				continue;
			/* found a clean segment */
			nilfs_segment_usage_set_active(su);
			nilfs_segment_usage_set_dirty(su);
			kunmap_atomic(kaddr, KM_USER0);

			kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
			header = nilfs_sufile_block_get_header(
				sufile, header_bh, kaddr);
			le64_add_cpu(&header->sh_ncleansegs, -1);
			le64_add_cpu(&header->sh_ndirtysegs, 1);
			header->sh_last_alloc = cpu_to_le64(segnum);
			kunmap_atomic(kaddr, KM_USER0);

			nilfs_mdt_mark_buffer_dirty(header_bh);
			nilfs_mdt_mark_buffer_dirty(su_bh);
			nilfs_mdt_mark_dirty(sufile);
			brelse(su_bh);
			*segnump = segnum;
			goto out_header;
		}

		kunmap_atomic(kaddr, KM_USER0);
		brelse(su_bh);
	}

	/* no segments left */
	ret = -ENOSPC;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_cancel_free - cancel freeing of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
 *
 * Description: nilfs_sufile_cancel_free() reverts the freeing of the segment
 * specified by @segnum; the segment usage is switched from clean back to
 * dirty, and the clean/dirty segment counters in the sufile header are
 * adjusted accordingly. The segment must be clean when this function is
 * called.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_cancel_free(struct inode *sufile, __u64 segnum)
{
	struct buffer_head *header_bh, *su_bh;
	struct the_nilfs *nilfs;
	struct nilfs_sufile_header *header;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	nilfs = NILFS_MDT(sufile)->mi_nilfs;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &su_bh);
	if (ret < 0)
		goto out_header;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(
		sufile, segnum, su_bh, kaddr);
	if (!nilfs_segment_usage_clean(su)) {
		printk(KERN_CRIT "%s: segment %llu must be clean\n",
		       __func__, (unsigned long long)segnum);
		BUG();
	}
	nilfs_segment_usage_set_dirty(su);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
	le64_add_cpu(&header->sh_ncleansegs, -1);
	le64_add_cpu(&header->sh_ndirtysegs, 1);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);

	brelse(su_bh);

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_freev - free segments
 * @sufile: inode of segment usage file
 * @segnum: array of segment numbers
 * @nsegs: number of segments
 *
 * Description: nilfs_sufile_freev() frees the @nsegs segments whose numbers
 * are listed in the @segnum array; each of them must have been returned by a
 * previous call to nilfs_sufile_alloc().
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
#define NILFS_SUFILE_FREEV_PREALLOC	16
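/*
 * For requests of up to NILFS_SUFILE_FREEV_PREALLOC segments the array of
 * buffer-head pointers is kept on the stack; larger requests fall back to a
 * kmalloc()'ed array.
 */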
int nilfs_sufile_freev(struct inode *sufile, __u64 *segnum, size_t nsegs)
{
	struct buffer_head *header_bh, **su_bh,
		*su_bh_prealloc[NILFS_SUFILE_FREEV_PREALLOC];
	struct the_nilfs *nilfs;
	struct nilfs_sufile_header *header;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret, i;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	nilfs = NILFS_MDT(sufile)->mi_nilfs;

	/* prepare resources */
	if (nsegs <= NILFS_SUFILE_FREEV_PREALLOC)
		su_bh = su_bh_prealloc;
	else {
		su_bh = kmalloc(sizeof(*su_bh) * nsegs, GFP_NOFS);
		if (su_bh == NULL) {
			ret = -ENOMEM;
			goto out_sem;
		}
	}

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_su_bh;
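	/*
	 * Look up every segment usage block up front so that the updates
	 * below cannot fail halfway through.
	 */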
	for (i = 0; i < nsegs; i++) {
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum[i],
							   0, &su_bh[i]);
		if (ret < 0)
			goto out_bh;
	}

	/* free segments */
	for (i = 0; i < nsegs; i++) {
		kaddr = kmap_atomic(su_bh[i]->b_page, KM_USER0);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum[i], su_bh[i], kaddr);
		BUG_ON(nilfs_segment_usage_error(su));
		nilfs_segment_usage_set_clean(su);
		kunmap_atomic(kaddr, KM_USER0);
		nilfs_mdt_mark_buffer_dirty(su_bh[i]);
	}
	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
	le64_add_cpu(&header->sh_ncleansegs, nsegs);
	le64_add_cpu(&header->sh_ndirtysegs, -(u64)nsegs);
	kunmap_atomic(kaddr, KM_USER0);
	nilfs_mdt_mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(sufile);

 out_bh:
	for (i--; i >= 0; i--)
		brelse(su_bh[i]);
	brelse(header_bh);

 out_su_bh:
	if (su_bh != su_bh_prealloc)
		kfree(su_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_free - free a single segment
 * @sufile: inode of segment usage file
 * @segnum: number of the segment to be freed
 */
int nilfs_sufile_free(struct inode *sufile, __u64 segnum)
{
	return nilfs_sufile_freev(sufile, &segnum, 1);
}

/**
 * nilfs_sufile_get_segment_usage - get a segment usage
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @sup: pointer to segment usage
 * @bhp: pointer to buffer head
 *
 * Description: nilfs_sufile_get_segment_usage() acquires the segment usage
 * specified by @segnum.
 *
 * Return Value: On success, 0 is returned, and the segment usage and the
 * buffer head of the buffer on which the segment usage is located are stored
 * in the place pointed by @sup and @bhp, respectively. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid segment usage number.
 */
int nilfs_sufile_get_segment_usage(struct inode *sufile, __u64 segnum,
				   struct nilfs_segment_usage **sup,
				   struct buffer_head **bhp)
{
	struct buffer_head *bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret;

	/* segnum is 0 origin */
	BUG_ON(segnum >= nilfs_sufile_get_nsegments(sufile));

	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1, &bh);
	if (ret < 0)
		goto out_sem;
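	/*
	 * Use a non-atomic kmap() here: the mapping stays in place until the
	 * caller releases it through nilfs_sufile_put_segment_usage().
	 */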
	kaddr = kmap(bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		kunmap(bh->b_page);
		brelse(bh);
		ret = -EINVAL;
		goto out_sem;
	}

	if (sup != NULL)
		*sup = su;
	*bhp = bh;

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_put_segment_usage - put a segment usage
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @bh: buffer head
 *
 * Description: nilfs_sufile_put_segment_usage() releases the segment usage
 * specified by @segnum. @bh must be the buffer head that has been returned by
 * a previous call to nilfs_sufile_get_segment_usage() with @segnum.
 */
void nilfs_sufile_put_segment_usage(struct inode *sufile, __u64 segnum,
				    struct buffer_head *bh)
{
	kunmap(bh->b_page);
	brelse(bh);
}

/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed by @sustat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
	sustat->ss_ctime = nilfs->ns_ctime;
	sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
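	/* ns_prot_seq is sampled under ns_last_segment_lock, which guards it */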
	spin_lock(&nilfs->ns_last_segment_lock);
	sustat->ss_prot_seq = nilfs->ns_prot_seq;
	spin_unlock(&nilfs->ns_last_segment_lock);
	kunmap_atomic(kaddr, KM_USER0);
	brelse(header_bh);

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_get_ncleansegs - get the number of clean segments
 * @sufile: inode of segment usage file
 * @nsegsp: pointer to the number of clean segments
 *
 * Description: nilfs_sufile_get_ncleansegs() acquires the number of clean
 * segments.
 *
 * Return Value: On success, 0 is returned and the number of clean segments is
 * stored in the place pointed by @nsegsp. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_ncleansegs(struct inode *sufile, unsigned long *nsegsp)
{
	struct nilfs_sustat sustat;
	int ret;

	ret = nilfs_sufile_get_stat(sufile, &sustat);
	if (ret == 0)
		*nsegsp = sustat.ss_ncleansegs;
	return ret;
}

/**
 * nilfs_sufile_set_error - mark a segment as erroneous
 * @sufile: inode of segment usage file
 * @segnum: segment number
 *
 * Description: nilfs_sufile_set_error() marks the segment specified by
 * @segnum as erroneous. A segment marked erroneous is never used again.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_set_error(struct inode *sufile, __u64 segnum)
{
	struct buffer_head *header_bh, *su_bh;
	struct nilfs_segment_usage *su;
	struct nilfs_sufile_header *header;
	void *kaddr;
	int ret;

	BUG_ON(segnum >= nilfs_sufile_get_nsegments(sufile));

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &su_bh);
	if (ret < 0)
		goto out_header;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
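		/* already marked as erroneous; nothing more to do */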
		kunmap_atomic(kaddr, KM_USER0);
		brelse(su_bh);
		goto out_header;
	}

	nilfs_segment_usage_set_error(su);
	kunmap_atomic(kaddr, KM_USER0);

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
	le64_add_cpu(&header->sh_ndirtysegs, -1);
	kunmap_atomic(kaddr, KM_USER0);
	nilfs_mdt_mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
	brelse(su_bh);

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_get_suinfo - get segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @si: array of suinfo
 * @nsi: size of suinfo array
 *
 * Description: nilfs_sufile_get_suinfo() copies segment usage information
 * into @si for up to @nsi segments, starting with the segment specified by
 * @segnum.
 *
 * Return Value: On success, the number of segments for which usage
 * information was retrieved is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum,
				struct nilfs_suinfo *si, size_t nsi)
{
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	void *kaddr;
	unsigned long nsegs, segusages_per_block;
	ssize_t n;
	int ret, i, j;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	nsegs = min_t(unsigned long,
		      nilfs_sufile_get_nsegments(sufile) - segnum,
		      nsi);
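	/* walk the requested range one segment usage block at a time */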
	for (i = 0; i < nsegs; i += n, segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
			  nilfs_sufile_get_offset(sufile, segnum),
			  nsegs - i);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			/* hole */
			memset(&si[i], 0, sizeof(struct nilfs_suinfo) * n);
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		for (j = 0; j < n; j++, su = (void *)su + susz) {
			si[i + j].sui_lastmod = le64_to_cpu(su->su_lastmod);
			si[i + j].sui_nblocks = le32_to_cpu(su->su_nblocks);
			si[i + j].sui_flags = le32_to_cpu(su->su_flags);
		}
		kunmap_atomic(kaddr, KM_USER0);
		brelse(su_bh);
	}
	ret = nsegs;

 out:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}