/*
 * linux/fs/ufs/cylinder.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * Inspired by the ext2 inode (block) bitmap caching
 */

#include <linux/fs.h>
#include <linux/ufs_fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>

#include "swab.h"
#include "util.h"

#undef UFS_CYLINDER_DEBUG

#ifdef UFS_CYLINDER_DEBUG
#define UFSD(x) printk("(%s, %d), %s:", __FILE__, __LINE__, __FUNCTION__); printk x;
#else
#define UFSD(x)
#endif


/*
 * Read a cylinder group into the cache. The memory for the
 * ufs_cg_private_info structure is already allocated during ufs_read_super.
 */
static void ufs_read_cylinder (struct super_block * sb,
	unsigned cgno, unsigned bitmap_nr)
{
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned i, j;

	UFSD(("ENTER, cgno %u, bitmap_nr %u\n", cgno, bitmap_nr))
	uspi = sbi->s_uspi;
	ucpi = sbi->s_ucpi[bitmap_nr];
	ucg = (struct ufs_cylinder_group *)sbi->s_ucg[cgno]->b_data;

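	/* ufs_cgcmin() gives the first fragment of the on-disk cylinder group structure */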
	UCPI_UBH(ucpi)->fragment = ufs_cgcmin(cgno);
	UCPI_UBH(ucpi)->count = uspi->s_cgsize >> sb->s_blocksize_bits;
	/*
	 * The first fragment of the cylinder group block is already in the buffer cache
	 */
	UCPI_UBH(ucpi)->bh[0] = sbi->s_ucg[cgno];
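	/* read the remaining blocks of the cylinder group from disk */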
	for (i = 1; i < UCPI_UBH(ucpi)->count; i++)
		if (!(UCPI_UBH(ucpi)->bh[i] = sb_bread(sb, UCPI_UBH(ucpi)->fragment + i)))
			goto failed;
	sbi->s_cgno[bitmap_nr] = cgno;

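	/* cache the commonly used cylinder group fields in CPU byte order */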
	ucpi->c_cgx = fs32_to_cpu(sb, ucg->cg_cgx);
	ucpi->c_ncyl = fs16_to_cpu(sb, ucg->cg_ncyl);
	ucpi->c_niblk = fs16_to_cpu(sb, ucg->cg_niblk);
	ucpi->c_ndblk = fs32_to_cpu(sb, ucg->cg_ndblk);
	ucpi->c_rotor = fs32_to_cpu(sb, ucg->cg_rotor);
	ucpi->c_frotor = fs32_to_cpu(sb, ucg->cg_frotor);
	ucpi->c_irotor = fs32_to_cpu(sb, ucg->cg_irotor);
	ucpi->c_btotoff = fs32_to_cpu(sb, ucg->cg_btotoff);
	ucpi->c_boff = fs32_to_cpu(sb, ucg->cg_boff);
	ucpi->c_iusedoff = fs32_to_cpu(sb, ucg->cg_iusedoff);
	ucpi->c_freeoff = fs32_to_cpu(sb, ucg->cg_freeoff);
	ucpi->c_nextfreeoff = fs32_to_cpu(sb, ucg->cg_nextfreeoff);
	ucpi->c_clustersumoff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clustersumoff);
	ucpi->c_clusteroff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clusteroff);
	ucpi->c_nclusterblks = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_nclusterblks);
	UFSD(("EXIT\n"))
	return;

failed:
	/* release only the buffers read above; bh[0] is owned by sbi->s_ucg[] */
	for (j = 1; j < i; j++)
		brelse (UCPI_UBH(ucpi)->bh[j]);
	sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
	ufs_error (sb, "ufs_read_cylinder", "can't read cylinder group block %u", cgno);
}

/*
 * Remove a cylinder group from the cache; this does not release the memory
 * allocated for the cylinder group (that is done only at ufs_put_super).
 */
void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr)
{
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned i;

	UFSD(("ENTER, bitmap_nr %u\n", bitmap_nr))

	uspi = sbi->s_uspi;
	if (sbi->s_cgno[bitmap_nr] == UFS_CGNO_EMPTY) {
		UFSD(("EXIT\n"))
		return;
	}
	ucpi = sbi->s_ucpi[bitmap_nr];
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

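	/* when the cache cannot hold every group, only the first s_cg_loaded slots are valid */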
	if (uspi->s_ncg > UFS_MAX_GROUP_LOADED && bitmap_nr >= sbi->s_cg_loaded) {
		ufs_panic (sb, "ufs_put_cylinder", "internal error");
		return;
	}
	/*
	 * The rotor fields are not critical data, so they are written back
	 * to disk only when we are done with the cylinder group
	 */
	ucg->cg_rotor = cpu_to_fs32(sb, ucpi->c_rotor);
	ucg->cg_frotor = cpu_to_fs32(sb, ucpi->c_frotor);
	ucg->cg_irotor = cpu_to_fs32(sb, ucpi->c_irotor);
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
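	/* bh[0] belongs to sbi->s_ucg[] and stays cached until ufs_put_super, so start at 1 */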
	for (i = 1; i < UCPI_UBH(ucpi)->count; i++) {
		brelse (UCPI_UBH(ucpi)->bh[i]);
	}

	sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
	UFSD(("EXIT\n"))
}

/*
 * Find a cylinder group in the cache and return a pointer to it.
 * If the cylinder group is not in the cache, it is loaded from disk.
 *
 * The cache is managed with an LRU algorithm.
 */
struct ufs_cg_private_info * ufs_load_cylinder (
	struct super_block * sb, unsigned cgno)
{
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	unsigned cg, i, j;

	UFSD(("ENTER, cgno %u\n", cgno))

	uspi = sbi->s_uspi;
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_load_cylinder", "internal error, high number of cg");
		return NULL;
	}
	/*
	 * Cylinder group number cgno is in the cache and was the most recently used
	 */
	if (sbi->s_cgno[0] == cgno) {
		UFSD(("EXIT\n"))
		return sbi->s_ucpi[0];
	}
	/*
	 * All cylinder groups fit in the cache (s_ncg <= UFS_MAX_GROUP_LOADED),
	 * so each group is kept in the slot with the same index
	 */
	if (uspi->s_ncg <= UFS_MAX_GROUP_LOADED) {
		if (sbi->s_cgno[cgno] != UFS_CGNO_EMPTY) {
			if (sbi->s_cgno[cgno] != cgno) {
				ufs_panic (sb, "ufs_load_cylinder", "internal error, wrong number of cg in cache");
				UFSD(("EXIT (FAILED)\n"))
				return NULL;
			}
			else {
				UFSD(("EXIT\n"))
				return sbi->s_ucpi[cgno];
			}
		} else {
			ufs_read_cylinder (sb, cgno, cgno);
			UFSD(("EXIT\n"))
			return sbi->s_ucpi[cgno];
		}
	}
	/*
	 * Cylinder group number cgno is in the cache but was not the most
	 * recently used one; move it to the first position
	 */
	for (i = 0; i < sbi->s_cg_loaded && sbi->s_cgno[i] != cgno; i++);
	if (i < sbi->s_cg_loaded && sbi->s_cgno[i] == cgno) {
		cg = sbi->s_cgno[i];
		ucpi = sbi->s_ucpi[i];
		for (j = i; j > 0; j--) {
			sbi->s_cgno[j] = sbi->s_cgno[j-1];
			sbi->s_ucpi[j] = sbi->s_ucpi[j-1];
		}
		sbi->s_cgno[0] = cg;
		sbi->s_ucpi[0] = ucpi;
	/*
	 * Cylinder group number cgno is not in the cache; read it from disk
	 * and put it at the first position
	 */
	} else {
		if (sbi->s_cg_loaded < UFS_MAX_GROUP_LOADED)
			sbi->s_cg_loaded++;
		else
			ufs_put_cylinder (sb, UFS_MAX_GROUP_LOADED-1);
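		/* reuse the private info of the evicted (least recently used) last slot */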
		ucpi = sbi->s_ucpi[sbi->s_cg_loaded - 1];
		for (j = sbi->s_cg_loaded - 1; j > 0; j--) {
			sbi->s_cgno[j] = sbi->s_cgno[j-1];
			sbi->s_ucpi[j] = sbi->s_ucpi[j-1];
		}
		sbi->s_ucpi[0] = ucpi;
		ufs_read_cylinder (sb, cgno, 0);
	}
	UFSD(("EXIT\n"))
	return sbi->s_ucpi[0];
}