/*
 * Implementation of operations over global quota file
 */
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/quotaops.h>
#include <linux/dqblk_qtree.h>
#include <linux/jiffies.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>

#define MLOG_MASK_PREFIX ML_QUOTA
#include <cluster/masklog.h>

#include "ocfs2_fs.h"
#include "ocfs2.h"
#include "alloc.h"
#include "inode.h"
#include "journal.h"
#include "file.h"
#include "sysfile.h"
#include "dlmglue.h"
#include "uptodate.h"
#include "quota.h"

static struct workqueue_struct *ocfs2_quota_wq = NULL;

static void qsync_work_fn(struct work_struct *work);

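/*
 * Copy an on-disk global dquot entry into the in-core dquot. Fields that the
 * admin has just set locally (tracked by the DQ_LASTSET_B + QIF_* bits) are
 * skipped so the disk copy does not overwrite them.
 */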
static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct mem_dqblk *m = &dquot->dq_dqb;

	/* Update from disk only entries not set by the admin */
	if (!test_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags)) {
		m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
		m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
	}
	if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
		m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
	if (!test_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags)) {
		m->dqb_bhardlimit = le64_to_cpu(d->dqb_bhardlimit);
		m->dqb_bsoftlimit = le64_to_cpu(d->dqb_bsoftlimit);
	}
	if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
		m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
	if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags))
		m->dqb_btime = le64_to_cpu(d->dqb_btime);
	if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags))
		m->dqb_itime = le64_to_cpu(d->dqb_itime);
	OCFS2_DQUOT(dquot)->dq_use_count = le32_to_cpu(d->dqb_use_count);
}

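/* Fill an on-disk global dquot entry from the in-core dquot. */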
static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct mem_dqblk *m = &dquot->dq_dqb;

	d->dqb_id = cpu_to_le32(dquot->dq_id);
	d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count);
	d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
	d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
	d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
	d->dqb_bhardlimit = cpu_to_le64(m->dqb_bhardlimit);
	d->dqb_bsoftlimit = cpu_to_le64(m->dqb_bsoftlimit);
	d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
	d->dqb_btime = cpu_to_le64(m->dqb_btime);
	d->dqb_itime = cpu_to_le64(m->dqb_itime);
}

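/* Check whether an on-disk quota tree entry belongs to the given dquot. */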
static int ocfs2_global_is_id(void *dp, struct dquot *dquot)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;

	if (qtree_entry_unused(&oinfo->dqi_gi, dp))
		return 0;
	return le32_to_cpu(d->dqb_id) == dquot->dq_id;
}

struct qtree_fmt_operations ocfs2_global_ops = {
	.mem2disk_dqblk = ocfs2_global_mem2diskdqb,
	.disk2mem_dqblk = ocfs2_global_disk2memdqb,
	.is_id = ocfs2_global_is_id,
};

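/*
 * Sanity callback passed to ocfs2_read_virt_blocks(). It currently only logs
 * the block number and locates the quota block trailer; no checksum or
 * content verification is done here.
 */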
static int ocfs2_validate_quota_block(struct super_block *sb,
				      struct buffer_head *bh)
{
	struct ocfs2_disk_dqtrailer *dqt = ocfs2_dq_trailer(sb, bh->b_data);

	mlog(0, "Validating quota block %llu\n",
	     (unsigned long long)bh->b_blocknr);

	return 0;
}

int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
			   struct buffer_head **bh)
{
	int rc = 0;
	struct buffer_head *tmp = *bh;

	rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0,
				    ocfs2_validate_quota_block);
	if (rc)
		mlog_errno(rc);

	/* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
	if (!rc && !*bh)
		*bh = tmp;

	return rc;
}

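/*
 * Map a logical block of the quota file to its physical block and get a
 * buffer head for it without reading it from disk. Used when the whole
 * usable part of the block is about to be (re)written anyway.
 */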
static int ocfs2_get_quota_block(struct inode *inode, int block,
				 struct buffer_head **bh)
{
	u64 pblock, pcount;
	int err;

	down_read(&OCFS2_I(inode)->ip_alloc_sem);
	err = ocfs2_extent_map_get_blocks(inode, block, &pblock, &pcount, NULL);
	up_read(&OCFS2_I(inode)->ip_alloc_sem);
	if (err) {
		mlog_errno(err);
		return err;
	}
	*bh = sb_getblk(inode->i_sb, pblock);
	if (!*bh) {
		err = -EIO;
		mlog_errno(err);
	}
	return err;
}

/* Read data from the global quota file - avoid the pagecache and such because
 * we cannot afford acquiring the locks needed for it... We use the quota
 * cluster lock to serialize operations. The caller is responsible for
 * acquiring it. */
ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
			 size_t len, loff_t off)
{
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	struct inode *gqinode = oinfo->dqi_gqinode;
	loff_t i_size = i_size_read(gqinode);
	int offset = off & (sb->s_blocksize - 1);
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	struct buffer_head *bh;
	size_t toread, tocopy;

	if (off > i_size)
		return 0;
	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min((size_t)(sb->s_blocksize - offset), toread);
		bh = NULL;
		err = ocfs2_read_quota_block(gqinode, blk, &bh);
		if (err) {
			mlog_errno(err);
			return err;
		}
		memcpy(data, bh->b_data + offset, tocopy);
		brelse(bh);
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to the quota file (we know the transaction is already started and has
 * enough credits) */
ssize_t ocfs2_quota_write(struct super_block *sb, int type,
			  const char *data, size_t len, loff_t off)
{
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	struct inode *gqinode = oinfo->dqi_gqinode;
	int offset = off & (sb->s_blocksize - 1);
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0, new = 0, ja_type;
	struct buffer_head *bh = NULL;
	handle_t *handle = journal_current_handle();

	if (!handle) {
		mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
		     "because transaction was not started.\n",
		     (unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}
	if (len > sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset) {
		WARN_ON(1);
		len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
	}

	mutex_lock_nested(&gqinode->i_mutex, I_MUTEX_QUOTA);
	if (gqinode->i_size < off + len) {
		down_write(&OCFS2_I(gqinode)->ip_alloc_sem);
		err = ocfs2_extend_no_holes(gqinode, off + len, off);
		up_write(&OCFS2_I(gqinode)->ip_alloc_sem);
		if (err < 0)
			goto out;
		err = ocfs2_simple_size_update(gqinode,
					       oinfo->dqi_gqi_bh,
					       off + len);
		if (err < 0)
			goto out;
		new = 1;
	}
	/* Not rewriting whole block? */
	if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
	    !new) {
		err = ocfs2_read_quota_block(gqinode, blk, &bh);
		ja_type = OCFS2_JOURNAL_ACCESS_WRITE;
	} else {
		err = ocfs2_get_quota_block(gqinode, blk, &bh);
		ja_type = OCFS2_JOURNAL_ACCESS_CREATE;
	}
	if (err) {
		mlog_errno(err);
		goto out;
	}
	lock_buffer(bh);
	if (new)
		memset(bh->b_data, 0, sb->s_blocksize);
	memcpy(bh->b_data + offset, data, len);
	flush_dcache_page(bh->b_page);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	ocfs2_set_buffer_uptodate(gqinode, bh);
	err = ocfs2_journal_access(handle, gqinode, bh, ja_type);
	if (err < 0) {
		brelse(bh);
		goto out;
	}
	err = ocfs2_journal_dirty(handle, bh);
	brelse(bh);
	if (err < 0)
		goto out;
out:
	if (err) {
		mutex_unlock(&gqinode->i_mutex);
		mlog_errno(err);
		return err;
	}
	gqinode->i_version++;
	ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
	mutex_unlock(&gqinode->i_mutex);
	return len;
}

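/*
 * Take the cluster lock on the global quota file inode. The inode buffer head
 * returned by the lock is cached in dqi_gqi_bh and shared between nested
 * holders; dqi_gqi_count tracks how many of them there are.
 */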
int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	int status;
	struct buffer_head *bh = NULL;

	status = ocfs2_inode_lock(oinfo->dqi_gqinode, &bh, ex);
	if (status < 0)
		return status;
	spin_lock(&dq_data_lock);
	if (!oinfo->dqi_gqi_count++)
		oinfo->dqi_gqi_bh = bh;
	else
		WARN_ON(bh != oinfo->dqi_gqi_bh);
	spin_unlock(&dq_data_lock);
	return 0;
}

void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
	brelse(oinfo->dqi_gqi_bh);
	spin_lock(&dq_data_lock);
	if (!--oinfo->dqi_gqi_count)
		oinfo->dqi_gqi_bh = NULL;
	spin_unlock(&dq_data_lock);
}

/* Read information header from global quota file */
int ocfs2_global_read_info(struct super_block *sb, int type)
{
	struct inode *gqinode = NULL;
	unsigned int ino[MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
					GROUP_QUOTA_SYSTEM_INODE };
	struct ocfs2_global_disk_dqinfo dinfo;
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	int status;

	mlog_entry_void();

	/* Read global header */
	gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
					      OCFS2_INVALID_SLOT);
	if (!gqinode) {
		mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
		     type);
		status = -EINVAL;
		goto out_err;
	}
	oinfo->dqi_gi.dqi_sb = sb;
	oinfo->dqi_gi.dqi_type = type;
	ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
	oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
	oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
	oinfo->dqi_gqi_bh = NULL;
	oinfo->dqi_gqi_count = 0;
	oinfo->dqi_gqinode = gqinode;
	status = ocfs2_lock_global_qf(oinfo, 0);
	if (status < 0) {
		mlog_errno(status);
		goto out_err;
	}
	status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
				      sizeof(struct ocfs2_global_disk_dqinfo),
				      OCFS2_GLOBAL_INFO_OFF);
	ocfs2_unlock_global_qf(oinfo, 0);
	if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
		mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
		     status);
		if (status >= 0)
			status = -EIO;
		mlog_errno(status);
		goto out_err;
	}
	info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
	info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
	oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms);
	oinfo->dqi_syncjiff = msecs_to_jiffies(oinfo->dqi_syncms);
	oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
	oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
	oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
	oinfo->dqi_gi.dqi_blocksize_bits = sb->s_blocksize_bits;
	oinfo->dqi_gi.dqi_usable_bs = sb->s_blocksize -
					OCFS2_QBLK_RESERVED_SPACE;
	oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
	INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
	queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
			   oinfo->dqi_syncjiff);

out_err:
	mlog_exit(status);
	return status;
}

/* Write information to global quota file. Expects exclusive lock on quota
 * file inode and quota info */
static int __ocfs2_global_write_info(struct super_block *sb, int type)
{
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	struct ocfs2_global_disk_dqinfo dinfo;
	ssize_t size;

	spin_lock(&dq_data_lock);
	info->dqi_flags &= ~DQF_INFO_DIRTY;
	dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
	dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
	spin_unlock(&dq_data_lock);
	dinfo.dqi_syncms = cpu_to_le32(oinfo->dqi_syncms);
	dinfo.dqi_blocks = cpu_to_le32(oinfo->dqi_gi.dqi_blocks);
	dinfo.dqi_free_blk = cpu_to_le32(oinfo->dqi_gi.dqi_free_blk);
	dinfo.dqi_free_entry = cpu_to_le32(oinfo->dqi_gi.dqi_free_entry);
	size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
				     sizeof(struct ocfs2_global_disk_dqinfo),
				     OCFS2_GLOBAL_INFO_OFF);
	if (size != sizeof(struct ocfs2_global_disk_dqinfo)) {
		mlog(ML_ERROR, "Cannot write global quota info structure\n");
		if (size >= 0)
			size = -EIO;
		return size;
	}
	return 0;
}

int ocfs2_global_write_info(struct super_block *sb, int type)
{
	int err;
	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;

	err = ocfs2_qinfo_lock(info, 1);
	if (err < 0)
		return err;
	err = __ocfs2_global_write_info(sb, type);
	ocfs2_qinfo_unlock(info, 1);
	return err;
}

/* Read in information from global quota file and acquire a reference to it.
 * dquot_acquire() has already started the transaction and locked quota file */
int ocfs2_global_read_dquot(struct dquot *dquot)
{
	int err, err2, ex = 0;
	struct ocfs2_mem_dqinfo *info =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;

	err = ocfs2_qinfo_lock(info, 0);
	if (err < 0)
		goto out;
	err = qtree_read_dquot(&info->dqi_gi, dquot);
	if (err < 0)
		goto out_qlock;
	OCFS2_DQUOT(dquot)->dq_use_count++;
	OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
	OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
	if (!dquot->dq_off) {	/* No real quota entry? */
		/* Upgrade to exclusive lock for allocation */
		err = ocfs2_qinfo_lock(info, 1);
		if (err < 0)
			goto out_qlock;
		ex = 1;
	}
	err = qtree_write_dquot(&info->dqi_gi, dquot);
	if (ex && info_dirty(sb_dqinfo(dquot->dq_sb, dquot->dq_type))) {
		err2 = __ocfs2_global_write_info(dquot->dq_sb, dquot->dq_type);
		if (!err)
			err = err2;
	}
out_qlock:
	if (ex)
		ocfs2_qinfo_unlock(info, 1);
	ocfs2_qinfo_unlock(info, 0);
out:
	if (err < 0)
		mlog_errno(err);
	return err;
}

/* Sync local information about quota modifications with global quota file.
 * Caller must have started the transaction and obtained exclusive lock for
 * global quota file inode */
int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
{
	int err, err2;
	struct super_block *sb = dquot->dq_sb;
	int type = dquot->dq_type;
	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
	struct ocfs2_global_disk_dqblk dqblk;
	s64 spacechange, inodechange;
	time_t olditime, oldbtime;

	err = sb->s_op->quota_read(sb, type, (char *)&dqblk,
				   sizeof(struct ocfs2_global_disk_dqblk),
				   dquot->dq_off);
	if (err != sizeof(struct ocfs2_global_disk_dqblk)) {
		if (err >= 0) {
			mlog(ML_ERROR, "Short read from global quota file "
				       "(%u read)\n", err);
			err = -EIO;
		}
		goto out;
	}

	/* Update space and inode usage. Get also other information from
	 * the global quota file so that we don't overwrite any changes
	 * made there. */
	spin_lock(&dq_data_lock);
	spacechange = dquot->dq_dqb.dqb_curspace -
					OCFS2_DQUOT(dquot)->dq_origspace;
	inodechange = dquot->dq_dqb.dqb_curinodes -
					OCFS2_DQUOT(dquot)->dq_originodes;
	olditime = dquot->dq_dqb.dqb_itime;
	oldbtime = dquot->dq_dqb.dqb_btime;
	ocfs2_global_disk2memdqb(dquot, &dqblk);
	mlog(0, "Syncing global dquot %u space %lld+%lld, inodes %lld+%lld\n",
	     dquot->dq_id, dquot->dq_dqb.dqb_curspace, (long long)spacechange,
	     dquot->dq_dqb.dqb_curinodes, (long long)inodechange);
	if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
		dquot->dq_dqb.dqb_curspace += spacechange;
	if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
		dquot->dq_dqb.dqb_curinodes += inodechange;
	/* Set properly space grace time... */
	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_curspace > dquot->dq_dqb.dqb_bsoftlimit) {
		if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags) &&
		    oldbtime > 0) {
			if (dquot->dq_dqb.dqb_btime > 0)
				dquot->dq_dqb.dqb_btime =
					min(dquot->dq_dqb.dqb_btime, oldbtime);
			else
				dquot->dq_dqb.dqb_btime = oldbtime;
		}
	} else {
		dquot->dq_dqb.dqb_btime = 0;
		clear_bit(DQ_BLKS_B, &dquot->dq_flags);
	}
	/* Set properly inode grace time... */
	if (dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_curinodes > dquot->dq_dqb.dqb_isoftlimit) {
		if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags) &&
		    olditime > 0) {
			if (dquot->dq_dqb.dqb_itime > 0)
				dquot->dq_dqb.dqb_itime =
					min(dquot->dq_dqb.dqb_itime, olditime);
			else
				dquot->dq_dqb.dqb_itime = olditime;
		}
	} else {
		dquot->dq_dqb.dqb_itime = 0;
		clear_bit(DQ_INODES_B, &dquot->dq_flags);
	}
	/* All information is properly updated, clear the flags */
	__clear_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
	OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
	spin_unlock(&dq_data_lock);
	err = ocfs2_qinfo_lock(info, freeing);
	if (err < 0) {
		mlog(ML_ERROR, "Failed to lock quota info, losing quota write"
			       " (type=%d, id=%u)\n", dquot->dq_type,
			       (unsigned)dquot->dq_id);
		goto out;
	}
	if (freeing)
		OCFS2_DQUOT(dquot)->dq_use_count--;
	err = qtree_write_dquot(&info->dqi_gi, dquot);
	if (err < 0)
		goto out_qlock;
	if (freeing && !OCFS2_DQUOT(dquot)->dq_use_count) {
		err = qtree_release_dquot(&info->dqi_gi, dquot);
		if (info_dirty(sb_dqinfo(sb, type))) {
			err2 = __ocfs2_global_write_info(sb, type);
			if (!err)
				err = err2;
		}
	}
out_qlock:
	ocfs2_qinfo_unlock(info, freeing);
out:
	if (err < 0)
		mlog_errno(err);
	return err;
}

/*
 * Functions for periodic syncing of dquots with global file
 */
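/*
 * Sync one active dquot of the given type with the global quota file and then
 * write the updated local structure. Called via dquot_scan_active() from the
 * periodic sync work.
 */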
static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
{
	handle_t *handle;
	struct super_block *sb = dquot->dq_sb;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	int status = 0;

	mlog_entry("id=%u qtype=%u type=%lu device=%s\n", dquot->dq_id,
		   dquot->dq_type, type, sb->s_id);
	if (type != dquot->dq_type)
		goto out;
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;

	handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	mutex_lock(&sb_dqopt(sb)->dqio_mutex);
	status = ocfs2_sync_dquot(dquot);
	mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
	if (status < 0)
		mlog_errno(status);
	/* We have to write local structure as well... */
	dquot_mark_dquot_dirty(dquot);
	status = dquot_commit(dquot);
	if (status < 0)
		mlog_errno(status);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}

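/* Periodic work: sync all active dquots of a type and requeue itself. */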
static void qsync_work_fn(struct work_struct *work)
{
	struct ocfs2_mem_dqinfo *oinfo = container_of(work,
						      struct ocfs2_mem_dqinfo,
						      dqi_sync_work.work);
	struct super_block *sb = oinfo->dqi_gqinode->i_sb;

	dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
	queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
			   oinfo->dqi_syncjiff);
}

/*
 * Wrappers for generic quota functions
 */

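/* Write a dquot to disk inside a locally started transaction. */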
static int ocfs2_write_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

	handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out;
	}
	status = dquot_commit(dquot);
	ocfs2_commit_trans(osb, handle);
out:
	mlog_exit(status);
	return status;
}

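/* Journal credits needed to release a dquot of the given type. */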
int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
{
	struct ocfs2_mem_dqinfo *oinfo;
	int features[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
				    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA };

	if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, features[type]))
		return 0;

	oinfo = sb_dqinfo(sb, type)->dqi_priv;
	/* We modify tree, leaf block, global info, local chunk header,
	 * global and local inode */
	return oinfo->dqi_gi.dqi_qtree_depth + 2 + 1 +
	       2 * OCFS2_INODE_UPDATE_CREDITS;
}

static int ocfs2_release_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(osb,
		ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_type));
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = dquot_release(dquot);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}

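/* Journal credits needed to acquire (and possibly allocate) a dquot. */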
int ocfs2_calc_qinit_credits(struct super_block *sb, int type)
{
	struct ocfs2_mem_dqinfo *oinfo;
	int features[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
				    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA };
	struct ocfs2_dinode *lfe, *gfe;

	if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, features[type]))
		return 0;

	oinfo = sb_dqinfo(sb, type)->dqi_priv;
	gfe = (struct ocfs2_dinode *)oinfo->dqi_gqi_bh->b_data;
	lfe = (struct ocfs2_dinode *)oinfo->dqi_lqi_bh->b_data;
	/* We can extend local file + global file. In local file we
	 * can modify info, chunk header block and dquot block. In
	 * global file we can modify info, tree and leaf block */
	return ocfs2_calc_extend_credits(sb, &lfe->id2.i_list, 0) +
	       ocfs2_calc_extend_credits(sb, &gfe->id2.i_list, 0) +
	       3 + oinfo->dqi_gi.dqi_qtree_depth + 2;
}

static int ocfs2_acquire_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
	/* We need an exclusive lock, because we're going to update use count
	 * and instantiate a possibly new dquot structure */
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(osb,
		ocfs2_calc_qinit_credits(dquot->dq_sb, dquot->dq_type));
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = dquot_acquire(dquot);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}

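/*
 * Mark a dquot dirty and, when the admin has just changed limits or usage
 * (some DQ_LASTSET_B bit is set), sync it to the global quota file right away
 * so that the change propagates to other nodes quickly.
 */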
static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
{
	unsigned long mask = (1 << (DQ_LASTSET_B + QIF_ILIMITS_B)) |
			     (1 << (DQ_LASTSET_B + QIF_BLIMITS_B)) |
			     (1 << (DQ_LASTSET_B + QIF_INODES_B)) |
			     (1 << (DQ_LASTSET_B + QIF_SPACE_B)) |
			     (1 << (DQ_LASTSET_B + QIF_BTIME_B)) |
			     (1 << (DQ_LASTSET_B + QIF_ITIME_B));
	int sync = 0;
	int status;
	struct super_block *sb = dquot->dq_sb;
	int type = dquot->dq_type;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(sb);

	mlog_entry("id=%u, type=%d", dquot->dq_id, type);
	dquot_mark_dquot_dirty(dquot);

	/* In case user set some limits, sync dquot immediately to global
	 * quota file so that information propagates quicker */
	spin_lock(&dq_data_lock);
	if (dquot->dq_flags & mask)
		sync = 1;
	spin_unlock(&dq_data_lock);
	if (!sync) {
		status = ocfs2_write_dquot(dquot);
		goto out;
	}
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = ocfs2_sync_dquot(dquot);
	if (status < 0) {
		mlog_errno(status);
		goto out_trans;
	}
	/* Now write updated local dquot structure */
	status = dquot_commit(dquot);
out_trans:
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}

/* This should happen only after set_dqinfo(). */
static int ocfs2_write_info(struct super_block *sb, int type)
{
	handle_t *handle;
	int status = 0;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

	mlog_entry_void();

	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(OCFS2_SB(sb), OCFS2_QINFO_WRITE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = dquot_commit_info(sb, type);
	ocfs2_commit_trans(OCFS2_SB(sb), handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}

/* This is difficult. We have to lock the quota inode and start a transaction
 * in this function but we don't want to take the penalty of an exclusive
 * quota file lock when we are just going to use cached structures. So we
 * take the read lock, check whether we have the dquot cached and if so,
 * we don't have to take the write lock... */
static int ocfs2_dquot_initialize(struct inode *inode, int type)
{
	handle_t *handle = NULL;
	int status = 0;
	struct super_block *sb = inode->i_sb;
	struct ocfs2_mem_dqinfo *oinfo;
	int exclusive = 0;
	int cnt;
	qid_t id;

	mlog_entry_void();

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
		status = ocfs2_lock_global_qf(oinfo, 0);
		if (status < 0)
			goto out;
		/* This is just a performance optimization, not a reliable
		 * test. Since we hold an inode lock, no one can actually
		 * release the structure until we are finished with
		 * initialization. */
		if (inode->i_dquot[cnt] != NODQUOT) {
			ocfs2_unlock_global_qf(oinfo, 0);
			continue;
		}
		/* When we have the inode lock, we know that no dquot_release()
		 * can run and thus we can safely check whether we need to
		 * read+modify the global file to get quota information or
		 * whether our node already has it. */
		if (cnt == USRQUOTA)
			id = inode->i_uid;
		else if (cnt == GRPQUOTA)
			id = inode->i_gid;
		else
			BUG();
		/* Obtain exclusion from quota off... */
		down_write(&sb_dqopt(sb)->dqptr_sem);
		exclusive = !dquot_is_cached(sb, id, cnt);
		up_write(&sb_dqopt(sb)->dqptr_sem);
		if (exclusive) {
			status = ocfs2_lock_global_qf(oinfo, 1);
			if (status < 0) {
				exclusive = 0;
				mlog_errno(status);
				goto out_ilock;
			}
			handle = ocfs2_start_trans(OCFS2_SB(sb),
					ocfs2_calc_qinit_credits(sb, cnt));
			if (IS_ERR(handle)) {
				status = PTR_ERR(handle);
				mlog_errno(status);
				goto out_ilock;
			}
		}
		dquot_initialize(inode, cnt);
		if (exclusive) {
			ocfs2_commit_trans(OCFS2_SB(sb), handle);
			ocfs2_unlock_global_qf(oinfo, 1);
		}
		ocfs2_unlock_global_qf(oinfo, 0);
	}
	mlog_exit(0);
	return 0;
out_ilock:
	if (exclusive)
		ocfs2_unlock_global_qf(oinfo, 1);
	ocfs2_unlock_global_qf(oinfo, 0);
out:
	mlog_exit(status);
	return status;
}

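/*
 * Slow path of dropping dquot references: take the exclusive cluster lock on
 * the global quota file for every active quota type and drop the inode's
 * references inside a single transaction.
 */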
static int ocfs2_dquot_drop_slow(struct inode *inode)
{
	int status = 0;
	int cnt;
	int got_lock[MAXQUOTAS] = {0, 0};
	handle_t *handle;
	struct super_block *sb = inode->i_sb;
	struct ocfs2_mem_dqinfo *oinfo;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!sb_has_quota_active(sb, cnt))
			continue;
		oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
		status = ocfs2_lock_global_qf(oinfo, 1);
		if (status < 0)
			goto out;
		got_lock[cnt] = 1;
	}
	handle = ocfs2_start_trans(OCFS2_SB(sb),
				   ocfs2_calc_qinit_credits(sb, USRQUOTA) +
				   ocfs2_calc_qinit_credits(sb, GRPQUOTA));
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out;
	}
	dquot_drop(inode);
	ocfs2_commit_trans(OCFS2_SB(sb), handle);
out:
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (got_lock[cnt]) {
			oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
			ocfs2_unlock_global_qf(oinfo, 1);
		}
	return status;
}

/* See the comment before ocfs2_dquot_initialize. */
static int ocfs2_dquot_drop(struct inode *inode)
{
	int status = 0;
	struct super_block *sb = inode->i_sb;
	struct ocfs2_mem_dqinfo *oinfo;
	int exclusive = 0;
	int cnt;
	int got_lock[MAXQUOTAS] = {0, 0};

	mlog_entry_void();
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!sb_has_quota_active(sb, cnt))
			continue;
		oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
		status = ocfs2_lock_global_qf(oinfo, 0);
		if (status < 0)
			goto out;
		got_lock[cnt] = 1;
	}
	/* Lock against anyone releasing references so that when we check
	 * we know we are not going to be the last ones to release the dquot */
	down_write(&sb_dqopt(sb)->dqptr_sem);
	/* Urgh, this is a terrible hack :( */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (inode->i_dquot[cnt] != NODQUOT &&
		    atomic_read(&inode->i_dquot[cnt]->dq_count) > 1) {
			exclusive = 1;
			break;
		}
	}
	if (!exclusive)
		dquot_drop_locked(inode);
	up_write(&sb_dqopt(sb)->dqptr_sem);
out:
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (got_lock[cnt]) {
			oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
			ocfs2_unlock_global_qf(oinfo, 0);
		}
	/* In case we bailed out because we had to do expensive locking
	 * do it now... */
	if (exclusive)
		status = ocfs2_dquot_drop_slow(inode);
	mlog_exit(status);
	return status;
}

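/* Allocate/free the ocfs2-specific dquot structure embedding struct dquot. */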
static struct dquot *ocfs2_alloc_dquot(struct super_block *sb, int type)
{
	struct ocfs2_dquot *dquot =
			kmem_cache_zalloc(ocfs2_dquot_cachep, GFP_NOFS);

	if (!dquot)
		return NULL;
	return &dquot->dq_dquot;
}

static void ocfs2_destroy_dquot(struct dquot *dquot)
{
	kmem_cache_free(ocfs2_dquot_cachep, dquot);
}

struct dquot_operations ocfs2_quota_operations = {
	.initialize	= ocfs2_dquot_initialize,
	.drop		= ocfs2_dquot_drop,
	.alloc_space	= dquot_alloc_space,
	.alloc_inode	= dquot_alloc_inode,
	.free_space	= dquot_free_space,
	.free_inode	= dquot_free_inode,
	.transfer	= dquot_transfer,
	.write_dquot	= ocfs2_write_dquot,
	.acquire_dquot	= ocfs2_acquire_dquot,
	.release_dquot	= ocfs2_release_dquot,
	.mark_dirty	= ocfs2_mark_dquot_dirty,
	.write_info	= ocfs2_write_info,
	.alloc_dquot	= ocfs2_alloc_dquot,
	.destroy_dquot	= ocfs2_destroy_dquot,
};

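/*
 * Set up / tear down the "o2quot" workqueue used for periodic syncing of
 * dquots with the global quota file.
 */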
int ocfs2_quota_setup(void)
{
	ocfs2_quota_wq = create_workqueue("o2quot");
	if (!ocfs2_quota_wq)
		return -ENOMEM;
	return 0;
}

void ocfs2_quota_shutdown(void)
{
	if (ocfs2_quota_wq) {
		flush_workqueue(ocfs2_quota_wq);
		destroy_workqueue(ocfs2_quota_wq);
		ocfs2_quota_wq = NULL;
	}
}