blob: 2bdcddd3f1c4cf0cbc0f727aa6527e5dd5c36024 [file] [log] [blame]
Jan Kara9e33d692008-08-25 19:56:50 +02001/*
2 * Implementation of operations over global quota file
3 */
Mark Fasheh171bf932008-10-20 15:36:47 +02004#include <linux/spinlock.h>
Jan Kara9e33d692008-08-25 19:56:50 +02005#include <linux/fs.h>
6#include <linux/quota.h>
7#include <linux/quotaops.h>
8#include <linux/dqblk_qtree.h>
Mark Fasheh171bf932008-10-20 15:36:47 +02009#include <linux/jiffies.h>
10#include <linux/writeback.h>
11#include <linux/workqueue.h>
Jan Kara9e33d692008-08-25 19:56:50 +020012
13#define MLOG_MASK_PREFIX ML_QUOTA
14#include <cluster/masklog.h>
15
16#include "ocfs2_fs.h"
17#include "ocfs2.h"
18#include "alloc.h"
19#include "inode.h"
20#include "journal.h"
21#include "file.h"
22#include "sysfile.h"
23#include "dlmglue.h"
24#include "uptodate.h"
25#include "quota.h"
26
/* Workqueue used for periodic syncing of dquots with the global quota file */
static struct workqueue_struct *ocfs2_quota_wq = NULL;

static void qsync_work_fn(struct work_struct *work);
30
Jan Kara9e33d692008-08-25 19:56:50 +020031static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp)
32{
33 struct ocfs2_global_disk_dqblk *d = dp;
34 struct mem_dqblk *m = &dquot->dq_dqb;
35
36 /* Update from disk only entries not set by the admin */
37 if (!test_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags)) {
38 m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
39 m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
40 }
41 if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
42 m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
43 if (!test_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags)) {
44 m->dqb_bhardlimit = le64_to_cpu(d->dqb_bhardlimit);
45 m->dqb_bsoftlimit = le64_to_cpu(d->dqb_bsoftlimit);
46 }
47 if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
48 m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
49 if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags))
50 m->dqb_btime = le64_to_cpu(d->dqb_btime);
51 if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags))
52 m->dqb_itime = le64_to_cpu(d->dqb_itime);
53 OCFS2_DQUOT(dquot)->dq_use_count = le32_to_cpu(d->dqb_use_count);
54}
55
56static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot)
57{
58 struct ocfs2_global_disk_dqblk *d = dp;
59 struct mem_dqblk *m = &dquot->dq_dqb;
60
61 d->dqb_id = cpu_to_le32(dquot->dq_id);
62 d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count);
63 d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
64 d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
65 d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
66 d->dqb_bhardlimit = cpu_to_le64(m->dqb_bhardlimit);
67 d->dqb_bsoftlimit = cpu_to_le64(m->dqb_bsoftlimit);
68 d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
69 d->dqb_btime = cpu_to_le64(m->dqb_btime);
70 d->dqb_itime = cpu_to_le64(m->dqb_itime);
71}
72
73static int ocfs2_global_is_id(void *dp, struct dquot *dquot)
74{
75 struct ocfs2_global_disk_dqblk *d = dp;
76 struct ocfs2_mem_dqinfo *oinfo =
77 sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
78
79 if (qtree_entry_unused(&oinfo->dqi_gi, dp))
80 return 0;
81 return le32_to_cpu(d->dqb_id) == dquot->dq_id;
82}
83
/* Format callbacks handed to the generic quota-tree code for the
 * global quota file. */
struct qtree_fmt_operations ocfs2_global_ops = {
	.mem2disk_dqblk = ocfs2_global_mem2diskdqb,
	.disk2mem_dqblk = ocfs2_global_disk2memdqb,
	.is_id = ocfs2_global_is_id,
};
89
Joel Becker85eb8b72008-11-25 15:31:27 +010090int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
91 struct buffer_head **bh)
Jan Kara9e33d692008-08-25 19:56:50 +020092{
Joel Becker85eb8b72008-11-25 15:31:27 +010093 int rc = 0;
94 struct buffer_head *tmp = *bh;
Jan Kara9e33d692008-08-25 19:56:50 +020095
Joel Becker85eb8b72008-11-25 15:31:27 +010096 rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0, NULL);
97 if (rc)
98 mlog_errno(rc);
Jan Kara9e33d692008-08-25 19:56:50 +020099
Joel Becker85eb8b72008-11-25 15:31:27 +0100100 /* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
101 if (!rc && !*bh)
102 *bh = tmp;
103
104 return rc;
Jan Kara9e33d692008-08-25 19:56:50 +0200105}
106
/* Map logical quota-file block @block to its physical block and return a
 * buffer_head for it (contents are NOT read from disk — caller is about
 * to overwrite them). On failure returns NULL with *err set negative. */
static struct buffer_head *ocfs2_get_quota_block(struct inode *inode,
						 int block, int *err)
{
	u64 pblock, pcount;
	struct buffer_head *bh;

	/* ip_alloc_sem protects the extent-map lookup */
	down_read(&OCFS2_I(inode)->ip_alloc_sem);
	*err = ocfs2_extent_map_get_blocks(inode, block, &pblock, &pcount,
					   NULL);
	up_read(&OCFS2_I(inode)->ip_alloc_sem);
	if (*err) {
		mlog_errno(*err);
		return NULL;
	}
	bh = sb_getblk(inode->i_sb, pblock);
	if (!bh) {
		*err = -EIO;
		mlog_errno(*err);
	}
	return bh;
}
128
/* Read data from global quotafile - avoid pagecache and such because we cannot
 * afford acquiring the locks... We use quota cluster lock to serialize
 * operations. Caller is responsible for acquiring it. */
ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
			 size_t len, loff_t off)
{
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	struct inode *gqinode = oinfo->dqi_gqinode;
	loff_t i_size = i_size_read(gqinode);
	int offset = off & (sb->s_blocksize - 1);	/* offset in first block */
	sector_t blk = off >> sb->s_blocksize_bits;	/* first block to read */
	int err = 0;
	struct buffer_head *bh;
	size_t toread, tocopy;

	/* Clamp the request to the current file size; reads entirely past
	 * EOF return 0 bytes. */
	if (off > i_size)
		return 0;
	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		/* Copy at most up to the end of the current block */
		tocopy = min((size_t)(sb->s_blocksize - offset), toread);
		bh = NULL;
		err = ocfs2_read_quota_block(gqinode, blk, &bh);
		if (err) {
			mlog_errno(err);
			return err;
		}
		memcpy(data, bh->b_data + offset, tocopy);
		brelse(bh);
		offset = 0;	/* only the first block may start mid-block */
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}
166
167/* Write to quotafile (we know the transaction is already started and has
168 * enough credits) */
169ssize_t ocfs2_quota_write(struct super_block *sb, int type,
170 const char *data, size_t len, loff_t off)
171{
172 struct mem_dqinfo *info = sb_dqinfo(sb, type);
173 struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
174 struct inode *gqinode = oinfo->dqi_gqinode;
175 int offset = off & (sb->s_blocksize - 1);
176 sector_t blk = off >> sb->s_blocksize_bits;
177 int err = 0, new = 0;
Joel Becker85eb8b72008-11-25 15:31:27 +0100178 struct buffer_head *bh = NULL;
Jan Kara9e33d692008-08-25 19:56:50 +0200179 handle_t *handle = journal_current_handle();
180
181 if (!handle) {
182 mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
183 "because transaction was not started.\n",
184 (unsigned long long)off, (unsigned long long)len);
185 return -EIO;
186 }
187 if (len > sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset) {
188 WARN_ON(1);
189 len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
190 }
191
192 mutex_lock_nested(&gqinode->i_mutex, I_MUTEX_QUOTA);
193 if (gqinode->i_size < off + len) {
194 down_write(&OCFS2_I(gqinode)->ip_alloc_sem);
195 err = ocfs2_extend_no_holes(gqinode, off + len, off);
196 up_write(&OCFS2_I(gqinode)->ip_alloc_sem);
197 if (err < 0)
198 goto out;
199 err = ocfs2_simple_size_update(gqinode,
200 oinfo->dqi_gqi_bh,
201 off + len);
202 if (err < 0)
203 goto out;
204 new = 1;
205 }
206 /* Not rewriting whole block? */
207 if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
208 !new) {
Joel Becker85eb8b72008-11-25 15:31:27 +0100209 err = ocfs2_read_quota_block(gqinode, blk, &bh);
210 if (err) {
Jan Kara9e33d692008-08-25 19:56:50 +0200211 mlog_errno(err);
212 return err;
213 }
214 err = ocfs2_journal_access(handle, gqinode, bh,
Joel Becker85eb8b72008-11-25 15:31:27 +0100215 OCFS2_JOURNAL_ACCESS_WRITE);
Jan Kara9e33d692008-08-25 19:56:50 +0200216 } else {
217 bh = ocfs2_get_quota_block(gqinode, blk, &err);
218 if (!bh) {
219 mlog_errno(err);
220 return err;
221 }
222 err = ocfs2_journal_access(handle, gqinode, bh,
Joel Becker85eb8b72008-11-25 15:31:27 +0100223 OCFS2_JOURNAL_ACCESS_CREATE);
Jan Kara9e33d692008-08-25 19:56:50 +0200224 }
225 if (err < 0) {
226 brelse(bh);
227 goto out;
228 }
229 lock_buffer(bh);
230 if (new)
231 memset(bh->b_data, 0, sb->s_blocksize);
232 memcpy(bh->b_data + offset, data, len);
233 flush_dcache_page(bh->b_page);
234 unlock_buffer(bh);
235 ocfs2_set_buffer_uptodate(gqinode, bh);
236 err = ocfs2_journal_dirty(handle, bh);
237 brelse(bh);
238 if (err < 0)
239 goto out;
240out:
241 if (err) {
242 mutex_unlock(&gqinode->i_mutex);
243 mlog_errno(err);
244 return err;
245 }
246 gqinode->i_version++;
247 ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
248 mutex_unlock(&gqinode->i_mutex);
249 return len;
250}
251
/* Take the cluster lock on the global quota file inode. The inode's
 * buffer head is cached in oinfo->dqi_gqi_bh and shared by all nested
 * holders; dqi_gqi_count tracks them (both protected by dq_data_lock). */
int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	int status;
	struct buffer_head *bh = NULL;

	status = ocfs2_inode_lock(oinfo->dqi_gqinode, &bh, ex);
	if (status < 0)
		return status;
	spin_lock(&dq_data_lock);
	if (!oinfo->dqi_gqi_count++)
		oinfo->dqi_gqi_bh = bh;
	else
		/* Nested holders must all see the same inode bh */
		WARN_ON(bh != oinfo->dqi_gqi_bh);
	spin_unlock(&dq_data_lock);
	return 0;
}
268
/* Drop the cluster lock on the global quota file inode and release our
 * reference to the cached inode bh; the last holder clears the cache. */
void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
	brelse(oinfo->dqi_gqi_bh);
	spin_lock(&dq_data_lock);
	if (!--oinfo->dqi_gqi_count)
		oinfo->dqi_gqi_bh = NULL;
	spin_unlock(&dq_data_lock);
}
278
/* Read information header from global quota file */
int ocfs2_global_read_info(struct super_block *sb, int type)
{
	struct inode *gqinode = NULL;
	unsigned int ino[MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
					GROUP_QUOTA_SYSTEM_INODE };
	struct ocfs2_global_disk_dqinfo dinfo;
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	int status;

	mlog_entry_void();

	/* Read global header */
	gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
					      OCFS2_INVALID_SLOT);
	if (!gqinode) {
		mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
		     type);
		status = -EINVAL;
		goto out_err;
	}
	/* Initialize the quota-tree descriptor and our private state
	 * before taking any cluster locks */
	oinfo->dqi_gi.dqi_sb = sb;
	oinfo->dqi_gi.dqi_type = type;
	ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
	oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
	oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
	oinfo->dqi_gqi_bh = NULL;
	oinfo->dqi_gqi_count = 0;
	oinfo->dqi_gqinode = gqinode;
	/* Shared lock is enough — we only read the header here */
	status = ocfs2_lock_global_qf(oinfo, 0);
	if (status < 0) {
		mlog_errno(status);
		goto out_err;
	}
	status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
				      sizeof(struct ocfs2_global_disk_dqinfo),
				      OCFS2_GLOBAL_INFO_OFF);
	ocfs2_unlock_global_qf(oinfo, 0);
	if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
		mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
		     status);
		if (status >= 0)
			status = -EIO;	/* short read */
		mlog_errno(status);
		goto out_err;
	}
	/* Populate generic and ocfs2-private info from the on-disk header */
	info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
	info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
	oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms);
	oinfo->dqi_syncjiff = msecs_to_jiffies(oinfo->dqi_syncms);
	oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
	oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
	oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
	oinfo->dqi_gi.dqi_blocksize_bits = sb->s_blocksize_bits;
	oinfo->dqi_gi.dqi_usable_bs = sb->s_blocksize -
						OCFS2_QBLK_RESERVED_SPACE;
	oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
	/* Kick off periodic syncing of dquots with the global file */
	INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
	queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
			   oinfo->dqi_syncjiff);

out_err:
	mlog_exit(status);
	return status;
}
345
346/* Write information to global quota file. Expects exlusive lock on quota
347 * file inode and quota info */
348static int __ocfs2_global_write_info(struct super_block *sb, int type)
349{
350 struct mem_dqinfo *info = sb_dqinfo(sb, type);
351 struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
352 struct ocfs2_global_disk_dqinfo dinfo;
353 ssize_t size;
354
355 spin_lock(&dq_data_lock);
356 info->dqi_flags &= ~DQF_INFO_DIRTY;
357 dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
358 dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
359 spin_unlock(&dq_data_lock);
360 dinfo.dqi_syncms = cpu_to_le32(oinfo->dqi_syncms);
361 dinfo.dqi_blocks = cpu_to_le32(oinfo->dqi_gi.dqi_blocks);
362 dinfo.dqi_free_blk = cpu_to_le32(oinfo->dqi_gi.dqi_free_blk);
363 dinfo.dqi_free_entry = cpu_to_le32(oinfo->dqi_gi.dqi_free_entry);
364 size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
365 sizeof(struct ocfs2_global_disk_dqinfo),
366 OCFS2_GLOBAL_INFO_OFF);
367 if (size != sizeof(struct ocfs2_global_disk_dqinfo)) {
368 mlog(ML_ERROR, "Cannot write global quota info structure\n");
369 if (size >= 0)
370 size = -EIO;
371 return size;
372 }
373 return 0;
374}
375
/* Write the global quota info header under an exclusive qinfo lock. */
int ocfs2_global_write_info(struct super_block *sb, int type)
{
	int err;
	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;

	err = ocfs2_qinfo_lock(info, 1);
	if (err < 0)
		return err;
	err = __ocfs2_global_write_info(sb, type);
	ocfs2_qinfo_unlock(info, 1);
	return err;
}
388
/* Read in information from global quota file and acquire a reference to it.
 * dquot_acquire() has already started the transaction and locked quota file */
int ocfs2_global_read_dquot(struct dquot *dquot)
{
	int err, err2, ex = 0;
	struct ocfs2_mem_dqinfo *info =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;

	/* Shared lock is enough for reading the entry... */
	err = ocfs2_qinfo_lock(info, 0);
	if (err < 0)
		goto out;
	err = qtree_read_dquot(&info->dqi_gi, dquot);
	if (err < 0)
		goto out_qlock;
	OCFS2_DQUOT(dquot)->dq_use_count++;
	/* Remember the global usage so later syncs can compute our delta */
	OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
	OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
	if (!dquot->dq_off) {	/* No real quota entry? */
		/* Upgrade to exclusive lock for allocation */
		err = ocfs2_qinfo_lock(info, 1);
		if (err < 0)
			goto out_qlock;
		ex = 1;
	}
	err = qtree_write_dquot(&info->dqi_gi, dquot);
	/* Allocation may have dirtied the quota info — write it back too */
	if (ex && info_dirty(sb_dqinfo(dquot->dq_sb, dquot->dq_type))) {
		err2 = __ocfs2_global_write_info(dquot->dq_sb, dquot->dq_type);
		if (!err)
			err = err2;
	}
out_qlock:
	if (ex)
		ocfs2_qinfo_unlock(info, 1);
	ocfs2_qinfo_unlock(info, 0);
out:
	if (err < 0)
		mlog_errno(err);
	return err;
}
428
429/* Sync local information about quota modifications with global quota file.
430 * Caller must have started the transaction and obtained exclusive lock for
431 * global quota file inode */
432int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
433{
434 int err, err2;
435 struct super_block *sb = dquot->dq_sb;
436 int type = dquot->dq_type;
437 struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
438 struct ocfs2_global_disk_dqblk dqblk;
439 s64 spacechange, inodechange;
440 time_t olditime, oldbtime;
441
442 err = sb->s_op->quota_read(sb, type, (char *)&dqblk,
443 sizeof(struct ocfs2_global_disk_dqblk),
444 dquot->dq_off);
445 if (err != sizeof(struct ocfs2_global_disk_dqblk)) {
446 if (err >= 0) {
447 mlog(ML_ERROR, "Short read from global quota file "
448 "(%u read)\n", err);
449 err = -EIO;
450 }
451 goto out;
452 }
453
454 /* Update space and inode usage. Get also other information from
455 * global quota file so that we don't overwrite any changes there.
456 * We are */
457 spin_lock(&dq_data_lock);
458 spacechange = dquot->dq_dqb.dqb_curspace -
459 OCFS2_DQUOT(dquot)->dq_origspace;
460 inodechange = dquot->dq_dqb.dqb_curinodes -
461 OCFS2_DQUOT(dquot)->dq_originodes;
462 olditime = dquot->dq_dqb.dqb_itime;
463 oldbtime = dquot->dq_dqb.dqb_btime;
464 ocfs2_global_disk2memdqb(dquot, &dqblk);
465 mlog(0, "Syncing global dquot %d space %lld+%lld, inodes %lld+%lld\n",
466 dquot->dq_id, dquot->dq_dqb.dqb_curspace, spacechange,
467 dquot->dq_dqb.dqb_curinodes, inodechange);
468 if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
469 dquot->dq_dqb.dqb_curspace += spacechange;
470 if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
471 dquot->dq_dqb.dqb_curinodes += inodechange;
472 /* Set properly space grace time... */
473 if (dquot->dq_dqb.dqb_bsoftlimit &&
474 dquot->dq_dqb.dqb_curspace > dquot->dq_dqb.dqb_bsoftlimit) {
475 if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags) &&
476 oldbtime > 0) {
477 if (dquot->dq_dqb.dqb_btime > 0)
478 dquot->dq_dqb.dqb_btime =
479 min(dquot->dq_dqb.dqb_btime, oldbtime);
480 else
481 dquot->dq_dqb.dqb_btime = oldbtime;
482 }
483 } else {
484 dquot->dq_dqb.dqb_btime = 0;
485 clear_bit(DQ_BLKS_B, &dquot->dq_flags);
486 }
487 /* Set properly inode grace time... */
488 if (dquot->dq_dqb.dqb_isoftlimit &&
489 dquot->dq_dqb.dqb_curinodes > dquot->dq_dqb.dqb_isoftlimit) {
490 if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags) &&
491 olditime > 0) {
492 if (dquot->dq_dqb.dqb_itime > 0)
493 dquot->dq_dqb.dqb_itime =
494 min(dquot->dq_dqb.dqb_itime, olditime);
495 else
496 dquot->dq_dqb.dqb_itime = olditime;
497 }
498 } else {
499 dquot->dq_dqb.dqb_itime = 0;
500 clear_bit(DQ_INODES_B, &dquot->dq_flags);
501 }
502 /* All information is properly updated, clear the flags */
503 __clear_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
504 __clear_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
505 __clear_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
506 __clear_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
507 __clear_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
508 __clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
509 OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
510 OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
511 spin_unlock(&dq_data_lock);
512 err = ocfs2_qinfo_lock(info, freeing);
513 if (err < 0) {
514 mlog(ML_ERROR, "Failed to lock quota info, loosing quota write"
515 " (type=%d, id=%u)\n", dquot->dq_type,
516 (unsigned)dquot->dq_id);
517 goto out;
518 }
519 if (freeing)
520 OCFS2_DQUOT(dquot)->dq_use_count--;
521 err = qtree_write_dquot(&info->dqi_gi, dquot);
522 if (err < 0)
523 goto out_qlock;
524 if (freeing && !OCFS2_DQUOT(dquot)->dq_use_count) {
525 err = qtree_release_dquot(&info->dqi_gi, dquot);
526 if (info_dirty(sb_dqinfo(sb, type))) {
527 err2 = __ocfs2_global_write_info(sb, type);
528 if (!err)
529 err = err2;
530 }
531 }
532out_qlock:
533 ocfs2_qinfo_unlock(info, freeing);
534out:
535 if (err < 0)
536 mlog_errno(err);
537 return err;
538}
539
/*
 * Functions for periodic syncing of dquots with global file
 */

/* Sync one active dquot of the given type with the global quota file,
 * then commit the local structure. Called via dquot_scan_active() from
 * the periodic sync work. */
static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
{
	handle_t *handle;
	struct super_block *sb = dquot->dq_sb;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	int status = 0;

	mlog_entry("id=%u qtype=%u type=%lu device=%s\n", dquot->dq_id,
		   dquot->dq_type, type, sb->s_id);
	/* Only dquots of the requested type are synced */
	if (type != dquot->dq_type)
		goto out;
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;

	handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	/* dqio_mutex serializes access to the quota file */
	mutex_lock(&sb_dqopt(sb)->dqio_mutex);
	status = ocfs2_sync_dquot(dquot);
	mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
	if (status < 0)
		mlog_errno(status);
	/* We have to write local structure as well... */
	dquot_mark_dquot_dirty(dquot);
	status = dquot_commit(dquot);
	if (status < 0)
		mlog_errno(status);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}
582
/* Delayed-work handler: sync all active dquots of this type with the
 * global file, then re-arm ourselves for the next period. */
static void qsync_work_fn(struct work_struct *work)
{
	struct ocfs2_mem_dqinfo *oinfo = container_of(work,
						      struct ocfs2_mem_dqinfo,
						      dqi_sync_work.work);
	struct super_block *sb = oinfo->dqi_gqinode->i_sb;

	dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
	queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
			   oinfo->dqi_syncjiff);
}
594
595/*
Jan Kara9e33d692008-08-25 19:56:50 +0200596 * Wrappers for generic quota functions
597 */
598
599static int ocfs2_write_dquot(struct dquot *dquot)
600{
601 handle_t *handle;
602 struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
603 int status = 0;
604
605 mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
606
607 handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
608 if (IS_ERR(handle)) {
609 status = PTR_ERR(handle);
610 mlog_errno(status);
611 goto out;
612 }
613 status = dquot_commit(dquot);
614 ocfs2_commit_trans(osb, handle);
615out:
616 mlog_exit(status);
617 return status;
618}
619
/* Journal credits needed to delete a dquot; 0 if quota of this type is
 * not enabled on the filesystem. */
int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
{
	struct ocfs2_mem_dqinfo *oinfo;
	int features[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
				    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA };

	if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, features[type]))
		return 0;

	oinfo = sb_dqinfo(sb, type)->dqi_priv;
	/* We modify tree, leaf block, global info, local chunk header,
	 * global and local inode */
	return oinfo->dqi_gi.dqi_qtree_depth + 2 + 1 +
	       2 * OCFS2_INODE_UPDATE_CREDITS;
}
635
/* Run the generic dquot_release() inside an ocfs2 transaction, under the
 * exclusive cluster lock on the global quota file. */
static int ocfs2_release_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(osb,
		ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_type));
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = dquot_release(dquot);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}
664
/* Journal credits needed to instantiate a new dquot; 0 if quota of this
 * type is not enabled on the filesystem. Requires dqi_gqi_bh/dqi_lqi_bh
 * to be valid (global/local quota file locks held by the caller). */
int ocfs2_calc_qinit_credits(struct super_block *sb, int type)
{
	struct ocfs2_mem_dqinfo *oinfo;
	int features[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
				    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA };
	struct ocfs2_dinode *lfe, *gfe;

	if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, features[type]))
		return 0;

	oinfo = sb_dqinfo(sb, type)->dqi_priv;
	gfe = (struct ocfs2_dinode *)oinfo->dqi_gqi_bh->b_data;
	lfe = (struct ocfs2_dinode *)oinfo->dqi_lqi_bh->b_data;
	/* We can extend local file + global file. In local file we
	 * can modify info, chunk header block and dquot block. In
	 * global file we can modify info, tree and leaf block */
	return ocfs2_calc_extend_credits(sb, &lfe->id2.i_list, 0) +
	       ocfs2_calc_extend_credits(sb, &gfe->id2.i_list, 0) +
	       3 + oinfo->dqi_gi.dqi_qtree_depth + 2;
}
685
/* Run the generic dquot_acquire() inside an ocfs2 transaction, under the
 * exclusive cluster lock on the global quota file. */
static int ocfs2_acquire_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
	/* We need an exclusive lock, because we're going to update use count
	 * and instantiate possibly new dquot structure */
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(osb,
		ocfs2_calc_qinit_credits(dquot->dq_sb, dquot->dq_type));
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = dquot_acquire(dquot);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}
715
/* Mark a dquot dirty. When the admin changed limits/usage on this node
 * (Q_SETQUOTA), push the change to the global file immediately so other
 * nodes see it quickly; otherwise just write the local structure. */
static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
{
	/* Bits marking admin-set fields awaiting propagation */
	unsigned long mask = (1 << (DQ_LASTSET_B + QIF_ILIMITS_B)) |
			     (1 << (DQ_LASTSET_B + QIF_BLIMITS_B)) |
			     (1 << (DQ_LASTSET_B + QIF_INODES_B)) |
			     (1 << (DQ_LASTSET_B + QIF_SPACE_B)) |
			     (1 << (DQ_LASTSET_B + QIF_BTIME_B)) |
			     (1 << (DQ_LASTSET_B + QIF_ITIME_B));
	int sync = 0;
	int status;
	struct super_block *sb = dquot->dq_sb;
	int type = dquot->dq_type;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(sb);

	mlog_entry("id=%u, type=%d", dquot->dq_id, type);
	dquot_mark_dquot_dirty(dquot);

	/* In case user set some limits, sync dquot immediately to global
	 * quota file so that information propagates quicker */
	spin_lock(&dq_data_lock);
	if (dquot->dq_flags & mask)
		sync = 1;
	spin_unlock(&dq_data_lock);
	if (!sync) {
		status = ocfs2_write_dquot(dquot);
		goto out;
	}
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = ocfs2_sync_dquot(dquot);
	if (status < 0) {
		mlog_errno(status);
		goto out_trans;
	}
	/* Now write updated local dquot structure */
	status = dquot_commit(dquot);
out_trans:
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}
769
/* This should happen only after set_dqinfo(). Commits the quota info
 * header inside an ocfs2 transaction, under the exclusive cluster lock
 * on the global quota file. */
static int ocfs2_write_info(struct super_block *sb, int type)
{
	handle_t *handle;
	int status = 0;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

	mlog_entry_void();

	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(OCFS2_SB(sb), OCFS2_QINFO_WRITE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = dquot_commit_info(sb, type);
	ocfs2_commit_trans(OCFS2_SB(sb), handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}
796
/* This is difficult. We have to lock quota inode and start transaction
 * in this function but we don't want to take the penalty of exlusive
 * quota file lock when we are just going to use cached structures. So
 * we just take read lock check whether we have dquot cached and if so,
 * we don't have to take the write lock... */
static int ocfs2_dquot_initialize(struct inode *inode, int type)
{
	handle_t *handle = NULL;
	int status = 0;
	struct super_block *sb = inode->i_sb;
	struct ocfs2_mem_dqinfo *oinfo;
	int exclusive = 0;	/* 1 if we took the exclusive global lock */
	int cnt;
	qid_t id;

	mlog_entry_void();

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		/* type == -1 means "initialize all quota types" */
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
		status = ocfs2_lock_global_qf(oinfo, 0);
		if (status < 0)
			goto out;
		/* This is just a performance optimization not a reliable test.
		 * Since we hold an inode lock, no one can actually release
		 * the structure until we are finished with initialization. */
		if (inode->i_dquot[cnt] != NODQUOT) {
			ocfs2_unlock_global_qf(oinfo, 0);
			continue;
		}
		/* When we have inode lock, we know that no dquot_release() can
		 * run and thus we can safely check whether we need to
		 * read+modify global file to get quota information or whether
		 * our node already has it. */
		if (cnt == USRQUOTA)
			id = inode->i_uid;
		else if (cnt == GRPQUOTA)
			id = inode->i_gid;
		else
			BUG();
		/* Obtain exclusion from quota off... */
		down_write(&sb_dqopt(sb)->dqptr_sem);
		exclusive = !dquot_is_cached(sb, id, cnt);
		up_write(&sb_dqopt(sb)->dqptr_sem);
		if (exclusive) {
			/* Not cached: reading the global file will modify it,
			 * so we need the exclusive lock and a transaction */
			status = ocfs2_lock_global_qf(oinfo, 1);
			if (status < 0) {
				exclusive = 0;
				mlog_errno(status);
				goto out_ilock;
			}
			handle = ocfs2_start_trans(OCFS2_SB(sb),
					ocfs2_calc_qinit_credits(sb, cnt));
			if (IS_ERR(handle)) {
				status = PTR_ERR(handle);
				mlog_errno(status);
				goto out_ilock;
			}
		}
		dquot_initialize(inode, cnt);
		if (exclusive) {
			ocfs2_commit_trans(OCFS2_SB(sb), handle);
			ocfs2_unlock_global_qf(oinfo, 1);
		}
		ocfs2_unlock_global_qf(oinfo, 0);
	}
	mlog_exit(0);
	return 0;
out_ilock:
	if (exclusive)
		ocfs2_unlock_global_qf(oinfo, 1);
	ocfs2_unlock_global_qf(oinfo, 0);
out:
	mlog_exit(status);
	return status;
}
876
/* Expensive dquot-drop path: take the exclusive global lock for every
 * active quota type and drop references inside a single transaction.
 * Used when we may be the last holder and may have to write/release
 * the global entry. */
static int ocfs2_dquot_drop_slow(struct inode *inode)
{
	int status = 0;
	int cnt;
	int got_lock[MAXQUOTAS] = {0, 0};	/* which locks to undo */
	handle_t *handle;
	struct super_block *sb = inode->i_sb;
	struct ocfs2_mem_dqinfo *oinfo;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!sb_has_quota_active(sb, cnt))
			continue;
		oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
		status = ocfs2_lock_global_qf(oinfo, 1);
		if (status < 0)
			goto out;
		got_lock[cnt] = 1;
	}
	handle = ocfs2_start_trans(OCFS2_SB(sb),
			ocfs2_calc_qinit_credits(sb, USRQUOTA) +
			ocfs2_calc_qinit_credits(sb, GRPQUOTA));
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out;
	}
	dquot_drop(inode);
	ocfs2_commit_trans(OCFS2_SB(sb), handle);
out:
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (got_lock[cnt]) {
			oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
			ocfs2_unlock_global_qf(oinfo, 1);
		}
	return status;
}
913
/* See the comment before ocfs2_dquot_initialize. Fast path: under shared
 * global locks, drop references only if we are provably not the last
 * holder; otherwise fall back to ocfs2_dquot_drop_slow(). */
static int ocfs2_dquot_drop(struct inode *inode)
{
	int status = 0;
	struct super_block *sb = inode->i_sb;
	struct ocfs2_mem_dqinfo *oinfo;
	int exclusive = 0;	/* 1 if the slow (exclusive) path is needed */
	int cnt;
	int got_lock[MAXQUOTAS] = {0, 0};

	mlog_entry_void();
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!sb_has_quota_active(sb, cnt))
			continue;
		oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
		status = ocfs2_lock_global_qf(oinfo, 0);
		if (status < 0)
			goto out;
		got_lock[cnt] = 1;
	}
	/* Lock against anyone releasing references so that when we check
	 * we know we are not going to be last ones to release dquot */
	down_write(&sb_dqopt(sb)->dqptr_sem);
	/* Urgh, this is a terrible hack :( */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (inode->i_dquot[cnt] != NODQUOT &&
		    atomic_read(&inode->i_dquot[cnt]->dq_count) > 1) {
			/* Someone else also holds it — safe to drop here */
			exclusive = 1;
			break;
		}
	}
	if (!exclusive)
		dquot_drop_locked(inode);
	up_write(&sb_dqopt(sb)->dqptr_sem);
out:
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (got_lock[cnt]) {
			oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
			ocfs2_unlock_global_qf(oinfo, 0);
		}
	/* In case we bailed out because we had to do expensive locking
	 * do it now... */
	if (exclusive)
		status = ocfs2_dquot_drop_slow(inode);
	mlog_exit(status);
	return status;
}
961
962static struct dquot *ocfs2_alloc_dquot(struct super_block *sb, int type)
963{
964 struct ocfs2_dquot *dquot =
965 kmem_cache_zalloc(ocfs2_dquot_cachep, GFP_NOFS);
966
967 if (!dquot)
968 return NULL;
969 return &dquot->dq_dquot;
970}
971
/* Return a dquot obtained from ocfs2_alloc_dquot() to the slab cache. */
static void ocfs2_destroy_dquot(struct dquot *dquot)
{
	kmem_cache_free(ocfs2_dquot_cachep, dquot);
}
976
/* dquot operations ocfs2 registers with the generic quota code. The
 * ocfs2_* callbacks wrap the generic dquot_* helpers in cluster locks
 * and journal transactions; the rest are used unchanged. */
struct dquot_operations ocfs2_quota_operations = {
	.initialize	= ocfs2_dquot_initialize,
	.drop		= ocfs2_dquot_drop,
	.alloc_space	= dquot_alloc_space,
	.alloc_inode	= dquot_alloc_inode,
	.free_space	= dquot_free_space,
	.free_inode	= dquot_free_inode,
	.transfer	= dquot_transfer,
	.write_dquot	= ocfs2_write_dquot,
	.acquire_dquot	= ocfs2_acquire_dquot,
	.release_dquot	= ocfs2_release_dquot,
	.mark_dirty	= ocfs2_mark_dquot_dirty,
	.write_info	= ocfs2_write_info,
	.alloc_dquot	= ocfs2_alloc_dquot,
	.destroy_dquot	= ocfs2_destroy_dquot,
};
Mark Fasheh171bf932008-10-20 15:36:47 +0200993
994int ocfs2_quota_setup(void)
995{
996 ocfs2_quota_wq = create_workqueue("o2quot");
997 if (!ocfs2_quota_wq)
998 return -ENOMEM;
999 return 0;
1000}
1001
1002void ocfs2_quota_shutdown(void)
1003{
1004 if (ocfs2_quota_wq) {
1005 flush_workqueue(ocfs2_quota_wq);
1006 destroy_workqueue(ocfs2_quota_wq);
1007 ocfs2_quota_wq = NULL;
1008 }
1009}