/*
 * Implementation of operations over global quota file
 */
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/quotaops.h>
#include <linux/dqblk_qtree.h>
#include <linux/jiffies.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>

#define MLOG_MASK_PREFIX ML_QUOTA
#include <cluster/masklog.h>

#include "ocfs2_fs.h"
#include "ocfs2.h"
#include "alloc.h"
#include "inode.h"
#include "journal.h"
#include "file.h"
#include "sysfile.h"
#include "dlmglue.h"
#include "uptodate.h"
#include "quota.h"

static struct workqueue_struct *ocfs2_quota_wq = NULL;

static void qsync_work_fn(struct work_struct *work);

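/*
 * Callbacks for the generic quota tree code (linux/dqblk_qtree.h): convert
 * between the ocfs2 on-disk global dquot entry and the in-memory mem_dqblk,
 * and match on-disk entries by quota id.
 */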
static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct mem_dqblk *m = &dquot->dq_dqb;

        /* Update from disk only entries not set by the admin */
        if (!test_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags)) {
                m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
                m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
        }
        if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
                m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
        if (!test_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags)) {
                m->dqb_bhardlimit = le64_to_cpu(d->dqb_bhardlimit);
                m->dqb_bsoftlimit = le64_to_cpu(d->dqb_bsoftlimit);
        }
        if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
                m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
        if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags))
                m->dqb_btime = le64_to_cpu(d->dqb_btime);
        if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags))
                m->dqb_itime = le64_to_cpu(d->dqb_itime);
        OCFS2_DQUOT(dquot)->dq_use_count = le32_to_cpu(d->dqb_use_count);
}

static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct mem_dqblk *m = &dquot->dq_dqb;

        d->dqb_id = cpu_to_le32(dquot->dq_id);
        d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count);
        d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
        d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
        d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
        d->dqb_bhardlimit = cpu_to_le64(m->dqb_bhardlimit);
        d->dqb_bsoftlimit = cpu_to_le64(m->dqb_bsoftlimit);
        d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
        d->dqb_btime = cpu_to_le64(m->dqb_btime);
        d->dqb_itime = cpu_to_le64(m->dqb_itime);
}

static int ocfs2_global_is_id(void *dp, struct dquot *dquot)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct ocfs2_mem_dqinfo *oinfo =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;

        if (qtree_entry_unused(&oinfo->dqi_gi, dp))
                return 0;
        return le32_to_cpu(d->dqb_id) == dquot->dq_id;
}

struct qtree_fmt_operations ocfs2_global_ops = {
        .mem2disk_dqblk = ocfs2_global_mem2diskdqb,
        .disk2mem_dqblk = ocfs2_global_disk2memdqb,
        .is_id = ocfs2_global_is_id,
};


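/*
 * Read a block of the global quota file, addressed by its virtual
 * (file-relative) block number.
 */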
struct buffer_head *ocfs2_read_quota_block(struct inode *inode,
                                           int block, int *err)
{
        struct buffer_head *tmp = NULL;

        *err = ocfs2_read_virt_blocks(inode, block, 1, &tmp, 0, NULL);
        if (*err)
                mlog_errno(*err);

        return tmp;
}

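/*
 * Get a buffer head for a quota file block without reading it from disk.
 * Used when the whole block is about to be overwritten anyway.
 */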
static struct buffer_head *ocfs2_get_quota_block(struct inode *inode,
                                                 int block, int *err)
{
        u64 pblock, pcount;
        struct buffer_head *bh;

        down_read(&OCFS2_I(inode)->ip_alloc_sem);
        *err = ocfs2_extent_map_get_blocks(inode, block, &pblock, &pcount,
                                           NULL);
        up_read(&OCFS2_I(inode)->ip_alloc_sem);
        if (*err) {
                mlog_errno(*err);
                return NULL;
        }
        bh = sb_getblk(inode->i_sb, pblock);
        if (!bh) {
                *err = -EIO;
                mlog_errno(*err);
        }
        return bh;
}

/* Read data from global quotafile - avoid pagecache and such because we cannot
 * afford acquiring the locks... We use quota cluster lock to serialize
 * operations. Caller is responsible for acquiring it. */
ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
                         size_t len, loff_t off)
{
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        struct inode *gqinode = oinfo->dqi_gqinode;
        loff_t i_size = i_size_read(gqinode);
        int offset = off & (sb->s_blocksize - 1);
        sector_t blk = off >> sb->s_blocksize_bits;
        int err = 0;
        struct buffer_head *bh;
        size_t toread, tocopy;

        if (off > i_size)
                return 0;
        if (off + len > i_size)
                len = i_size - off;
        toread = len;
        while (toread > 0) {
                tocopy = min((size_t)(sb->s_blocksize - offset), toread);
                bh = ocfs2_read_quota_block(gqinode, blk, &err);
                if (!bh) {
                        mlog_errno(err);
                        return err;
                }
                memcpy(data, bh->b_data + offset, tocopy);
                brelse(bh);
                offset = 0;
                toread -= tocopy;
                data += tocopy;
                blk++;
        }
        return len;
}

/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
ssize_t ocfs2_quota_write(struct super_block *sb, int type,
                          const char *data, size_t len, loff_t off)
{
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        struct inode *gqinode = oinfo->dqi_gqinode;
        int offset = off & (sb->s_blocksize - 1);
        sector_t blk = off >> sb->s_blocksize_bits;
        int err = 0, new = 0;
        struct buffer_head *bh;
        handle_t *handle = journal_current_handle();

        if (!handle) {
                mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
                     "because transaction was not started.\n",
                     (unsigned long long)off, (unsigned long long)len);
                return -EIO;
        }
        if (len > sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset) {
                WARN_ON(1);
                len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
        }

        mutex_lock_nested(&gqinode->i_mutex, I_MUTEX_QUOTA);
        if (gqinode->i_size < off + len) {
                down_write(&OCFS2_I(gqinode)->ip_alloc_sem);
                err = ocfs2_extend_no_holes(gqinode, off + len, off);
                up_write(&OCFS2_I(gqinode)->ip_alloc_sem);
                if (err < 0)
                        goto out;
                err = ocfs2_simple_size_update(gqinode,
                                               oinfo->dqi_gqi_bh,
                                               off + len);
                if (err < 0)
                        goto out;
                new = 1;
        }
        /* Not rewriting whole block? */
        if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
            !new) {
                bh = ocfs2_read_quota_block(gqinode, blk, &err);
                if (!bh) {
                        mlog_errno(err);
                        goto out;       /* don't return with i_mutex held */
                }
                err = ocfs2_journal_access(handle, gqinode, bh,
                                           OCFS2_JOURNAL_ACCESS_WRITE);
        } else {
                bh = ocfs2_get_quota_block(gqinode, blk, &err);
                if (!bh) {
                        mlog_errno(err);
                        goto out;       /* don't return with i_mutex held */
                }
                err = ocfs2_journal_access(handle, gqinode, bh,
                                           OCFS2_JOURNAL_ACCESS_CREATE);
        }
        if (err < 0) {
                brelse(bh);
                goto out;
        }
        lock_buffer(bh);
        if (new)
                memset(bh->b_data, 0, sb->s_blocksize);
        memcpy(bh->b_data + offset, data, len);
        flush_dcache_page(bh->b_page);
        unlock_buffer(bh);
        ocfs2_set_buffer_uptodate(gqinode, bh);
        err = ocfs2_journal_dirty(handle, bh);
        brelse(bh);
        if (err < 0)
                goto out;
out:
        if (err) {
                mutex_unlock(&gqinode->i_mutex);
                mlog_errno(err);
                return err;
        }
        gqinode->i_version++;
        ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
        mutex_unlock(&gqinode->i_mutex);
        return len;
}

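/*
 * Take the cluster lock protecting the global quota file inode. The inode
 * buffer head is cached in dqi_gqi_bh and reference counted via
 * dqi_gqi_count so that nested lockers share the same buffer.
 */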
int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
        int status;
        struct buffer_head *bh = NULL;

        status = ocfs2_inode_lock(oinfo->dqi_gqinode, &bh, ex);
        if (status < 0)
                return status;
        spin_lock(&dq_data_lock);
        if (!oinfo->dqi_gqi_count++)
                oinfo->dqi_gqi_bh = bh;
        else
                WARN_ON(bh != oinfo->dqi_gqi_bh);
        spin_unlock(&dq_data_lock);
        return 0;
}

void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
        ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
        brelse(oinfo->dqi_gqi_bh);
        spin_lock(&dq_data_lock);
        if (!--oinfo->dqi_gqi_count)
                oinfo->dqi_gqi_bh = NULL;
        spin_unlock(&dq_data_lock);
}

/* Read information header from global quota file */
int ocfs2_global_read_info(struct super_block *sb, int type)
{
        struct inode *gqinode = NULL;
        unsigned int ino[MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
                                        GROUP_QUOTA_SYSTEM_INODE };
        struct ocfs2_global_disk_dqinfo dinfo;
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        int status;

        mlog_entry_void();

        /* Read global header */
        gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
                                              OCFS2_INVALID_SLOT);
        if (!gqinode) {
                mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
                     type);
                status = -EINVAL;
                goto out_err;
        }
        oinfo->dqi_gi.dqi_sb = sb;
        oinfo->dqi_gi.dqi_type = type;
        ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
        oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
        oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
        oinfo->dqi_gqi_bh = NULL;
        oinfo->dqi_gqi_count = 0;
        oinfo->dqi_gqinode = gqinode;
        status = ocfs2_lock_global_qf(oinfo, 0);
        if (status < 0) {
                mlog_errno(status);
                goto out_err;
        }
        status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
                                      sizeof(struct ocfs2_global_disk_dqinfo),
                                      OCFS2_GLOBAL_INFO_OFF);
        ocfs2_unlock_global_qf(oinfo, 0);
        if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
                mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
                     status);
                if (status >= 0)
                        status = -EIO;
                mlog_errno(status);
                goto out_err;
        }
        info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
        info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
        oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms);
        oinfo->dqi_syncjiff = msecs_to_jiffies(oinfo->dqi_syncms);
        oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
        oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
        oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
        oinfo->dqi_gi.dqi_blocksize_bits = sb->s_blocksize_bits;
        oinfo->dqi_gi.dqi_usable_bs = sb->s_blocksize -
                                      OCFS2_QBLK_RESERVED_SPACE;
        oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
        INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
        queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
                           oinfo->dqi_syncjiff);
        /* Don't return the positive byte count from quota_read() on success */
        status = 0;

out_err:
        mlog_exit(status);
        return status;
}

/* Write information to global quota file. Expects exclusive lock on quota
 * file inode and quota info */
static int __ocfs2_global_write_info(struct super_block *sb, int type)
{
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        struct ocfs2_global_disk_dqinfo dinfo;
        ssize_t size;

        spin_lock(&dq_data_lock);
        info->dqi_flags &= ~DQF_INFO_DIRTY;
        dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
        dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
        spin_unlock(&dq_data_lock);
        dinfo.dqi_syncms = cpu_to_le32(oinfo->dqi_syncms);
        dinfo.dqi_blocks = cpu_to_le32(oinfo->dqi_gi.dqi_blocks);
        dinfo.dqi_free_blk = cpu_to_le32(oinfo->dqi_gi.dqi_free_blk);
        dinfo.dqi_free_entry = cpu_to_le32(oinfo->dqi_gi.dqi_free_entry);
        size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
                                     sizeof(struct ocfs2_global_disk_dqinfo),
                                     OCFS2_GLOBAL_INFO_OFF);
        if (size != sizeof(struct ocfs2_global_disk_dqinfo)) {
                mlog(ML_ERROR, "Cannot write global quota info structure\n");
                if (size >= 0)
                        size = -EIO;
                return size;
        }
        return 0;
}

int ocfs2_global_write_info(struct super_block *sb, int type)
{
        int err;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;

        err = ocfs2_qinfo_lock(info, 1);
        if (err < 0)
                return err;
        err = __ocfs2_global_write_info(sb, type);
        ocfs2_qinfo_unlock(info, 1);
        return err;
}

/* Read in information from global quota file and acquire a reference to it.
 * dquot_acquire() has already started the transaction and locked quota file */
int ocfs2_global_read_dquot(struct dquot *dquot)
{
        int err, err2, ex = 0;
        struct ocfs2_mem_dqinfo *info =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;

        err = ocfs2_qinfo_lock(info, 0);
        if (err < 0)
                goto out;
        err = qtree_read_dquot(&info->dqi_gi, dquot);
        if (err < 0)
                goto out_qlock;
        OCFS2_DQUOT(dquot)->dq_use_count++;
        OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
        OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
        if (!dquot->dq_off) {   /* No real quota entry? */
                /* Upgrade to exclusive lock for allocation */
                err = ocfs2_qinfo_lock(info, 1);
                if (err < 0)
                        goto out_qlock;
                ex = 1;
        }
        err = qtree_write_dquot(&info->dqi_gi, dquot);
        if (ex && info_dirty(sb_dqinfo(dquot->dq_sb, dquot->dq_type))) {
                err2 = __ocfs2_global_write_info(dquot->dq_sb, dquot->dq_type);
                if (!err)
                        err = err2;
        }
out_qlock:
        if (ex)
                ocfs2_qinfo_unlock(info, 1);
        ocfs2_qinfo_unlock(info, 0);
out:
        if (err < 0)
                mlog_errno(err);
        return err;
}

/* Sync local information about quota modifications with global quota file.
 * Caller must have started the transaction and obtained exclusive lock for
 * global quota file inode */
int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
{
        int err, err2;
        struct super_block *sb = dquot->dq_sb;
        int type = dquot->dq_type;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
        struct ocfs2_global_disk_dqblk dqblk;
        s64 spacechange, inodechange;
        time_t olditime, oldbtime;

        err = sb->s_op->quota_read(sb, type, (char *)&dqblk,
                                   sizeof(struct ocfs2_global_disk_dqblk),
                                   dquot->dq_off);
        if (err != sizeof(struct ocfs2_global_disk_dqblk)) {
                if (err >= 0) {
                        mlog(ML_ERROR, "Short read from global quota file "
                                       "(%u read)\n", err);
                        err = -EIO;
                }
                goto out;
        }

        /* Update space and inode usage. Also get other information from the
         * global quota file so that we don't overwrite any changes there. */
        spin_lock(&dq_data_lock);
        spacechange = dquot->dq_dqb.dqb_curspace -
                                OCFS2_DQUOT(dquot)->dq_origspace;
        inodechange = dquot->dq_dqb.dqb_curinodes -
                                OCFS2_DQUOT(dquot)->dq_originodes;
        olditime = dquot->dq_dqb.dqb_itime;
        oldbtime = dquot->dq_dqb.dqb_btime;
        ocfs2_global_disk2memdqb(dquot, &dqblk);
        mlog(0, "Syncing global dquot %d space %lld+%lld, inodes %lld+%lld\n",
             dquot->dq_id, dquot->dq_dqb.dqb_curspace, spacechange,
             dquot->dq_dqb.dqb_curinodes, inodechange);
        if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
                dquot->dq_dqb.dqb_curspace += spacechange;
        if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
                dquot->dq_dqb.dqb_curinodes += inodechange;
        /* Set properly space grace time... */
        if (dquot->dq_dqb.dqb_bsoftlimit &&
            dquot->dq_dqb.dqb_curspace > dquot->dq_dqb.dqb_bsoftlimit) {
                if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags) &&
                    oldbtime > 0) {
                        if (dquot->dq_dqb.dqb_btime > 0)
                                dquot->dq_dqb.dqb_btime =
                                        min(dquot->dq_dqb.dqb_btime, oldbtime);
                        else
                                dquot->dq_dqb.dqb_btime = oldbtime;
                }
        } else {
                dquot->dq_dqb.dqb_btime = 0;
                clear_bit(DQ_BLKS_B, &dquot->dq_flags);
        }
        /* Set properly inode grace time... */
        if (dquot->dq_dqb.dqb_isoftlimit &&
            dquot->dq_dqb.dqb_curinodes > dquot->dq_dqb.dqb_isoftlimit) {
                if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags) &&
                    olditime > 0) {
                        if (dquot->dq_dqb.dqb_itime > 0)
                                dquot->dq_dqb.dqb_itime =
                                        min(dquot->dq_dqb.dqb_itime, olditime);
                        else
                                dquot->dq_dqb.dqb_itime = olditime;
                }
        } else {
                dquot->dq_dqb.dqb_itime = 0;
                clear_bit(DQ_INODES_B, &dquot->dq_flags);
        }
        /* All information is properly updated, clear the flags */
        __clear_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
        OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
        OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
        spin_unlock(&dq_data_lock);
        err = ocfs2_qinfo_lock(info, freeing);
        if (err < 0) {
                mlog(ML_ERROR, "Failed to lock quota info, losing quota write"
                               " (type=%d, id=%u)\n", dquot->dq_type,
                               (unsigned)dquot->dq_id);
                goto out;
        }
        if (freeing)
                OCFS2_DQUOT(dquot)->dq_use_count--;
        err = qtree_write_dquot(&info->dqi_gi, dquot);
        if (err < 0)
                goto out_qlock;
        if (freeing && !OCFS2_DQUOT(dquot)->dq_use_count) {
                err = qtree_release_dquot(&info->dqi_gi, dquot);
                if (info_dirty(sb_dqinfo(sb, type))) {
                        err2 = __ocfs2_global_write_info(sb, type);
                        if (!err)
                                err = err2;
                }
        }
out_qlock:
        ocfs2_qinfo_unlock(info, freeing);
out:
        if (err < 0)
                mlog_errno(err);
        return err;
}

/*
 * Functions for periodic syncing of dquots with global file
 */
static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
{
        handle_t *handle;
        struct super_block *sb = dquot->dq_sb;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(sb);
        int status = 0;

        mlog_entry("id=%u qtype=%u type=%lu device=%s\n", dquot->dq_id,
                   dquot->dq_type, type, sb->s_id);
        if (type != dquot->dq_type)
                goto out;
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;

        handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        mutex_lock(&sb_dqopt(sb)->dqio_mutex);
        status = ocfs2_sync_dquot(dquot);
        mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
        if (status < 0)
                mlog_errno(status);
        /* We have to write local structure as well... */
        dquot_mark_dquot_dirty(dquot);
        status = dquot_commit(dquot);
        if (status < 0)
                mlog_errno(status);
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}

static void qsync_work_fn(struct work_struct *work)
{
        struct ocfs2_mem_dqinfo *oinfo = container_of(work,
                                                      struct ocfs2_mem_dqinfo,
                                                      dqi_sync_work.work);
        struct super_block *sb = oinfo->dqi_gqinode->i_sb;

        dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
        queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
                           oinfo->dqi_syncjiff);
}

/*
 * Wrappers for generic quota functions
 */

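/* Write changes in a dquot to disk within a freshly started transaction */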
static int ocfs2_write_dquot(struct dquot *dquot)
{
        handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
        int status = 0;

        mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

        handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out;
        }
        status = dquot_commit(dquot);
        ocfs2_commit_trans(osb, handle);
out:
        mlog_exit(status);
        return status;
}

int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
{
        struct ocfs2_mem_dqinfo *oinfo;
        int features[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
                                    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA };

        if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, features[type]))
                return 0;

        oinfo = sb_dqinfo(sb, type)->dqi_priv;
        /* We modify tree, leaf block, global info, local chunk header,
         * global and local inode */
        return oinfo->dqi_gi.dqi_qtree_depth + 2 + 1 +
               2 * OCFS2_INODE_UPDATE_CREDITS;
}

static int ocfs2_release_dquot(struct dquot *dquot)
{
        handle_t *handle;
        struct ocfs2_mem_dqinfo *oinfo =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
        int status = 0;

        mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(osb,
                ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_type));
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        status = dquot_release(dquot);
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}

int ocfs2_calc_qinit_credits(struct super_block *sb, int type)
{
        struct ocfs2_mem_dqinfo *oinfo;
        int features[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
                                    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA };
        struct ocfs2_dinode *lfe, *gfe;

        if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, features[type]))
                return 0;

        oinfo = sb_dqinfo(sb, type)->dqi_priv;
        gfe = (struct ocfs2_dinode *)oinfo->dqi_gqi_bh->b_data;
        lfe = (struct ocfs2_dinode *)oinfo->dqi_lqi_bh->b_data;
        /* We can extend local file + global file. In local file we
         * can modify info, chunk header block and dquot block. In
         * global file we can modify info, tree and leaf block */
        return ocfs2_calc_extend_credits(sb, &lfe->id2.i_list, 0) +
               ocfs2_calc_extend_credits(sb, &gfe->id2.i_list, 0) +
               3 + oinfo->dqi_gi.dqi_qtree_depth + 2;
}

static int ocfs2_acquire_dquot(struct dquot *dquot)
{
        handle_t *handle;
        struct ocfs2_mem_dqinfo *oinfo =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
        int status = 0;

        mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
        /* We need an exclusive lock, because we're going to update use count
         * and instantiate possibly new dquot structure */
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(osb,
                ocfs2_calc_qinit_credits(dquot->dq_sb, dquot->dq_type));
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        status = dquot_acquire(dquot);
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}

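/*
 * Mark a dquot dirty. If limits or usage were set from userspace (the
 * DQ_LASTSET bits below), sync the dquot to the global quota file right
 * away so that other nodes see the change sooner.
 */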
static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
{
        unsigned long mask = (1 << (DQ_LASTSET_B + QIF_ILIMITS_B)) |
                             (1 << (DQ_LASTSET_B + QIF_BLIMITS_B)) |
                             (1 << (DQ_LASTSET_B + QIF_INODES_B)) |
                             (1 << (DQ_LASTSET_B + QIF_SPACE_B)) |
                             (1 << (DQ_LASTSET_B + QIF_BTIME_B)) |
                             (1 << (DQ_LASTSET_B + QIF_ITIME_B));
        int sync = 0;
        int status;
        struct super_block *sb = dquot->dq_sb;
        int type = dquot->dq_type;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(sb);

        mlog_entry("id=%u, type=%d", dquot->dq_id, type);
        dquot_mark_dquot_dirty(dquot);

        /* In case user set some limits, sync dquot immediately to global
         * quota file so that information propagates quicker */
        spin_lock(&dq_data_lock);
        if (dquot->dq_flags & mask)
                sync = 1;
        spin_unlock(&dq_data_lock);
        if (!sync) {
                status = ocfs2_write_dquot(dquot);
                goto out;
        }
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        status = ocfs2_sync_dquot(dquot);
        if (status < 0) {
                mlog_errno(status);
                goto out_trans;
        }
        /* Now write updated local dquot structure */
        status = dquot_commit(dquot);
out_trans:
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}

/* This should happen only after set_dqinfo(). */
static int ocfs2_write_info(struct super_block *sb, int type)
{
        handle_t *handle;
        int status = 0;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

        mlog_entry_void();

        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(OCFS2_SB(sb), OCFS2_QINFO_WRITE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        status = dquot_commit_info(sb, type);
        ocfs2_commit_trans(OCFS2_SB(sb), handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}

/* This is difficult. We have to lock the quota inode and start a transaction
 * in this function but we don't want to take the penalty of an exclusive
 * quota file lock when we are just going to use cached structures. So we
 * take the read lock, check whether we have the dquot cached and if so,
 * we don't have to take the write lock... */
static int ocfs2_dquot_initialize(struct inode *inode, int type)
{
        handle_t *handle = NULL;
        int status = 0;
        struct super_block *sb = inode->i_sb;
        struct ocfs2_mem_dqinfo *oinfo;
        int exclusive = 0;
        int cnt;
        qid_t id;

        mlog_entry_void();

        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (type != -1 && cnt != type)
                        continue;
                if (!sb_has_quota_active(sb, cnt))
                        continue;
                oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
                status = ocfs2_lock_global_qf(oinfo, 0);
                if (status < 0)
                        goto out;
                /* This is just a performance optimization, not a reliable
                 * test. Since we hold an inode lock, no one can actually
                 * release the structure until we are finished with
                 * initialization. */
                if (inode->i_dquot[cnt] != NODQUOT) {
                        ocfs2_unlock_global_qf(oinfo, 0);
                        continue;
                }
                /* When we have inode lock, we know that no dquot_release() can
                 * run and thus we can safely check whether we need to
                 * read+modify global file to get quota information or whether
                 * our node already has it. */
                if (cnt == USRQUOTA)
                        id = inode->i_uid;
                else if (cnt == GRPQUOTA)
                        id = inode->i_gid;
                else
                        BUG();
                /* Obtain exclusion from quota off... */
                down_write(&sb_dqopt(sb)->dqptr_sem);
                exclusive = !dquot_is_cached(sb, id, cnt);
                up_write(&sb_dqopt(sb)->dqptr_sem);
                if (exclusive) {
                        status = ocfs2_lock_global_qf(oinfo, 1);
                        if (status < 0) {
                                exclusive = 0;
                                mlog_errno(status);
                                goto out_ilock;
                        }
                        handle = ocfs2_start_trans(OCFS2_SB(sb),
                                        ocfs2_calc_qinit_credits(sb, cnt));
                        if (IS_ERR(handle)) {
                                status = PTR_ERR(handle);
                                mlog_errno(status);
                                goto out_ilock;
                        }
                }
                dquot_initialize(inode, cnt);
                if (exclusive) {
                        ocfs2_commit_trans(OCFS2_SB(sb), handle);
                        ocfs2_unlock_global_qf(oinfo, 1);
                }
                ocfs2_unlock_global_qf(oinfo, 0);
        }
        mlog_exit(0);
        return 0;
out_ilock:
        if (exclusive)
                ocfs2_unlock_global_qf(oinfo, 1);
        ocfs2_unlock_global_qf(oinfo, 0);
out:
        mlog_exit(status);
        return status;
}

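/*
 * Drop quota references while holding exclusive cluster locks on all active
 * quota types. Used when we may be releasing the last reference and thus
 * might have to update the global quota file.
 */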
static int ocfs2_dquot_drop_slow(struct inode *inode)
{
        int status;
        int cnt;
        int got_lock[MAXQUOTAS] = {0, 0};
        handle_t *handle;
        struct super_block *sb = inode->i_sb;
        struct ocfs2_mem_dqinfo *oinfo;

        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (!sb_has_quota_active(sb, cnt))
                        continue;
                oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
                status = ocfs2_lock_global_qf(oinfo, 1);
                if (status < 0)
                        goto out;
                got_lock[cnt] = 1;
        }
        handle = ocfs2_start_trans(OCFS2_SB(sb),
                        ocfs2_calc_qinit_credits(sb, USRQUOTA) +
                        ocfs2_calc_qinit_credits(sb, GRPQUOTA));
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out;
        }
        dquot_drop(inode);
        ocfs2_commit_trans(OCFS2_SB(sb), handle);
out:
        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                if (got_lock[cnt]) {
                        oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
                        ocfs2_unlock_global_qf(oinfo, 1);
                }
        return status;
}

/* See the comment before ocfs2_dquot_initialize. */
static int ocfs2_dquot_drop(struct inode *inode)
{
        int status = 0;
        struct super_block *sb = inode->i_sb;
        struct ocfs2_mem_dqinfo *oinfo;
        int exclusive = 0;
        int cnt;
        int got_lock[MAXQUOTAS] = {0, 0};

        mlog_entry_void();
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (!sb_has_quota_active(sb, cnt))
                        continue;
                oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
                status = ocfs2_lock_global_qf(oinfo, 0);
                if (status < 0)
                        goto out;
                got_lock[cnt] = 1;
        }
        /* Lock against anyone releasing references so that when we check
         * we know we are not going to be the last ones to release dquot */
        down_write(&sb_dqopt(sb)->dqptr_sem);
        /* Urgh, this is a terrible hack :( */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (inode->i_dquot[cnt] != NODQUOT &&
                    atomic_read(&inode->i_dquot[cnt]->dq_count) > 1) {
                        exclusive = 1;
                        break;
                }
        }
        if (!exclusive)
                dquot_drop_locked(inode);
        up_write(&sb_dqopt(sb)->dqptr_sem);
out:
        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                if (got_lock[cnt]) {
                        oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
                        ocfs2_unlock_global_qf(oinfo, 0);
                }
        /* In case we bailed out because we had to do expensive locking
         * do it now... */
        if (exclusive)
                status = ocfs2_dquot_drop_slow(inode);
        mlog_exit(status);
        return status;
}

static struct dquot *ocfs2_alloc_dquot(struct super_block *sb, int type)
{
        struct ocfs2_dquot *dquot =
                        kmem_cache_zalloc(ocfs2_dquot_cachep, GFP_NOFS);

        if (!dquot)
                return NULL;
        return &dquot->dq_dquot;
}

static void ocfs2_destroy_dquot(struct dquot *dquot)
{
        kmem_cache_free(ocfs2_dquot_cachep, dquot);
}

struct dquot_operations ocfs2_quota_operations = {
        .initialize = ocfs2_dquot_initialize,
        .drop = ocfs2_dquot_drop,
        .alloc_space = dquot_alloc_space,
        .alloc_inode = dquot_alloc_inode,
        .free_space = dquot_free_space,
        .free_inode = dquot_free_inode,
        .transfer = dquot_transfer,
        .write_dquot = ocfs2_write_dquot,
        .acquire_dquot = ocfs2_acquire_dquot,
        .release_dquot = ocfs2_release_dquot,
        .mark_dirty = ocfs2_mark_dquot_dirty,
        .write_info = ocfs2_write_info,
        .alloc_dquot = ocfs2_alloc_dquot,
        .destroy_dquot = ocfs2_destroy_dquot,
};

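/*
 * Setup/teardown of the workqueue used for periodic syncing of dquots
 * with the global quota file.
 */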
int ocfs2_quota_setup(void)
{
        ocfs2_quota_wq = create_workqueue("o2quot");
        if (!ocfs2_quota_wq)
                return -ENOMEM;
        return 0;
}

void ocfs2_quota_shutdown(void)
{
        if (ocfs2_quota_wq) {
                flush_workqueue(ocfs2_quota_wq);
                destroy_workqueue(ocfs2_quota_wq);
                ocfs2_quota_wq = NULL;
        }
}