2 * Implementation of operations over global quota file
4 #include <linux/spinlock.h>
6 #include <linux/quota.h>
7 #include <linux/quotaops.h>
8 #include <linux/dqblk_qtree.h>
9 #include <linux/jiffies.h>
10 #include <linux/writeback.h>
11 #include <linux/workqueue.h>
13 #define MLOG_MASK_PREFIX ML_QUOTA
14 #include <cluster/masklog.h>
19 #include "blockcheck.h"
/* Workqueue driving periodic dquot syncing with the global quota file;
 * created in ocfs2_quota_setup() and torn down in ocfs2_quota_shutdown(). */
static struct workqueue_struct *ocfs2_quota_wq = NULL;

/* Delayed-work callback that rescans active dquots (defined below). */
static void qsync_work_fn(struct work_struct *work);
/*
 * Copy a dquot entry from its on-disk little-endian format (@dp) into the
 * in-memory mem_dqblk of @dquot.  Fields the administrator changed locally
 * (tracked via the DQ_LASTSET_B + QIF_* bits in dq_flags) are deliberately
 * skipped so a sync from disk does not clobber pending local updates.
 */
static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp)
        struct ocfs2_global_disk_dqblk *d = dp;
        struct mem_dqblk *m = &dquot->dq_dqb;

        /* Update from disk only entries not set by the admin */
        if (!test_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags)) {
                m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
                m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
        if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
                m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
        if (!test_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags)) {
                m->dqb_bhardlimit = le64_to_cpu(d->dqb_bhardlimit);
                m->dqb_bsoftlimit = le64_to_cpu(d->dqb_bsoftlimit);
        if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
                m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
        if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags))
                m->dqb_btime = le64_to_cpu(d->dqb_btime);
        if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags))
                m->dqb_itime = le64_to_cpu(d->dqb_itime);
        /* Cluster-wide use count is always taken from disk, never local. */
        OCFS2_DQUOT(dquot)->dq_use_count = le32_to_cpu(d->dqb_use_count);
/*
 * Serialize the in-memory dquot of @dquot into the on-disk little-endian
 * global quota entry at @dp.  Inverse of ocfs2_global_disk2memdqb(), but
 * unconditional: every field is written, padding zeroed.
 */
static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot)
        struct ocfs2_global_disk_dqblk *d = dp;
        struct mem_dqblk *m = &dquot->dq_dqb;

        d->dqb_id = cpu_to_le32(dquot->dq_id);
        d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count);
        d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
        d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
        d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
        d->dqb_bhardlimit = cpu_to_le64(m->dqb_bhardlimit);
        d->dqb_bsoftlimit = cpu_to_le64(m->dqb_bsoftlimit);
        d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
        d->dqb_btime = cpu_to_le64(m->dqb_btime);
        d->dqb_itime = cpu_to_le64(m->dqb_itime);
        /* Keep padding deterministic on disk. */
        d->dqb_pad1 = d->dqb_pad2 = 0;
/*
 * qtree callback: does the on-disk entry @dp belong to @dquot's id?
 * Unused (free) entries never match.
 */
static int ocfs2_global_is_id(void *dp, struct dquot *dquot)
        struct ocfs2_global_disk_dqblk *d = dp;
        struct ocfs2_mem_dqinfo *oinfo =
                sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;

        if (qtree_entry_unused(&oinfo->dqi_gi, dp))
        return le32_to_cpu(d->dqb_id) == dquot->dq_id;
/* Format callbacks handed to the generic quota-tree code for the global
 * quota file. */
struct qtree_fmt_operations ocfs2_global_ops = {
        .mem2disk_dqblk = ocfs2_global_mem2diskdqb,
        .disk2mem_dqblk = ocfs2_global_disk2memdqb,
        .is_id = ocfs2_global_is_id,
/*
 * Validation callback for quota block reads: check the metadata ecc
 * trailer stored at the end of the block.
 */
static int ocfs2_validate_quota_block(struct super_block *sb,
                                      struct buffer_head *bh)
        struct ocfs2_disk_dqtrailer *dqt =
                ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data);

        mlog(0, "Validating quota block %llu\n",
             (unsigned long long)bh->b_blocknr);

        BUG_ON(!buffer_uptodate(bh));

        /*
         * If the ecc fails, we return the error but otherwise
         * leave the filesystem running. We know any error is
         * local to this block.
         */
        return ocfs2_validate_meta_ecc(sb, bh->b_data, &dqt->dq_check);
/*
 * Read one virtual block @v_block of the quota inode @inode, validating
 * its ecc trailer via ocfs2_validate_quota_block().  On success *bh points
 * at the (possibly newly obtained) buffer head.
 */
int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
                           struct buffer_head **bh)
        struct buffer_head *tmp = *bh;

        rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0,
                                    ocfs2_validate_quota_block);

        /* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
/*
 * Map logical quota-file block @block to its physical block (under
 * ip_alloc_sem) and get a buffer head for it without reading from disk —
 * used when the caller will overwrite the whole block anyway.
 */
static int ocfs2_get_quota_block(struct inode *inode, int block,
                                 struct buffer_head **bh)
        down_read(&OCFS2_I(inode)->ip_alloc_sem);
        err = ocfs2_extent_map_get_blocks(inode, block, &pblock, &pcount, NULL);
        up_read(&OCFS2_I(inode)->ip_alloc_sem);
        *bh = sb_getblk(inode->i_sb, pblock);
/* Read data from global quotafile - avoid pagecache and such because we cannot
 * afford acquiring the locks... We use quota cluster lock to serialize
 * operations. Caller is responsible for acquiring it. */
ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
                         size_t len, loff_t off)
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        struct inode *gqinode = oinfo->dqi_gqinode;
        loff_t i_size = i_size_read(gqinode);
        /* Offset within the first block and starting block number. */
        int offset = off & (sb->s_blocksize - 1);
        sector_t blk = off >> sb->s_blocksize_bits;
        struct buffer_head *bh;
        size_t toread, tocopy;

        /* Clamp reads that would run past the end of the quota file. */
        if (off + len > i_size)
                /* Copy at most up to the end of the current block. */
                tocopy = min_t(size_t, (sb->s_blocksize - offset), toread);
                err = ocfs2_read_quota_block(gqinode, blk, &bh);
                memcpy(data, bh->b_data + offset, tocopy);
/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
ssize_t ocfs2_quota_write(struct super_block *sb, int type,
                          const char *data, size_t len, loff_t off)
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        struct inode *gqinode = oinfo->dqi_gqinode;
        int offset = off & (sb->s_blocksize - 1);
        sector_t blk = off >> sb->s_blocksize_bits;
        int err = 0, new = 0, ja_type;
        struct buffer_head *bh = NULL;
        handle_t *handle = journal_current_handle();

        /* Quota writes must run inside an already-started transaction. */
                mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
                     "because transaction was not started.\n",
                     (unsigned long long)off, (unsigned long long)len);
        /* Never write into the reserved (ecc trailer) tail of a block. */
        if (len > sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset) {
                len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;

        mutex_lock_nested(&gqinode->i_mutex, I_MUTEX_QUOTA);
        /* Extend i_size if the write reaches past the current end. */
        if (gqinode->i_size < off + len) {
                        ocfs2_align_bytes_to_blocks(sb, off + len);

                /* Space is already allocated in ocfs2_global_read_dquot() */
                err = ocfs2_simple_size_update(gqinode,
        /* Not rewriting whole block? */
        if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
                /* Partial overwrite: must read the old contents first. */
                err = ocfs2_read_quota_block(gqinode, blk, &bh);
                ja_type = OCFS2_JOURNAL_ACCESS_WRITE;
                /* Whole-block write: skip the read, just get the buffer. */
                err = ocfs2_get_quota_block(gqinode, blk, &bh);
                ja_type = OCFS2_JOURNAL_ACCESS_CREATE;
                /* Fresh block: zero it before copying in the new data. */
                memset(bh->b_data, 0, sb->s_blocksize);
        memcpy(bh->b_data + offset, data, len);
        flush_dcache_page(bh->b_page);
        set_buffer_uptodate(bh);
        ocfs2_set_buffer_uptodate(gqinode, bh);
        err = ocfs2_journal_access_dq(handle, gqinode, bh, ja_type);
        err = ocfs2_journal_dirty(handle, bh);
                mutex_unlock(&gqinode->i_mutex);
        /* Bump inode version so other cluster nodes notice the change —
         * NOTE(review): presumed purpose, confirm against locking code. */
        gqinode->i_version++;
        ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
        mutex_unlock(&gqinode->i_mutex);
/*
 * Take the cluster inode lock on the global quota file (@ex selects
 * exclusive vs. shared).  The lock's buffer head is cached in
 * oinfo->dqi_gqi_bh, refcounted by dqi_gqi_count under dq_data_lock.
 */
int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
        struct buffer_head *bh = NULL;

        status = ocfs2_inode_lock(oinfo->dqi_gqinode, &bh, ex);
        spin_lock(&dq_data_lock);
        if (!oinfo->dqi_gqi_count++)
                oinfo->dqi_gqi_bh = bh;
                /* Re-entrant lock: bh must match the cached one. */
                WARN_ON(bh != oinfo->dqi_gqi_bh);
        spin_unlock(&dq_data_lock);
/*
 * Drop the cluster inode lock taken by ocfs2_lock_global_qf() and release
 * the cached buffer head once the last reference goes away.
 */
void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
        ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
        brelse(oinfo->dqi_gqi_bh);
        spin_lock(&dq_data_lock);
        if (!--oinfo->dqi_gqi_count)
                oinfo->dqi_gqi_bh = NULL;
        spin_unlock(&dq_data_lock);
/* Read information header from global quota file */
int ocfs2_global_read_info(struct super_block *sb, int type)
        struct inode *gqinode = NULL;
        /* System inode number of the global quota file, indexed by type. */
        unsigned int ino[MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
                                        GROUP_QUOTA_SYSTEM_INODE };
        struct ocfs2_global_disk_dqinfo dinfo;
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;

        /* Read global header */
        gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
                mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",

        /* Initialize the generic quota-tree descriptor for this type. */
        oinfo->dqi_gi.dqi_sb = sb;
        oinfo->dqi_gi.dqi_type = type;
        ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
        oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
        oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
        oinfo->dqi_gqi_bh = NULL;
        oinfo->dqi_gqi_count = 0;
        oinfo->dqi_gqinode = gqinode;
        /* Shared cluster lock is enough for reading the info header. */
        status = ocfs2_lock_global_qf(oinfo, 0);
        status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
                                      sizeof(struct ocfs2_global_disk_dqinfo),
                                      OCFS2_GLOBAL_INFO_OFF);
        ocfs2_unlock_global_qf(oinfo, 0);
        /* quota_read returns bytes read; a short read is an error. */
        if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
                mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
        /* Populate in-memory info from the on-disk (little-endian) header. */
        info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
        info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
        oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms);
        oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
        oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
        oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
        oinfo->dqi_gi.dqi_blocksize_bits = sb->s_blocksize_bits;
        /* Usable bytes per block exclude the reserved ecc trailer. */
        oinfo->dqi_gi.dqi_usable_bs = sb->s_blocksize -
                                      OCFS2_QBLK_RESERVED_SPACE;
        oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
        /* Kick off periodic syncing of dquots with the global file. */
        INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
        queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
                           msecs_to_jiffies(oinfo->dqi_syncms));
/* Write information to global quota file. Expects exclusive lock on quota
 * file inode and quota info */
static int __ocfs2_global_write_info(struct super_block *sb, int type)
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        struct ocfs2_global_disk_dqinfo dinfo;

        /* Snapshot the dirty-protected fields under dq_data_lock. */
        spin_lock(&dq_data_lock);
        info->dqi_flags &= ~DQF_INFO_DIRTY;
        dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
        dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
        spin_unlock(&dq_data_lock);
        dinfo.dqi_syncms = cpu_to_le32(oinfo->dqi_syncms);
        dinfo.dqi_blocks = cpu_to_le32(oinfo->dqi_gi.dqi_blocks);
        dinfo.dqi_free_blk = cpu_to_le32(oinfo->dqi_gi.dqi_free_blk);
        dinfo.dqi_free_entry = cpu_to_le32(oinfo->dqi_gi.dqi_free_entry);
        size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
                                     sizeof(struct ocfs2_global_disk_dqinfo),
                                     OCFS2_GLOBAL_INFO_OFF);
        /* quota_write returns bytes written; a short write is an error. */
        if (size != sizeof(struct ocfs2_global_disk_dqinfo)) {
                mlog(ML_ERROR, "Cannot write global quota info structure\n");
/*
 * Locked wrapper around __ocfs2_global_write_info(): takes the qinfo
 * cluster lock exclusively for the duration of the write.
 */
int ocfs2_global_write_info(struct super_block *sb, int type)
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;

        err = ocfs2_qinfo_lock(info, 1);
        err = __ocfs2_global_write_info(sb, type);
        ocfs2_qinfo_unlock(info, 1);
/*
 * Worst-case number of quota-file blocks that may need allocating when
 * instantiating a new global dquot entry: one per quota-tree level.
 */
static int ocfs2_global_qinit_alloc(struct super_block *sb, int type)
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

        /*
         * We may need to allocate tree blocks and a leaf block but not the
         */
        return oinfo->dqi_gi.dqi_qtree_depth;
/*
 * Journal credits needed when instantiating a global dquot entry.
 */
static int ocfs2_calc_global_qinit_credits(struct super_block *sb, int type)
        /* We modify all the allocated blocks, tree root, and info block */
        return (ocfs2_global_qinit_alloc(sb, type) + 2) *
               OCFS2_QUOTA_BLOCK_UPDATE_CREDITS;
/* Read in information from global quota file and acquire a reference to it.
 * dquot_acquire() has already started the transaction and locked quota file */
int ocfs2_global_read_dquot(struct dquot *dquot)
        int err, err2, ex = 0;
        struct super_block *sb = dquot->dq_sb;
        int type = dquot->dq_type;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(sb);
        struct inode *gqinode = info->dqi_gqinode;
        /* Blocks we may have to add to the quota file for a new entry. */
        int need_alloc = ocfs2_global_qinit_alloc(sb, type);
        handle_t *handle = NULL;

        err = ocfs2_qinfo_lock(info, 0);
        err = qtree_read_dquot(&info->dqi_gi, dquot);
        /* Take this node's reference on the global entry and remember the
         * usage baseline for later delta computation in __ocfs2_sync_dquot. */
        OCFS2_DQUOT(dquot)->dq_use_count++;
        OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
        OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
        ocfs2_qinfo_unlock(info, 0);

        if (!dquot->dq_off) { /* No real quota entry? */
                /*
                 * Add blocks to quota file before we start a transaction since
                 * locking allocators ranks above a transaction start
                 */
                WARN_ON(journal_current_handle());
                down_write(&OCFS2_I(gqinode)->ip_alloc_sem);
                err = ocfs2_extend_no_holes(gqinode,
                        gqinode->i_size + (need_alloc << sb->s_blocksize_bits),
                up_write(&OCFS2_I(gqinode)->ip_alloc_sem);

        handle = ocfs2_start_trans(osb,
                                   ocfs2_calc_global_qinit_credits(sb, type));
        if (IS_ERR(handle)) {
                err = PTR_ERR(handle);
        /* @ex is exclusive when a new entry must be instantiated. */
        err = ocfs2_qinfo_lock(info, ex);
        err = qtree_write_dquot(&info->dqi_gi, dquot);
        /* Instantiating the entry may have dirtied the info header. */
        if (ex && info_dirty(sb_dqinfo(dquot->dq_sb, dquot->dq_type))) {
                err2 = __ocfs2_global_write_info(dquot->dq_sb, dquot->dq_type);
                ocfs2_qinfo_unlock(info, 1);
                ocfs2_qinfo_unlock(info, 0);
        ocfs2_commit_trans(osb, handle);
491 /* Sync local information about quota modifications with global quota file.
492 * Caller must have started the transaction and obtained exclusive lock for
493 * global quota file inode */
494 int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
497 struct super_block *sb = dquot->dq_sb;
498 int type = dquot->dq_type;
499 struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
500 struct ocfs2_global_disk_dqblk dqblk;
501 s64 spacechange, inodechange;
502 time_t olditime, oldbtime;
504 err = sb->s_op->quota_read(sb, type, (char *)&dqblk,
505 sizeof(struct ocfs2_global_disk_dqblk),
507 if (err != sizeof(struct ocfs2_global_disk_dqblk)) {
509 mlog(ML_ERROR, "Short read from global quota file "
516 /* Update space and inode usage. Get also other information from
517 * global quota file so that we don't overwrite any changes there.
519 spin_lock(&dq_data_lock);
520 spacechange = dquot->dq_dqb.dqb_curspace -
521 OCFS2_DQUOT(dquot)->dq_origspace;
522 inodechange = dquot->dq_dqb.dqb_curinodes -
523 OCFS2_DQUOT(dquot)->dq_originodes;
524 olditime = dquot->dq_dqb.dqb_itime;
525 oldbtime = dquot->dq_dqb.dqb_btime;
526 ocfs2_global_disk2memdqb(dquot, &dqblk);
527 mlog(0, "Syncing global dquot %u space %lld+%lld, inodes %lld+%lld\n",
528 dquot->dq_id, dquot->dq_dqb.dqb_curspace, (long long)spacechange,
529 dquot->dq_dqb.dqb_curinodes, (long long)inodechange);
530 if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
531 dquot->dq_dqb.dqb_curspace += spacechange;
532 if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
533 dquot->dq_dqb.dqb_curinodes += inodechange;
534 /* Set properly space grace time... */
535 if (dquot->dq_dqb.dqb_bsoftlimit &&
536 dquot->dq_dqb.dqb_curspace > dquot->dq_dqb.dqb_bsoftlimit) {
537 if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags) &&
539 if (dquot->dq_dqb.dqb_btime > 0)
540 dquot->dq_dqb.dqb_btime =
541 min(dquot->dq_dqb.dqb_btime, oldbtime);
543 dquot->dq_dqb.dqb_btime = oldbtime;
546 dquot->dq_dqb.dqb_btime = 0;
547 clear_bit(DQ_BLKS_B, &dquot->dq_flags);
549 /* Set properly inode grace time... */
550 if (dquot->dq_dqb.dqb_isoftlimit &&
551 dquot->dq_dqb.dqb_curinodes > dquot->dq_dqb.dqb_isoftlimit) {
552 if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags) &&
554 if (dquot->dq_dqb.dqb_itime > 0)
555 dquot->dq_dqb.dqb_itime =
556 min(dquot->dq_dqb.dqb_itime, olditime);
558 dquot->dq_dqb.dqb_itime = olditime;
561 dquot->dq_dqb.dqb_itime = 0;
562 clear_bit(DQ_INODES_B, &dquot->dq_flags);
564 /* All information is properly updated, clear the flags */
565 __clear_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
566 __clear_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
567 __clear_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
568 __clear_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
569 __clear_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
570 __clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
571 OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
572 OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
573 spin_unlock(&dq_data_lock);
574 err = ocfs2_qinfo_lock(info, freeing);
576 mlog(ML_ERROR, "Failed to lock quota info, loosing quota write"
577 " (type=%d, id=%u)\n", dquot->dq_type,
578 (unsigned)dquot->dq_id);
582 OCFS2_DQUOT(dquot)->dq_use_count--;
583 err = qtree_write_dquot(&info->dqi_gi, dquot);
586 if (freeing && !OCFS2_DQUOT(dquot)->dq_use_count) {
587 err = qtree_release_dquot(&info->dqi_gi, dquot);
588 if (info_dirty(sb_dqinfo(sb, type))) {
589 err2 = __ocfs2_global_write_info(sb, type);
595 ocfs2_qinfo_unlock(info, freeing);
/*
 * Functions for periodic syncing of dquots with global file
 */
static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
        struct super_block *sb = dquot->dq_sb;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(sb);

        mlog_entry("id=%u qtype=%u type=%lu device=%s\n", dquot->dq_id,
                   dquot->dq_type, type, sb->s_id);
        /* Only sync dquots of the quota type we were asked about. */
        if (type != dquot->dq_type)
        status = ocfs2_lock_global_qf(oinfo, 1);
        handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
        /* Serialize against other quota file I/O on this superblock. */
        mutex_lock(&sb_dqopt(sb)->dqio_mutex);
        status = ocfs2_sync_dquot(dquot);
        mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
        /* We have to write local structure as well... */
        dquot_mark_dquot_dirty(dquot);
        status = dquot_commit(dquot);
        ocfs2_commit_trans(osb, handle);
        ocfs2_unlock_global_qf(oinfo, 1);
/*
 * Periodic work: sync every active dquot of this type with the global
 * quota file, then re-arm the delayed work for the next interval.
 */
static void qsync_work_fn(struct work_struct *work)
        struct ocfs2_mem_dqinfo *oinfo = container_of(work,
                                                      struct ocfs2_mem_dqinfo,
        struct super_block *sb = oinfo->dqi_gqinode->i_sb;

        dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
        queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
                           msecs_to_jiffies(oinfo->dqi_syncms));
/*
 * Wrappers for generic quota functions
 */

/* .write_dquot: commit the local dquot inside an ocfs2 transaction. */
static int ocfs2_write_dquot(struct dquot *dquot)
        struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);

        mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
        handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
        status = dquot_commit(dquot);
        ocfs2_commit_trans(osb, handle);
/* Journal credits needed to delete a dquot from the global quota file. */
static int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        /*
         * We modify tree, leaf block, global info, local chunk header,
         * global and local inode; OCFS2_QINFO_WRITE_CREDITS already
         * accounts for inode update
         */
        return (oinfo->dqi_gi.dqi_qtree_depth + 2) *
               OCFS2_QUOTA_BLOCK_UPDATE_CREDITS +
               OCFS2_QINFO_WRITE_CREDITS +
               OCFS2_INODE_UPDATE_CREDITS;
/*
 * .release_dquot: drop the last local reference — run the generic
 * dquot_release() under the exclusive global quota-file lock inside a
 * transaction sized by ocfs2_calc_qdel_credits().
 */
static int ocfs2_release_dquot(struct dquot *dquot)
        struct ocfs2_mem_dqinfo *oinfo =
                sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);

        mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
        status = ocfs2_lock_global_qf(oinfo, 1);
        handle = ocfs2_start_trans(osb,
                ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_type));
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
        status = dquot_release(dquot);
        ocfs2_commit_trans(osb, handle);
        ocfs2_unlock_global_qf(oinfo, 1);
/*
 * .acquire_dquot: read/instantiate a dquot via the generic dquot_acquire()
 * while holding the global quota-file lock exclusively.
 */
static int ocfs2_acquire_dquot(struct dquot *dquot)
        struct ocfs2_mem_dqinfo *oinfo =
                sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;

        mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
        /* We need an exclusive lock, because we're going to update use count
         * and instantiate possibly new dquot structure */
        status = ocfs2_lock_global_qf(oinfo, 1);
        status = dquot_acquire(dquot);
        ocfs2_unlock_global_qf(oinfo, 1);
/*
 * .mark_dirty: mark the dquot dirty and, if the administrator just set
 * limits/times (any DQ_LASTSET_B bit), push the change to the global
 * quota file immediately so other cluster nodes see it sooner.
 */
static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
        /* Bits indicating admin-set fields that warrant an immediate sync. */
        unsigned long mask = (1 << (DQ_LASTSET_B + QIF_ILIMITS_B)) |
                             (1 << (DQ_LASTSET_B + QIF_BLIMITS_B)) |
                             (1 << (DQ_LASTSET_B + QIF_INODES_B)) |
                             (1 << (DQ_LASTSET_B + QIF_SPACE_B)) |
                             (1 << (DQ_LASTSET_B + QIF_BTIME_B)) |
                             (1 << (DQ_LASTSET_B + QIF_ITIME_B));
        struct super_block *sb = dquot->dq_sb;
        int type = dquot->dq_type;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(sb);

        mlog_entry("id=%u, type=%d", dquot->dq_id, type);
        dquot_mark_dquot_dirty(dquot);

        /* In case user set some limits, sync dquot immediately to global
         * quota file so that information propagates quicker */
        spin_lock(&dq_data_lock);
        if (dquot->dq_flags & mask)
        spin_unlock(&dq_data_lock);
        /* This is a slight hack but we can't afford getting global quota
         * lock if we already have a transaction started. */
        if (!sync || journal_current_handle()) {
                /* Just write the local structure, no global sync. */
                status = ocfs2_write_dquot(dquot);
        status = ocfs2_lock_global_qf(oinfo, 1);
        handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
        status = ocfs2_sync_dquot(dquot);
        /* Now write updated local dquot structure */
        status = dquot_commit(dquot);
        ocfs2_commit_trans(osb, handle);
        ocfs2_unlock_global_qf(oinfo, 1);
/* This should happen only after set_dqinfo(). */
static int ocfs2_write_info(struct super_block *sb, int type)
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

        status = ocfs2_lock_global_qf(oinfo, 1);
        handle = ocfs2_start_trans(OCFS2_SB(sb), OCFS2_QINFO_WRITE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
        /* Generic commit writes the (local) quota info structure. */
        status = dquot_commit_info(sb, type);
        ocfs2_commit_trans(OCFS2_SB(sb), handle);
        ocfs2_unlock_global_qf(oinfo, 1);
/*
 * .alloc_dquot: allocate a zeroed ocfs2_dquot and hand back the embedded
 * generic struct dquot.  GFP_NOFS because we may be called from fs paths.
 */
static struct dquot *ocfs2_alloc_dquot(struct super_block *sb, int type)
        struct ocfs2_dquot *dquot =
                kmem_cache_zalloc(ocfs2_dquot_cachep, GFP_NOFS);

        return &dquot->dq_dquot;
/* .destroy_dquot: counterpart of ocfs2_alloc_dquot(); frees to the cache.
 * NOTE(review): relies on dq_dquot being the first member of ocfs2_dquot
 * so the generic pointer equals the cache object — confirm in headers. */
static void ocfs2_destroy_dquot(struct dquot *dquot)
        kmem_cache_free(ocfs2_dquot_cachep, dquot);
/* Quota operations vector wired into the VFS; generic dquot_* helpers are
 * used where no cluster coordination is needed, ocfs2_* wrappers where the
 * global quota file must be locked or journaled. */
struct dquot_operations ocfs2_quota_operations = {
        .initialize     = dquot_initialize,
        .alloc_space    = dquot_alloc_space,
        .alloc_inode    = dquot_alloc_inode,
        .free_space     = dquot_free_space,
        .free_inode     = dquot_free_inode,
        .transfer       = dquot_transfer,
        .write_dquot    = ocfs2_write_dquot,
        .acquire_dquot  = ocfs2_acquire_dquot,
        .release_dquot  = ocfs2_release_dquot,
        .mark_dirty     = ocfs2_mark_dquot_dirty,
        .write_info     = ocfs2_write_info,
        .alloc_dquot    = ocfs2_alloc_dquot,
        .destroy_dquot  = ocfs2_destroy_dquot,
/* Module init: create the workqueue used for periodic quota syncing. */
int ocfs2_quota_setup(void)
        ocfs2_quota_wq = create_workqueue("o2quot");
/* Module exit: flush pending sync work and destroy the workqueue. */
void ocfs2_quota_shutdown(void)
        if (ocfs2_quota_wq) {
                flush_workqueue(ocfs2_quota_wq);
                destroy_workqueue(ocfs2_quota_wq);
                ocfs2_quota_wq = NULL;