/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota
 * check program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the
 * frequency of quota file syncs increases as the user moves closer to their
 * limit.  The more frequent the syncs, the more accurate the quota
 * enforcement, but that means that there is more contention between the nodes
 * for the quota file.  The default value is one.  This sets the maximum
 * theoretical quota overrun (with an infinite number of nodes, each with
 * infinite bandwidth) to twice the user's limit.  (In practice, the maximum
 * overrun you see should be much less.)  A "quota_scale" number greater than
 * one makes quota syncs more frequent and reduces the maximum overrun.
 * Numbers less than one (but greater than zero) make quota syncs less
 * frequent.
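 *
 * As a rough example of how the scaling works: with the default
 * quota_scale of one, a node forces a sync as soon as its cached usage
 * value plus its local pending change, scaled by the number of journals
 * (i.e. nodes) in the cluster, would reach the limit.  See need_sync()
 * below for the exact heuristic.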
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>
63 #include "ops_address.h"
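
/*
 * The quota file stores user and group quotas interleaved by ID: the user
 * quota for ID n lives at index 2n and the group quota for ID n at index
 * 2n + 1, each entry being sizeof(struct gfs2_quota) bytes.
 */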
static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}

static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kzalloc(sizeof(struct gfs2_quota_data), GFP_KERNEL);
	if (!qd)
		return -ENOMEM;

	if (user)
		set_bit(QDF_USER, &qd->qd_flags);

	error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);

	error = gfs2_lvb_hold(qd->qd_gl);
	gfs2_glock_put(qd->qd_gl);

static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;

	spin_lock(&sdp->sd_quota_spin);
	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (qd->qd_id == id &&
		    !test_bit(QDF_USER, &qd->qd_flags) == !user) {

	list_add(&qd->qd_list, &sdp->sd_quota_list);
	atomic_inc(&sdp->sd_quota_count);

	spin_unlock(&sdp->sd_quota_spin);

	gfs2_lvb_unhold(new_qd->qd_gl);

	error = qd_alloc(sdp, user, id, &new_qd);

static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_count);
	qd->qd_count++;
	spin_unlock(&sdp->sd_quota_spin);
}

static void qd_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_count);
	if (!--qd->qd_count)
		qd->qd_last_touched = jiffies;
	spin_unlock(&sdp->sd_quota_spin);
}
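
/*
 * Each in-core quota change owns a slot in this node's quota_change file.
 * Free slots are tracked in sd_quota_bitmap, an array of sd_quota_chunks
 * page-sized bitmaps, giving 8 * PAGE_SIZE slots per chunk.
 */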
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	spin_lock(&sdp->sd_quota_spin);

	if (qd->qd_slot_count++) {
		spin_unlock(&sdp->sd_quota_spin);
		return 0;
	}

	for (c = 0; c < sdp->sd_quota_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_quota_bitmap[c][o];
			if (byte != 0xFF)
				goto found;
		}

	goto fail;

found:
	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
			break;
	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

	if (qd->qd_slot >= sdp->sd_quota_slots)
		goto fail;

	sdp->sd_quota_bitmap[c][o] |= 1 << b;

	spin_unlock(&sdp->sd_quota_spin);

	return 0;

fail:
	qd->qd_slot_count--;
	spin_unlock(&sdp->sd_quota_spin);
	return -ENOSPC;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&sdp->sd_quota_spin);
}

static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
		qd->qd_slot = -1;
	}
	spin_unlock(&sdp->sd_quota_spin);
}
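
/*
 * bh_get() brings the block of the quota_change file that backs this qd's
 * slot into memory: block = qd_slot / sd_qc_per_block, and the entry sits
 * qd_slot % sd_qc_per_block entries past the metadata header within that
 * block.
 */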
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, 0, &bh_map);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		       (bh->b_data + sizeof(struct gfs2_meta_header) +
			offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}

static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&sdp->sd_quota_spin);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
			continue;

		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

		set_bit(QDF_LOCKED, &qd->qd_flags);
		gfs2_assert_warn(sdp, qd->qd_count);
		qd->qd_count++;
		qd->qd_change_sync = qd->qd_change;
		gfs2_assert_warn(sdp, qd->qd_slot_count);
		qd->qd_slot_count++;

		found = 1;
		break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&sdp->sd_quota_spin);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}

static int qd_trylock(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&sdp->sd_quota_spin);

	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
		spin_unlock(&sdp->sd_quota_spin);
		return 0;
	}

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

	set_bit(QDF_LOCKED, &qd->qd_flags);
	gfs2_assert_warn(sdp, qd->qd_count);
	qd->qd_count++;
	qd->qd_change_sync = qd->qd_change;
	gfs2_assert_warn(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;

	spin_unlock(&sdp->sd_quota_spin);

	gfs2_assert_warn(sdp, qd->qd_change_sync);
	if (bh_get(qd)) {
		clear_bit(QDF_LOCKED, &qd->qd_flags);
		slot_put(qd);
		qd_put(qd);
		return 0;
	}

	return 1;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}
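
/*
 * qdsb_get() assembles everything needed to track one ID's quota usage:
 * the gfs2_quota_data itself (qd_get), a slot in the per-node quota_change
 * file (slot_get), and the buffer backing that slot (bh_get).  qdsb_put()
 * releases the three holds in reverse order.
 */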
static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, user, id, create, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);

	error = bh_get(*qdp);

static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data **qd = al->al_qd;
	int error;

	if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd);

	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd);

	if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
		error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);

	if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
		error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);

	if (error)
		gfs2_quota_unhold(ip);

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	unsigned int x;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < al->al_qd_num; x++) {
		qdsb_put(al->al_qd[x]);
		al->al_qd[x] = NULL;
	}
	al->al_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
	    !test_bit(QDF_USER, &qd_b->qd_flags)) {
		if (test_bit(QDF_USER, &qd_a->qd_flags))
			return -1;
		else
			return 1;
	}
	if (qd_a->qd_id < qd_b->qd_id)
		return -1;
	if (qd_a->qd_id > qd_b->qd_id)
		return 1;

	return 0;
}

static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_flags = 0;
		if (test_bit(QDF_USER, &qd->qd_flags))
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(qd->qd_id);
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&sdp->sd_quota_spin);
	qd->qd_change = x;
	spin_unlock(&sdp->sd_quota_spin);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);

	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);
}

/*
 * This function was mostly borrowed from gfs2_block_truncate_page, which was
 * in turn mostly borrowed from ext3.
 */
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd)
{
	struct inode *inode = &ip->i_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;
	struct page *page;
	void *kaddr;
	char *ptr;
	struct gfs2_quota_host qp;
	s64 value;
	int err = -EIO;

	page = grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_get_block(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))
			goto unlock;
	}

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ_META, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	gfs2_trans_add_bh(ip->i_gl, bh, 0);

	kaddr = kmap_atomic(page, KM_USER0);
	ptr = kaddr + offset;
	gfs2_quota_in(&qp, ptr);
	qp.qu_value += change;
	value = qp.qu_value;
	gfs2_quota_out(&qp, ptr);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	err = 0;
	qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC);
	qd->qd_qb.qb_value = cpu_to_be64(value);
	((struct gfs2_quota_lvb *)(qd->qd_gl->gl_lvb))->qb_magic = cpu_to_be32(GFS2_MAGIC);
	((struct gfs2_quota_lvb *)(qd->qd_gl->gl_lvb))->qb_value = cpu_to_be64(value);
unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
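
/*
 * Sync a batch of dirty quota changes back to the quota file.  The qd
 * glocks are acquired in sort_qd() order (user quotas before group, then
 * by ID) so that concurrent syncers on different nodes take them in a
 * consistent order and cannot deadlock against each other.
 */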
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	loff_t offset;
	unsigned int nalloc = 0;
	struct gfs2_alloc *al = NULL;
	int error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	for (x = 0; x < num_qd; x++) {
		int alloc_required;

		offset = qd2offset(qda[x]);
		error = gfs2_write_alloc_required(ip, offset,
						  sizeof(struct gfs2_quota),
						  &alloc_required);
		if (error)
			goto out_gunlock;
		if (alloc_required)
			nalloc++;
	}

	if (nalloc) {
		al = gfs2_alloc_get(ip);

		al->al_requested = nalloc * (data_blocks + ind_blocks);

		error = gfs2_inplace_reserve(ip);
		if (error)
			goto out_alloc;

		error = gfs2_trans_begin(sdp,
					 al->al_rgd->rd_ri.ri_length +
					 num_qd * data_blocks +
					 nalloc * ind_blocks +
					 RES_DINODE + num_qd +
					 RES_STATFS, 0);
		if (error)
			goto out_ipres;
	} else {
		error = gfs2_trans_begin(sdp,
					 num_qd * data_blocks +
					 RES_DINODE + num_qd, 0);
		if (error)
			goto out_gunlock;
	}

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	if (nalloc)
		gfs2_inplace_release(ip);
out_alloc:
	if (nalloc)
		gfs2_alloc_put(ip);
out_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
	return error;
}

static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	struct gfs2_quota_host q;
	char buf[sizeof(struct gfs2_quota)];
	struct file_ra_state ra_state;
	int error;
	struct gfs2_quota_lvb *qlvb;

	file_ra_state_init(&ra_state, sdp->sd_quota_inode->i_mapping);
restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		loff_t pos;
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl,
					   LM_ST_EXCLUSIVE, GL_NOCACHE,
					   q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		memset(buf, 0, sizeof(struct gfs2_quota));
		pos = qd2offset(qd);
		error = gfs2_internal_read(ip, &ra_state, buf,
					   &pos, sizeof(struct gfs2_quota));
		if (error < 0)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);

		gfs2_quota_in(&q, buf);
		qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
		qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
		qlvb->qb_limit = cpu_to_be64(q.qu_limit);
		qlvb->qb_warn = cpu_to_be64(q.qu_warn);
		qlvb->qb_value = cpu_to_be64(q.qu_value);
		qd->qd_qb = *qlvb;

		if (gfs2_glock_is_blocking(qd->qd_gl)) {
			gfs2_glock_dq_uninit(q_gh);
			force_refresh = 0;
			goto restart;
		}
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}

int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	unsigned int x;
	int error = 0;

	gfs2_quota_hold(ip, uid, gid);

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
	     sort_qd, NULL);

	for (x = 0; x < al->al_qd_num; x++) {
		error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}
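
/*
 * Decide whether a qd's pending local change is large enough, relative to
 * the cached usage and limit in its LVB, that it should be synced to the
 * quota file now: the pending change is scaled by the number of journals
 * in the cluster and by quota_scale (num/den), and a sync is needed once
 * cached value + scaled change reaches the limit.
 */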
static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&sdp->sd_quota_spin);
	value = qd->qd_change;
	spin_unlock(&sdp->sd_quota_spin);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		do_div(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < al->al_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = al->al_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);

		if (sync && qd_trylock(qd))
			qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}
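
/*
 * gfs2_quota_check() enforces the limits for the IDs held on this inode.
 * The value tested is the cluster-wide figure cached in the LVB plus this
 * node's not-yet-synced local change, so enforcement is only as fresh as
 * the last sync (see the fuzziness discussion at the top of this file).
 */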
static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
	       sdp->sd_fsname, type,
	       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
	       qd->qd_id);

	return 0;
}

int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data *qd;
	s64 value;
	unsigned int x;
	int error = 0;

	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
			continue;

		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&sdp->sd_quota_spin);
		value += qd->qd_change;
		spin_unlock(&sdp->sd_quota_spin);

		if (be64_to_cpu(qd->qd_qb.qb_limit) &&
		    (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
			print_message(qd, "exceeded");
			error = -EDQUOT;
			break;
		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp, gt_quota_warn_period)
					 * HZ)) {
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}

	return error;
}

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       u32 uid, u32 gid)
{
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data *qd;
	unsigned int x;
	unsigned int found = 0;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
		return;
	if (ip->i_di.di_flags & GFS2_DIF_SYSTEM)
		return;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
			do_qc(qd, change);
			found++;
		}
	}
}

int gfs2_quota_sync(struct gfs2_sbd *sdp)
{
	struct gfs2_quota_data **qda;
	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	sdp->sd_quota_sync_gen++;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	kfree(qda);

	return error;
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, user, id, CREATE, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);

	return error;
}
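
/*
 * At mount time, scan this node's quota_change file and rebuild the
 * in-core list of pending changes and the slot bitmap, so that changes
 * which were on disk when the node last went down get synced rather than
 * lost.
 */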
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (!ip->i_di.di_size || ip->i_di.di_size > (64 << 20) ||
	    ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);

	error = -ENOMEM;

	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
				       sizeof(unsigned char *), GFP_KERNEL);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < sdp->sd_quota_chunks; x++) {
		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!sdp->sd_quota_bitmap[x])
			goto fail;
	}

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		unsigned int y;

		if (!extlen) {
			int new = 0;
			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_change_host qc;
			struct gfs2_quota_data *qd;

			gfs2_quota_change_in(&qc, bh->b_data +
					     sizeof(struct gfs2_meta_header) +
					     y * sizeof(struct gfs2_quota_change));
			if (!qc.qc_change)
				continue;

			error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
					 qc.qc_id, &qd);
			if (error) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc.qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;
			qd->qd_last_touched = jiffies;

			spin_lock(&sdp->sd_quota_spin);
			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&sdp->sd_quota_spin);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}

void gfs2_quota_scan(struct gfs2_sbd *sdp)
{
	struct gfs2_quota_data *qd, *safe;
	LIST_HEAD(dead);

	spin_lock(&sdp->sd_quota_spin);
	list_for_each_entry_safe(qd, safe, &sdp->sd_quota_list, qd_list) {
		if (!qd->qd_count &&
		    time_after_eq(jiffies, qd->qd_last_touched +
				  gfs2_tune_get(sdp, gt_quota_cache_secs) * HZ)) {
			list_move(&qd->qd_list, &dead);
			gfs2_assert_warn(sdp,
					 atomic_read(&sdp->sd_quota_count) > 0);
			atomic_dec(&sdp->sd_quota_count);
		}
	}
	spin_unlock(&sdp->sd_quota_spin);

	while (!list_empty(&dead)) {
		qd = list_entry(dead.next, struct gfs2_quota_data, qd_list);
		list_del(&qd->qd_list);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_lvb_unhold(qd->qd_gl);
		kfree(qd);
	}
}

void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;
	unsigned int x;

	spin_lock(&sdp->sd_quota_spin);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		if (qd->qd_count > 1 ||
		    (qd->qd_count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
			list_move(&qd->qd_list, head);
			spin_unlock(&sdp->sd_quota_spin);
			schedule();
			spin_lock(&sdp->sd_quota_spin);
			continue;
		}

		list_del(&qd->qd_list);
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&sdp->sd_quota_spin);

		if (!qd->qd_count) {
			gfs2_assert_warn(sdp, !qd->qd_change);
			gfs2_assert_warn(sdp, !qd->qd_slot_count);
		} else
			gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_lvb_unhold(qd->qd_gl);
		kfree(qd);

		spin_lock(&sdp->sd_quota_spin);
	}
	spin_unlock(&sdp->sd_quota_spin);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	if (sdp->sd_quota_bitmap) {
		for (x = 0; x < sdp->sd_quota_chunks; x++)
			kfree(sdp->sd_quota_bitmap[x]);
		kfree(sdp->sd_quota_bitmap);
	}
}