gfs2: Don't try to sync non-changes
[linux-2.6-block.git] / fs / gfs2 / quota.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space. Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file. This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously. So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota check
 * program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness. "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file. (The default is
 * 60 seconds.) Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit. The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one. This sets the maximum theoretical quota overrun
 * (with infinite nodes with infinite bandwidth) to twice the user's limit. (In
 * practice, the maximum overrun you see should be much less.) A "quota_scale"
 * number greater than one makes quota syncs more frequent and reduces the
 * maximum overrun. Numbers less than one (but greater than zero) make quota
 * syncs less frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */
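/*
 * Illustrative example (the numbers are made up, not taken from the code
 * below): suppose an ID has a 1000-block limit, the quota file currently
 * records 900 blocks used, the cluster has 4 journals (nodes), and
 * quota_scale is 1/1.  need_sync() weighs a local, not-yet-synced change of
 * +25 blocks as if every node could have accumulated a similar change, i.e.
 * 25 * 4 = 100 blocks; since 900 + 100 >= 1000, the change is synced early.
 * A local change of +20 weighs in at 80 blocks, 900 + 80 < 1000, so that
 * sync can wait for quota_quantum to expire instead.
 */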

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
#include <linux/lockref.h>
#include <linux/list_lru.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/jhash.h>
#include <linux/vmalloc.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"

#define GFS2_QD_HASH_SHIFT	12
#define GFS2_QD_HASH_SIZE	BIT(GFS2_QD_HASH_SHIFT)
#define GFS2_QD_HASH_MASK	(GFS2_QD_HASH_SIZE - 1)

#define QC_CHANGE 0
#define QC_SYNC 1

/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
/*                     -> sd_bitmap_lock                              */
static DEFINE_SPINLOCK(qd_lock);
struct list_lru gfs2_qd_lru;

static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];

static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
				 const struct kqid qid)
{
	unsigned int h;

	h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
	h = jhash(&qid, sizeof(struct kqid), h);

	return h & GFS2_QD_HASH_MASK;
}
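/*
 * Descriptive note: the hash table is global, shared by every mounted gfs2
 * filesystem, which is why both the superblock pointer and the kqid are
 * folded into the hash above, and why gfs2_qd_search_bucket() below must
 * compare qd_sbd as well as qd_id before declaring a match.  With
 * GFS2_QD_HASH_SHIFT of 12 the table has 4096 buckets, and the second
 * jhash() round is masked down to that range.
 */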

static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&qd_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&qd_hash_table[hash]);
}

static void gfs2_qd_dealloc(struct rcu_head *rcu)
{
	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
	struct gfs2_sbd *sdp = qd->qd_sbd;

	kmem_cache_free(gfs2_quotad_cachep, qd);
	if (atomic_dec_and_test(&sdp->sd_quota_count))
		wake_up(&sdp->sd_kill_wait);
}

static void gfs2_qd_dispose(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&qd_lock);
	list_del(&qd->qd_list);
	spin_unlock(&qd_lock);

	spin_lock_bucket(qd->qd_hash);
	hlist_bl_del_rcu(&qd->qd_hlist);
	spin_unlock_bucket(qd->qd_hash);

	if (!gfs2_withdrawn(sdp)) {
		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);
	}

	gfs2_glock_put(qd->qd_gl);
	call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
}

static void gfs2_qd_list_dispose(struct list_head *list)
{
	struct gfs2_quota_data *qd;

	while (!list_empty(list)) {
		qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
		list_del(&qd->qd_lru);

		gfs2_qd_dispose(qd);
	}
}

3f97b163
VD
154static enum lru_status gfs2_qd_isolate(struct list_head *item,
155 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
2147dbfd
SW
156{
157 struct list_head *dispose = arg;
6b0e9a5f
AG
158 struct gfs2_quota_data *qd =
159 list_entry(item, struct gfs2_quota_data, qd_lru);
160 enum lru_status status;
2147dbfd
SW
161
162 if (!spin_trylock(&qd->qd_lockref.lock))
163 return LRU_SKIP;
164
6b0e9a5f 165 status = LRU_SKIP;
2147dbfd
SW
166 if (qd->qd_lockref.count == 0) {
167 lockref_mark_dead(&qd->qd_lockref);
3f97b163 168 list_lru_isolate_move(lru, &qd->qd_lru, dispose);
6b0e9a5f 169 status = LRU_REMOVED;
2147dbfd
SW
170 }
171
172 spin_unlock(&qd->qd_lockref.lock);
6b0e9a5f 173 return status;
2147dbfd
SW
174}
175
176static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
177 struct shrink_control *sc)
178{
179 LIST_HEAD(dispose);
180 unsigned long freed;
181
182 if (!(sc->gfp_mask & __GFP_FS))
183 return SHRINK_STOP;
184
503c358c
VD
185 freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
186 gfs2_qd_isolate, &dispose);
2147dbfd 187
faada74a 188 gfs2_qd_list_dispose(&dispose);
2147dbfd 189
1ab6c499
DC
190 return freed;
191}
0a7ab79c 192
2147dbfd
SW
193static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
194 struct shrink_control *sc)
1ab6c499 195{
503c358c 196 return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
0a7ab79c
AD
197}
198
2147dbfd
SW
199struct shrinker gfs2_qd_shrinker = {
200 .count_objects = gfs2_qd_shrink_count,
201 .scan_objects = gfs2_qd_shrink_scan,
202 .seeks = DEFAULT_SEEKS,
203 .flags = SHRINKER_NUMA_AWARE,
204};
205
206
static u64 qd2index(struct gfs2_quota_data *qd)
{
	struct kqid qid = qd->qd_id;
	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
		((qid.type == USRQUOTA) ? 0 : 1);
}

static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = qd2index(qd);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}
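/*
 * A minimal sketch of the same mapping, starting from a bare kqid instead of
 * a gfs2_quota_data.  It is not used by the code in this file and the
 * "_example" name is hypothetical.  User IDs land on even indices and group
 * IDs on odd ones, so e.g. uid 1000 maps to index 2000 and gid 1000 to index
 * 2001, and the two kinds of records interleave in the quota file.
 */
static inline u64 gfs2_qid2offset_example(struct kqid qid)
{
	u64 index = 2 * (u64)from_kqid(&init_user_ns, qid) +
		    ((qid.type == USRQUOTA) ? 0 : 1);

	return index * sizeof(struct gfs2_quota);
}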
223
c754fbbb 224static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
b3b94faa
DT
225{
226 struct gfs2_quota_data *qd;
227 int error;
228
37b2c837 229 qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
b3b94faa 230 if (!qd)
c754fbbb 231 return NULL;
b3b94faa 232
c754fbbb 233 qd->qd_sbd = sdp;
bb73ae8f 234 qd->qd_lockref.count = 0;
9b9f039d 235 spin_lock_init(&qd->qd_lockref.lock);
05e0a60d 236 qd->qd_id = qid;
b3b94faa 237 qd->qd_slot = -1;
2147dbfd 238 INIT_LIST_HEAD(&qd->qd_lru);
c754fbbb 239 qd->qd_hash = hash;
b3b94faa 240
2f6c9896 241 error = gfs2_glock_get(sdp, qd2index(qd),
b3b94faa
DT
242 &gfs2_quota_glops, CREATE, &qd->qd_gl);
243 if (error)
244 goto fail;
245
c754fbbb 246 return qd;
b3b94faa 247
a91ea69f 248fail:
37b2c837 249 kmem_cache_free(gfs2_quotad_cachep, qd);
c754fbbb 250 return NULL;
b3b94faa
DT
251}
252
c754fbbb
SW
253static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
254 const struct gfs2_sbd *sdp,
255 struct kqid qid)
b3b94faa 256{
c754fbbb
SW
257 struct gfs2_quota_data *qd;
258 struct hlist_bl_node *h;
b3b94faa 259
c754fbbb
SW
260 hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
261 if (!qid_eq(qd->qd_id, qid))
262 continue;
263 if (qd->qd_sbd != sdp)
264 continue;
265 if (lockref_get_not_dead(&qd->qd_lockref)) {
266 list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
267 return qd;
b3b94faa 268 }
c754fbbb 269 }
b3b94faa 270
c754fbbb
SW
271 return NULL;
272}
b3b94faa 273
b3b94faa 274
c754fbbb
SW
275static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
276 struct gfs2_quota_data **qdp)
277{
278 struct gfs2_quota_data *qd, *new_qd;
279 unsigned int hash = gfs2_qd_hash(sdp, qid);
b3b94faa 280
c754fbbb
SW
281 rcu_read_lock();
282 *qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
283 rcu_read_unlock();
b3b94faa 284
c754fbbb
SW
285 if (qd)
286 return 0;
287
288 new_qd = qd_alloc(hash, sdp, qid);
289 if (!new_qd)
290 return -ENOMEM;
291
292 spin_lock(&qd_lock);
293 spin_lock_bucket(hash);
294 *qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
295 if (qd == NULL) {
bb73ae8f 296 new_qd->qd_lockref.count++;
c754fbbb
SW
297 *qdp = new_qd;
298 list_add(&new_qd->qd_list, &sdp->sd_quota_list);
299 hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
300 atomic_inc(&sdp->sd_quota_count);
301 }
302 spin_unlock_bucket(hash);
303 spin_unlock(&qd_lock);
304
305 if (qd) {
306 gfs2_glock_put(new_qd->qd_gl);
307 kmem_cache_free(gfs2_quotad_cachep, new_qd);
b3b94faa 308 }
c754fbbb
SW
309
310 return 0;
b3b94faa
DT
311}
312
c754fbbb 313
b3b94faa
DT
314static void qd_hold(struct gfs2_quota_data *qd)
315{
481f6e7d 316 struct gfs2_sbd *sdp = qd->qd_sbd;
9b9f039d
SW
317 gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
318 lockref_get(&qd->qd_lockref);
b3b94faa
DT
319}
320
321static void qd_put(struct gfs2_quota_data *qd)
322{
a475c5dd
AG
323 struct gfs2_sbd *sdp;
324
2147dbfd
SW
325 if (lockref_put_or_lock(&qd->qd_lockref))
326 return;
9b9f039d 327
a475c5dd
AG
328 BUG_ON(__lockref_is_dead(&qd->qd_lockref));
329 sdp = qd->qd_sbd;
330 if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
331 lockref_mark_dead(&qd->qd_lockref);
332 spin_unlock(&qd->qd_lockref.lock);
333
334 gfs2_qd_dispose(qd);
335 return;
336 }
337
2147dbfd
SW
338 qd->qd_lockref.count = 0;
339 list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
340 spin_unlock(&qd->qd_lockref.lock);
b3b94faa
DT
341}
342
343static int slot_get(struct gfs2_quota_data *qd)
344{
ee2411a8
SW
345 struct gfs2_sbd *sdp = qd->qd_sbd;
346 unsigned int bit;
347 int error = 0;
b3b94faa 348
2d9e7230 349 spin_lock(&sdp->sd_bitmap_lock);
ee2411a8
SW
350 if (qd->qd_slot_count != 0)
351 goto out;
b3b94faa 352
ee2411a8
SW
353 error = -ENOSPC;
354 bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
355 if (bit < sdp->sd_quota_slots) {
356 set_bit(bit, sdp->sd_quota_bitmap);
357 qd->qd_slot = bit;
e9fb7c73 358 error = 0;
ee2411a8
SW
359out:
360 qd->qd_slot_count++;
b3b94faa 361 }
2d9e7230 362 spin_unlock(&sdp->sd_bitmap_lock);
b3b94faa 363
ee2411a8 364 return error;
b3b94faa
DT
365}
366
367static void slot_hold(struct gfs2_quota_data *qd)
368{
ee2411a8 369 struct gfs2_sbd *sdp = qd->qd_sbd;
b3b94faa 370
2d9e7230 371 spin_lock(&sdp->sd_bitmap_lock);
b3b94faa
DT
372 gfs2_assert(sdp, qd->qd_slot_count);
373 qd->qd_slot_count++;
2d9e7230 374 spin_unlock(&sdp->sd_bitmap_lock);
b3b94faa
DT
375}
376
377static void slot_put(struct gfs2_quota_data *qd)
378{
ee2411a8 379 struct gfs2_sbd *sdp = qd->qd_sbd;
b3b94faa 380
2d9e7230 381 spin_lock(&sdp->sd_bitmap_lock);
b3b94faa
DT
382 gfs2_assert(sdp, qd->qd_slot_count);
383 if (!--qd->qd_slot_count) {
ee2411a8 384 BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
b3b94faa
DT
385 qd->qd_slot = -1;
386 }
2d9e7230 387 spin_unlock(&sdp->sd_bitmap_lock);
b3b94faa
DT
388}
389
390static int bh_get(struct gfs2_quota_data *qd)
391{
481f6e7d 392 struct gfs2_sbd *sdp = qd->qd_sbd;
c360abbb
BP
393 struct inode *inode = sdp->sd_qc_inode;
394 struct gfs2_inode *ip = GFS2_I(inode);
b3b94faa 395 unsigned int block, offset;
b3b94faa 396 struct buffer_head *bh;
c360abbb 397 struct iomap iomap = { };
b3b94faa
DT
398 int error;
399
f55ab26a 400 mutex_lock(&sdp->sd_quota_mutex);
b3b94faa
DT
401
402 if (qd->qd_bh_count++) {
f55ab26a 403 mutex_unlock(&sdp->sd_quota_mutex);
b3b94faa
DT
404 return 0;
405 }
406
407 block = qd->qd_slot / sdp->sd_qc_per_block;
0d0868bd 408 offset = qd->qd_slot % sdp->sd_qc_per_block;
b3b94faa 409
c360abbb
BP
410 error = gfs2_iomap_get(inode,
411 (loff_t)block << inode->i_blkbits,
412 i_blocksize(inode), &iomap);
b3b94faa
DT
413 if (error)
414 goto fail;
c360abbb
BP
415 error = -ENOENT;
416 if (iomap.type != IOMAP_MAPPED)
417 goto fail;
418
419 error = gfs2_meta_read(ip->i_gl, iomap.addr >> inode->i_blkbits,
420 DIO_WAIT, 0, &bh);
b3b94faa
DT
421 if (error)
422 goto fail;
423 error = -EIO;
424 if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
425 goto fail_brelse;
426
427 qd->qd_bh = bh;
428 qd->qd_bh_qc = (struct gfs2_quota_change *)
429 (bh->b_data + sizeof(struct gfs2_meta_header) +
430 offset * sizeof(struct gfs2_quota_change));
431
2e95b665 432 mutex_unlock(&sdp->sd_quota_mutex);
b3b94faa
DT
433
434 return 0;
435
a91ea69f 436fail_brelse:
b3b94faa 437 brelse(bh);
a91ea69f 438fail:
b3b94faa 439 qd->qd_bh_count--;
f55ab26a 440 mutex_unlock(&sdp->sd_quota_mutex);
b3b94faa
DT
441 return error;
442}
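/*
 * Layout note with illustrative numbers: each block of the per-node quota
 * change file holds sd_qc_per_block change records after its metadata
 * header.  If, say, sd_qc_per_block were 60 (the real value depends on the
 * block size), slot 125 would live in block 125 / 60 = 2 at record offset
 * 125 % 60 = 5, which is exactly the block/offset split computed at the top
 * of bh_get() above.
 */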
443
444static void bh_put(struct gfs2_quota_data *qd)
445{
481f6e7d 446 struct gfs2_sbd *sdp = qd->qd_sbd;
b3b94faa 447
f55ab26a 448 mutex_lock(&sdp->sd_quota_mutex);
b3b94faa
DT
449 gfs2_assert(sdp, qd->qd_bh_count);
450 if (!--qd->qd_bh_count) {
451 brelse(qd->qd_bh);
452 qd->qd_bh = NULL;
453 qd->qd_bh_qc = NULL;
454 }
f55ab26a 455 mutex_unlock(&sdp->sd_quota_mutex);
b3b94faa
DT
456}
457
1bf59bf6
SW
458static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
459 u64 *sync_gen)
460{
461 if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
462 !test_bit(QDF_CHANGE, &qd->qd_flags) ||
463 (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
464 return 0;
465
2147dbfd
SW
466 if (!lockref_get_not_dead(&qd->qd_lockref))
467 return 0;
1bf59bf6 468
2147dbfd 469 list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
1bf59bf6 470 set_bit(QDF_LOCKED, &qd->qd_flags);
1bf59bf6 471 qd->qd_change_sync = qd->qd_change;
2d9e7230 472 slot_hold(qd);
1bf59bf6
SW
473 return 1;
474}
475
b3b94faa
DT
476static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
477{
b846f2d7 478 struct gfs2_quota_data *qd = NULL, *iter;
b3b94faa 479 int error;
b3b94faa
DT
480
481 *qdp = NULL;
482
bc98a42c 483 if (sb_rdonly(sdp->sd_vfs))
b3b94faa
DT
484 return 0;
485
7d80823e 486 spin_lock(&qd_lock);
b3b94faa 487
b846f2d7
JK
488 list_for_each_entry(iter, &sdp->sd_quota_list, qd_list) {
489 if (qd_check_sync(sdp, iter, &sdp->sd_quota_sync_gen)) {
490 qd = iter;
1bf59bf6 491 break;
b846f2d7 492 }
b3b94faa
DT
493 }
494
7d80823e 495 spin_unlock(&qd_lock);
b3b94faa
DT
496
497 if (qd) {
b3b94faa
DT
498 error = bh_get(qd);
499 if (error) {
500 clear_bit(QDF_LOCKED, &qd->qd_flags);
501 slot_put(qd);
502 qd_put(qd);
503 return error;
504 }
505 }
506
507 *qdp = qd;
508
509 return 0;
510}
511
b3b94faa
DT
512static void qd_unlock(struct gfs2_quota_data *qd)
513{
481f6e7d 514 gfs2_assert_warn(qd->qd_sbd, test_bit(QDF_LOCKED, &qd->qd_flags));
b3b94faa
DT
515 clear_bit(QDF_LOCKED, &qd->qd_flags);
516 bh_put(qd);
517 slot_put(qd);
518 qd_put(qd);
519}
520
b59c8b6f 521static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
b3b94faa
DT
522 struct gfs2_quota_data **qdp)
523{
524 int error;
525
05e0a60d 526 error = qd_get(sdp, qid, qdp);
b3b94faa
DT
527 if (error)
528 return error;
529
530 error = slot_get(*qdp);
531 if (error)
532 goto fail;
533
534 error = bh_get(*qdp);
535 if (error)
536 goto fail_slot;
537
538 return 0;
539
a91ea69f 540fail_slot:
b3b94faa 541 slot_put(*qdp);
a91ea69f 542fail:
b3b94faa
DT
543 qd_put(*qdp);
544 return error;
545}
546
547static void qdsb_put(struct gfs2_quota_data *qd)
548{
549 bh_put(qd);
550 slot_put(qd);
551 qd_put(qd);
552}
553
/**
 * gfs2_qa_get - make sure we have a quota allocation data structure,
 *               if necessary
 * @ip: the inode for this reservation
 */
2fba46a0 559int gfs2_qa_get(struct gfs2_inode *ip)
b54e9a0b 560{
b54e9a0b 561 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
5fcff61e 562 struct inode *inode = &ip->i_inode;
b54e9a0b
BP
563
564 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
565 return 0;
566
5fcff61e 567 spin_lock(&inode->i_lock);
b54e9a0b 568 if (ip->i_qadata == NULL) {
5fcff61e
BP
569 struct gfs2_qadata *tmp;
570
571 spin_unlock(&inode->i_lock);
572 tmp = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
573 if (!tmp)
574 return -ENOMEM;
575
576 spin_lock(&inode->i_lock);
577 if (ip->i_qadata == NULL)
578 ip->i_qadata = tmp;
579 else
580 kmem_cache_free(gfs2_qadata_cachep, tmp);
b54e9a0b 581 }
2fba46a0 582 ip->i_qadata->qa_ref++;
5fcff61e
BP
583 spin_unlock(&inode->i_lock);
584 return 0;
b54e9a0b
BP
585}
586
2fba46a0 587void gfs2_qa_put(struct gfs2_inode *ip)
b54e9a0b 588{
5fcff61e
BP
589 struct inode *inode = &ip->i_inode;
590
591 spin_lock(&inode->i_lock);
2fba46a0 592 if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) {
b54e9a0b
BP
593 kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
594 ip->i_qadata = NULL;
595 }
5fcff61e 596 spin_unlock(&inode->i_lock);
b54e9a0b
BP
597}
598
7c06b5d6 599int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
b3b94faa 600{
feaa7bba 601 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
5407e242 602 struct gfs2_quota_data **qd;
b3b94faa
DT
603 int error;
604
b54e9a0b
BP
605 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
606 return 0;
607
2fba46a0
BP
608 error = gfs2_qa_get(ip);
609 if (error)
610 return error;
5407e242 611
b54e9a0b 612 qd = ip->i_qadata->qa_qd;
5407e242 613
b54e9a0b 614 if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
2fba46a0
BP
615 gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) {
616 error = -EIO;
dac0fc31 617 gfs2_qa_put(ip);
2fba46a0
BP
618 goto out;
619 }
b3b94faa 620
b59c8b6f 621 error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
b3b94faa 622 if (error)
2fba46a0 623 goto out_unhold;
b54e9a0b 624 ip->i_qadata->qa_qd_num++;
b3b94faa
DT
625 qd++;
626
b59c8b6f 627 error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
b3b94faa 628 if (error)
2fba46a0 629 goto out_unhold;
b54e9a0b 630 ip->i_qadata->qa_qd_num++;
b3b94faa
DT
631 qd++;
632
6b24c0d2
EB
633 if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
634 !uid_eq(uid, ip->i_inode.i_uid)) {
b59c8b6f 635 error = qdsb_get(sdp, make_kqid_uid(uid), qd);
b3b94faa 636 if (error)
2fba46a0 637 goto out_unhold;
b54e9a0b 638 ip->i_qadata->qa_qd_num++;
b3b94faa
DT
639 qd++;
640 }
641
6b24c0d2
EB
642 if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
643 !gid_eq(gid, ip->i_inode.i_gid)) {
b59c8b6f 644 error = qdsb_get(sdp, make_kqid_gid(gid), qd);
b3b94faa 645 if (error)
2fba46a0 646 goto out_unhold;
b54e9a0b 647 ip->i_qadata->qa_qd_num++;
b3b94faa
DT
648 qd++;
649 }
650
2fba46a0 651out_unhold:
b3b94faa
DT
652 if (error)
653 gfs2_quota_unhold(ip);
2fba46a0 654out:
b3b94faa
DT
655 return error;
656}
657
658void gfs2_quota_unhold(struct gfs2_inode *ip)
659{
feaa7bba 660 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
b58bf407 661 u32 x;
b3b94faa 662
b54e9a0b 663 if (ip->i_qadata == NULL)
5407e242 664 return;
2fba46a0 665
b3b94faa
DT
666 gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
667
b54e9a0b
BP
668 for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
669 qdsb_put(ip->i_qadata->qa_qd[x]);
670 ip->i_qadata->qa_qd[x] = NULL;
b3b94faa 671 }
b54e9a0b 672 ip->i_qadata->qa_qd_num = 0;
2fba46a0 673 gfs2_qa_put(ip);
b3b94faa
DT
674}
675
676static int sort_qd(const void *a, const void *b)
677{
48fac179
SW
678 const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
679 const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;
b3b94faa 680
05e0a60d 681 if (qid_lt(qd_a->qd_id, qd_b->qd_id))
48fac179 682 return -1;
05e0a60d 683 if (qid_lt(qd_b->qd_id, qd_a->qd_id))
48fac179 684 return 1;
48fac179 685 return 0;
b3b94faa
DT
686}
687
432928c9 688static void do_qc(struct gfs2_quota_data *qd, s64 change, int qc_type)
b3b94faa 689{
481f6e7d 690 struct gfs2_sbd *sdp = qd->qd_sbd;
feaa7bba 691 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
b3b94faa 692 struct gfs2_quota_change *qc = qd->qd_bh_qc;
cd915493 693 s64 x;
b3b94faa 694
f55ab26a 695 mutex_lock(&sdp->sd_quota_mutex);
350a9b0a 696 gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);
b3b94faa
DT
697
698 if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
699 qc->qc_change = 0;
700 qc->qc_flags = 0;
05e0a60d 701 if (qd->qd_id.type == USRQUOTA)
b3b94faa 702 qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
05e0a60d 703 qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
b3b94faa
DT
704 }
705
b44b84d7 706 x = be64_to_cpu(qc->qc_change) + change;
b3b94faa
DT
707 qc->qc_change = cpu_to_be64(x);
708
7d80823e 709 spin_lock(&qd_lock);
b3b94faa 710 qd->qd_change = x;
7d80823e 711 spin_unlock(&qd_lock);
b3b94faa 712
432928c9
BP
713 if (qc_type == QC_CHANGE) {
714 if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
715 qd_hold(qd);
716 slot_hold(qd);
717 }
718 } else {
b3b94faa
DT
719 gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
720 clear_bit(QDF_CHANGE, &qd->qd_flags);
721 qc->qc_flags = 0;
722 qc->qc_id = 0;
723 slot_put(qd);
724 qd_put(qd);
b3b94faa 725 }
907b9bce 726
9cde2898
AD
727 if (change < 0) /* Reset quiet flag if we freed some blocks */
728 clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
f55ab26a 729 mutex_unlock(&sdp->sd_quota_mutex);
b3b94faa
DT
730}
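/*
 * Example of how the per-node change record evolves (illustrative values):
 * a transaction that allocates 10 blocks calls do_qc(qd, 10, QC_CHANGE) and
 * a later one that frees 4 calls do_qc(qd, -4, QC_CHANGE), leaving qc_change
 * at +6.  When the syncer picks the entry up, do_sync() folds the +6 into
 * the global quota file and then calls do_qc(qd, -qd->qd_change_sync,
 * QC_SYNC), which zeroes the local record and drops the QDF_CHANGE slot and
 * reference counts taken above.
 */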
731
d96dad27 732static int gfs2_write_buf_to_page(struct gfs2_sbd *sdp, unsigned long index,
39a72580
AD
733 unsigned off, void *buf, unsigned bytes)
734{
d96dad27 735 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
39a72580 736 struct inode *inode = &ip->i_inode;
39a72580
AD
737 struct address_space *mapping = inode->i_mapping;
738 struct page *page;
739 struct buffer_head *bh;
39a72580
AD
740 u64 blk;
741 unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
742 unsigned to_write = bytes, pg_off = off;
39a72580 743
09cbfeaf 744 blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
39a72580
AD
745 boff = off % bsize;
746
111c7d27 747 page = grab_cache_page(mapping, index);
39a72580
AD
748 if (!page)
749 return -ENOMEM;
750 if (!page_has_buffers(page))
751 create_empty_buffers(page, bsize, 0);
752
753 bh = page_buffers(page);
f0418e4b 754 for(;;) {
39a72580
AD
755 /* Find the beginning block within the page */
756 if (pg_off >= ((bnum * bsize) + bsize)) {
757 bh = bh->b_this_page;
758 bnum++;
759 blk++;
760 continue;
761 }
762 if (!buffer_mapped(bh)) {
763 gfs2_block_map(inode, blk, bh, 1);
764 if (!buffer_mapped(bh))
765 goto unlock_out;
766 /* If it's a newly allocated disk block, zero it */
767 if (buffer_new(bh))
768 zero_user(page, bnum * bsize, bh->b_size);
769 }
770 if (PageUptodate(page))
771 set_buffer_uptodate(bh);
86a020cc
ZY
772 if (bh_read(bh, REQ_META | REQ_PRIO) < 0)
773 goto unlock_out;
768963ab 774 gfs2_trans_add_data(ip->i_gl, bh);
39a72580
AD
775
776 /* If we need to write to the next block as well */
777 if (to_write > (bsize - boff)) {
778 pg_off += (bsize - boff);
779 to_write -= (bsize - boff);
780 boff = pg_off % bsize;
781 continue;
782 }
f0418e4b 783 break;
39a72580
AD
784 }
785
786 /* Write to the page, now that we have setup the buffer(s) */
d68d0c6c 787 memcpy_to_page(page, off, buf, bytes);
39a72580 788 flush_dcache_page(page);
39a72580 789 unlock_page(page);
09cbfeaf 790 put_page(page);
39a72580
AD
791
792 return 0;
793
794unlock_out:
795 unlock_page(page);
09cbfeaf 796 put_page(page);
39a72580
AD
797 return -EIO;
798}
799
adfd2b5e 800static int gfs2_write_disk_quota(struct gfs2_sbd *sdp, struct gfs2_quota *qp,
39a72580
AD
801 loff_t loc)
802{
803 unsigned long pg_beg;
804 unsigned pg_off, nbytes, overflow = 0;
e34c16c9 805 int error;
39a72580
AD
806 void *ptr;
807
808 nbytes = sizeof(struct gfs2_quota);
809
09cbfeaf 810 pg_beg = loc >> PAGE_SHIFT;
45eb0504 811 pg_off = offset_in_page(loc);
39a72580
AD
812
813 /* If the quota straddles a page boundary, split the write in two */
e34c16c9 814 if ((pg_off + nbytes) > PAGE_SIZE)
09cbfeaf 815 overflow = (pg_off + nbytes) - PAGE_SIZE;
39a72580
AD
816
817 ptr = qp;
d96dad27 818 error = gfs2_write_buf_to_page(sdp, pg_beg, pg_off, ptr,
39a72580
AD
819 nbytes - overflow);
820 /* If there's an overflow, write the remaining bytes to the next page */
e34c16c9 821 if (!error && overflow)
d96dad27 822 error = gfs2_write_buf_to_page(sdp, pg_beg + 1, 0,
39a72580
AD
823 ptr + nbytes - overflow,
824 overflow);
825 return error;
826}
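/*
 * Example of the split above (illustrative offsets): with 4096-byte pages, a
 * quota record that starts 16 bytes before the end of a page is written with
 * two calls to gfs2_write_buf_to_page() - the first 16 bytes into the tail
 * of this page and the remaining (record size - 16) bytes at offset 0 of the
 * following page.  Records that fit inside one page take the single-call
 * path because overflow stays 0.
 */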
827
/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @sdp: The superblock
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */

ee1768e4 842static int gfs2_adjust_quota(struct gfs2_sbd *sdp, loff_t loc,
e285c100 843 s64 change, struct gfs2_quota_data *qd,
14bf61ff 844 struct qc_dqblk *fdq)
18ec7d5c 845{
ee1768e4 846 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
feaa7bba 847 struct inode *inode = &ip->i_inode;
951b4bd5 848 struct gfs2_quota q;
39a72580 849 int err;
e285c100 850 u64 size;
18ec7d5c 851
891a8e93 852 if (gfs2_is_stuffed(ip)) {
7a607a41 853 err = gfs2_unstuff_dinode(ip);
891a8e93
SW
854 if (err)
855 return err;
856 }
7e619bc3
AD
857
858 memset(&q, 0, sizeof(struct gfs2_quota));
4306629e 859 err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
7e619bc3
AD
860 if (err < 0)
861 return err;
862
39a72580 863 loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
7e619bc3 864 err = -EIO;
951b4bd5 865 be64_add_cpu(&q.qu_value, change);
1bdf4535 866 if (((s64)be64_to_cpu(q.qu_value)) < 0)
39a72580 867 q.qu_value = 0; /* Never go negative on quota usage */
951b4bd5 868 qd->qd_qb.qb_value = q.qu_value;
7e619bc3 869 if (fdq) {
14bf61ff
JK
870 if (fdq->d_fieldmask & QC_SPC_SOFT) {
871 q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
951b4bd5 872 qd->qd_qb.qb_warn = q.qu_warn;
7e619bc3 873 }
14bf61ff
JK
874 if (fdq->d_fieldmask & QC_SPC_HARD) {
875 q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
951b4bd5 876 qd->qd_qb.qb_limit = q.qu_limit;
7e619bc3 877 }
14bf61ff
JK
878 if (fdq->d_fieldmask & QC_SPACE) {
879 q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
951b4bd5 880 qd->qd_qb.qb_value = q.qu_value;
802ec9b6 881 }
7e619bc3
AD
882 }
883
adfd2b5e 884 err = gfs2_write_disk_quota(sdp, &q, loc);
39a72580
AD
885 if (!err) {
886 size = loc + sizeof(struct gfs2_quota);
887 if (size > inode->i_size)
888 i_size_write(inode, size);
078cd827 889 inode->i_mtime = inode->i_atime = current_time(inode);
39a72580
AD
890 mark_inode_dirty(inode);
891 set_bit(QDF_REFRESH, &qd->qd_flags);
7e619bc3 892 }
e285c100 893
18ec7d5c
SW
894 return err;
895}
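/*
 * Worked example for the unit conversion above (illustrative numbers): the
 * qc_dqblk fields carry bytes while the on-disk gfs2_quota fields count
 * filesystem blocks, so with a 4096-byte block size (sb_bsize_shift == 12) a
 * 1 GiB d_spc_softlimit is stored as 1073741824 >> 12 = 262144 blocks in
 * qu_warn.  gfs2_get_dqblk() below shifts in the opposite direction when
 * reporting the limits back.
 */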
896
b3b94faa
DT
897static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
898{
481f6e7d 899 struct gfs2_sbd *sdp = (*qda)->qd_sbd;
feaa7bba 900 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
7b9cff46 901 struct gfs2_alloc_parms ap = { .aflags = 0, };
b3b94faa
DT
902 unsigned int data_blocks, ind_blocks;
903 struct gfs2_holder *ghs, i_gh;
904 unsigned int qx, x;
905 struct gfs2_quota_data *qd;
71f890f7 906 unsigned reserved;
f42faf4f 907 loff_t offset;
20b95bf2 908 unsigned int nalloc = 0, blocks;
b3b94faa
DT
909 int error;
910
2fba46a0 911 error = gfs2_qa_get(ip);
0a305e49
BP
912 if (error)
913 return error;
914
b3b94faa
DT
915 gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
916 &data_blocks, &ind_blocks);
917
6da2ec56 918 ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
2fba46a0
BP
919 if (!ghs) {
920 error = -ENOMEM;
921 goto out;
922 }
b3b94faa
DT
923
924 sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
5955102c 925 inode_lock(&ip->i_inode);
b3b94faa 926 for (qx = 0; qx < num_qd; qx++) {
1e72c0f7 927 error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
b3b94faa
DT
928 GL_NOCACHE, &ghs[qx]);
929 if (error)
2fba46a0 930 goto out_dq;
b3b94faa
DT
931 }
932
933 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
934 if (error)
2fba46a0 935 goto out_dq;
b3b94faa
DT
936
937 for (x = 0; x < num_qd; x++) {
b3b94faa 938 offset = qd2offset(qda[x]);
461cb419
BP
939 if (gfs2_write_alloc_required(ip, offset,
940 sizeof(struct gfs2_quota)))
b3b94faa
DT
941 nalloc++;
942 }
943
20b95bf2
AD
944 /*
945 * 1 blk for unstuffing inode if stuffed. We add this extra
946 * block to the reservation unconditionally. If the inode
947 * doesn't need unstuffing, the block will be released to the
948 * rgrp since it won't be allocated during the transaction
949 */
7e619bc3
AD
950 /* +3 in the end for unstuffing block, inode size update block
951 * and another block in case quota straddles page boundary and
952 * two blocks need to be updated instead of 1 */
953 blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
b3b94faa 954
71f890f7 955 reserved = 1 + (nalloc * (data_blocks + ind_blocks));
7b9cff46
SW
956 ap.target = reserved;
957 error = gfs2_inplace_reserve(ip, &ap);
20b95bf2
AD
958 if (error)
959 goto out_alloc;
b3b94faa 960
20b95bf2 961 if (nalloc)
71f890f7 962 blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;
20b95bf2
AD
963
964 error = gfs2_trans_begin(sdp, blocks, 0);
965 if (error)
966 goto out_ipres;
b3b94faa
DT
967
968 for (x = 0; x < num_qd; x++) {
b3b94faa
DT
969 qd = qda[x];
970 offset = qd2offset(qd);
ee1768e4
BP
971 error = gfs2_adjust_quota(sdp, offset, qd->qd_change_sync, qd,
972 NULL);
18ec7d5c 973 if (error)
b3b94faa 974 goto out_end_trans;
b3b94faa 975
432928c9 976 do_qc(qd, -qd->qd_change_sync, QC_SYNC);
662e3a55 977 set_bit(QDF_REFRESH, &qd->qd_flags);
b3b94faa
DT
978 }
979
980 error = 0;
981
a91ea69f 982out_end_trans:
b3b94faa 983 gfs2_trans_end(sdp);
a91ea69f 984out_ipres:
20b95bf2 985 gfs2_inplace_release(ip);
a91ea69f 986out_alloc:
b3b94faa 987 gfs2_glock_dq_uninit(&i_gh);
2fba46a0 988out_dq:
b3b94faa
DT
989 while (qx--)
990 gfs2_glock_dq_uninit(&ghs[qx]);
5955102c 991 inode_unlock(&ip->i_inode);
b3b94faa 992 kfree(ghs);
c1696fb8 993 gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
805c0907 994 GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
2fba46a0
BP
995out:
996 gfs2_qa_put(ip);
b3b94faa
DT
997 return error;
998}
999
e285c100
SW
1000static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
1001{
1002 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1003 struct gfs2_quota q;
1004 struct gfs2_quota_lvb *qlvb;
1005 loff_t pos;
1006 int error;
1007
1008 memset(&q, 0, sizeof(struct gfs2_quota));
1009 pos = qd2offset(qd);
4306629e 1010 error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
e285c100
SW
1011 if (error < 0)
1012 return error;
1013
4e2f8849 1014 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
e285c100
SW
1015 qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
1016 qlvb->__pad = 0;
1017 qlvb->qb_limit = q.qu_limit;
1018 qlvb->qb_warn = q.qu_warn;
1019 qlvb->qb_value = q.qu_value;
1020 qd->qd_qb = *qlvb;
1021
1022 return 0;
1023}
1024
b3b94faa
DT
1025static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
1026 struct gfs2_holder *q_gh)
1027{
481f6e7d 1028 struct gfs2_sbd *sdp = qd->qd_sbd;
feaa7bba 1029 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
b3b94faa 1030 struct gfs2_holder i_gh;
b3b94faa
DT
1031 int error;
1032
481f6e7d 1033 gfs2_assert_warn(sdp, sdp == qd->qd_gl->gl_name.ln_sbd);
a91ea69f 1034restart:
b3b94faa
DT
1035 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
1036 if (error)
1037 return error;
1038
30133177
AD
1039 if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
1040 force_refresh = FORCE;
1041
4e2f8849 1042 qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
b3b94faa 1043
e9fc2aa0 1044 if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
b3b94faa 1045 gfs2_glock_dq_uninit(q_gh);
91094d0f
SW
1046 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
1047 GL_NOCACHE, q_gh);
b3b94faa
DT
1048 if (error)
1049 return error;
1050
e9fc2aa0 1051 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
b3b94faa
DT
1052 if (error)
1053 goto fail;
1054
e285c100
SW
1055 error = update_qd(sdp, qd);
1056 if (error)
1e72c0f7 1057 goto fail_gunlock;
b3b94faa 1058
e285c100 1059 gfs2_glock_dq_uninit(&i_gh);
91094d0f
SW
1060 gfs2_glock_dq_uninit(q_gh);
1061 force_refresh = 0;
1062 goto restart;
b3b94faa
DT
1063 }
1064
1065 return 0;
1066
a91ea69f 1067fail_gunlock:
b3b94faa 1068 gfs2_glock_dq_uninit(&i_gh);
a91ea69f 1069fail:
b3b94faa 1070 gfs2_glock_dq_uninit(q_gh);
b3b94faa
DT
1071 return error;
1072}
1073
7c06b5d6 1074int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
b3b94faa 1075{
feaa7bba 1076 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
662e3a55 1077 struct gfs2_quota_data *qd;
b58bf407 1078 u32 x;
b3b94faa
DT
1079 int error = 0;
1080
eef46ab7
BP
1081 if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON &&
1082 sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET)
b3b94faa
DT
1083 return 0;
1084
b54e9a0b
BP
1085 error = gfs2_quota_hold(ip, uid, gid);
1086 if (error)
1087 return error;
1088
1089 sort(ip->i_qadata->qa_qd, ip->i_qadata->qa_qd_num,
5407e242 1090 sizeof(struct gfs2_quota_data *), sort_qd, NULL);
b3b94faa 1091
b54e9a0b
BP
1092 for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1093 qd = ip->i_qadata->qa_qd[x];
1094 error = do_glock(qd, NO_FORCE, &ip->i_qadata->qa_qd_ghs[x]);
b3b94faa
DT
1095 if (error)
1096 break;
1097 }
1098
1099 if (!error)
1100 set_bit(GIF_QD_LOCKED, &ip->i_flags);
1101 else {
1102 while (x--)
b54e9a0b 1103 gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
b3b94faa
DT
1104 gfs2_quota_unhold(ip);
1105 }
1106
1107 return error;
1108}
1109
2a4f6511 1110static bool need_sync(struct gfs2_quota_data *qd)
b3b94faa 1111{
481f6e7d 1112 struct gfs2_sbd *sdp = qd->qd_sbd;
b3b94faa 1113 struct gfs2_tune *gt = &sdp->sd_tune;
cd915493 1114 s64 value;
b3b94faa 1115 unsigned int num, den;
b3b94faa
DT
1116
1117 if (!qd->qd_qb.qb_limit)
2a4f6511 1118 return false;
b3b94faa 1119
7d80823e 1120 spin_lock(&qd_lock);
b3b94faa 1121 value = qd->qd_change;
7d80823e 1122 spin_unlock(&qd_lock);
b3b94faa
DT
1123
1124 spin_lock(&gt->gt_spin);
1125 num = gt->gt_quota_scale_num;
1126 den = gt->gt_quota_scale_den;
1127 spin_unlock(&gt->gt_spin);
1128
9f494e9b 1129 if (value <= 0)
2a4f6511 1130 return false;
e9fc2aa0
SW
1131 else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
1132 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
2a4f6511 1133 return false;
b3b94faa
DT
1134 else {
1135 value *= gfs2_jindex_size(sdp) * num;
4abaca17 1136 value = div_s64(value, den);
e9fc2aa0 1137 value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
cd915493 1138 if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
2a4f6511 1139 return false;
b3b94faa
DT
1140 }
1141
2a4f6511 1142 return true;
b3b94faa
DT
1143}
1144
1145void gfs2_quota_unlock(struct gfs2_inode *ip)
1146{
aabd7c72 1147 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
b3b94faa
DT
1148 struct gfs2_quota_data *qda[4];
1149 unsigned int count = 0;
b58bf407 1150 u32 x;
aabd7c72 1151 int found;
b3b94faa
DT
1152
1153 if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
c9cb9e38 1154 return;
b3b94faa 1155
b54e9a0b 1156 for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
b3b94faa 1157 struct gfs2_quota_data *qd;
2a4f6511 1158 bool sync;
b3b94faa 1159
b54e9a0b 1160 qd = ip->i_qadata->qa_qd[x];
b3b94faa
DT
1161 sync = need_sync(qd);
1162
b54e9a0b 1163 gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
aabd7c72
SW
1164 if (!sync)
1165 continue;
1166
7d80823e 1167 spin_lock(&qd_lock);
aabd7c72 1168 found = qd_check_sync(sdp, qd, NULL);
7d80823e 1169 spin_unlock(&qd_lock);
aabd7c72
SW
1170
1171 if (!found)
1172 continue;
1173
1174 gfs2_assert_warn(sdp, qd->qd_change_sync);
1175 if (bh_get(qd)) {
1176 clear_bit(QDF_LOCKED, &qd->qd_flags);
1177 slot_put(qd);
1178 qd_put(qd);
1179 continue;
1180 }
b3b94faa 1181
aabd7c72 1182 qda[count++] = qd;
b3b94faa
DT
1183 }
1184
1185 if (count) {
1186 do_sync(count, qda);
1187 for (x = 0; x < count; x++)
1188 qd_unlock(qda[x]);
1189 }
1190
b3b94faa
DT
1191 gfs2_quota_unhold(ip);
1192}
1193
1194#define MAX_LINE 256
1195
1196static int print_message(struct gfs2_quota_data *qd, char *type)
1197{
481f6e7d 1198 struct gfs2_sbd *sdp = qd->qd_sbd;
b3b94faa 1199
eef46ab7
BP
1200 if (sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET)
1201 fs_info(sdp, "quota %s for %s %u\n",
1202 type,
1203 (qd->qd_id.type == USRQUOTA) ? "user" : "group",
1204 from_kqid(&init_user_ns, qd->qd_id));
b3b94faa
DT
1205
1206 return 0;
1207}
1208
/**
 * gfs2_quota_check - check if allocating new blocks will exceed quota
 * @ip: The inode for which this check is being performed
 * @uid: The uid to check against
 * @gid: The gid to check against
 * @ap: The allocation parameters. ap->target contains the requested
 *      blocks. ap->min_target, if set, contains the minimum blks
 *      requested.
 *
 * Returns: 0 on success.
 *                  min_req = ap->min_target ? ap->min_target : ap->target;
 *                  quota must allow at least min_req blks for success and
 *                  ap->allowed is set to the number of blocks allowed
 *
 *          -EDQUOT otherwise, quota violation. ap->allowed is set to number
 *                  of blocks available.
 */
b8fbf471
AD
1226int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
1227 struct gfs2_alloc_parms *ap)
b3b94faa 1228{
feaa7bba 1229 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
b3b94faa 1230 struct gfs2_quota_data *qd;
25435e5e 1231 s64 value, warn, limit;
b58bf407 1232 u32 x;
b3b94faa
DT
1233 int error = 0;
1234
25435e5e 1235 ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
b3b94faa
DT
1236 if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
1237 return 0;
1238
b54e9a0b
BP
1239 for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1240 qd = ip->i_qadata->qa_qd[x];
b3b94faa 1241
05e0a60d
EB
1242 if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1243 qid_eq(qd->qd_id, make_kqid_gid(gid))))
b3b94faa
DT
1244 continue;
1245
25435e5e
AD
1246 warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
1247 limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
e9fc2aa0 1248 value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
7d80823e 1249 spin_lock(&qd_lock);
25435e5e 1250 value += qd->qd_change;
7d80823e 1251 spin_unlock(&qd_lock);
b3b94faa 1252
25435e5e
AD
1253 if (limit > 0 && (limit - value) < ap->allowed)
1254 ap->allowed = limit - value;
1255 /* If we can't meet the target */
1256 if (limit && limit < (value + (s64)ap->target)) {
1257 /* If no min_target specified or we don't meet
1258 * min_target, return -EDQUOT */
1259 if (!ap->min_target || ap->min_target > ap->allowed) {
9cde2898
AD
1260 if (!test_and_set_bit(QDF_QMSG_QUIET,
1261 &qd->qd_flags)) {
1262 print_message(qd, "exceeded");
1263 quota_send_warning(qd->qd_id,
1264 sdp->sd_vfs->s_dev,
1265 QUOTA_NL_BHARDWARN);
1266 }
25435e5e
AD
1267 error = -EDQUOT;
1268 break;
1269 }
1270 } else if (warn && warn < value &&
b3b94faa 1271 time_after_eq(jiffies, qd->qd_last_warn +
25435e5e
AD
1272 gfs2_tune_get(sdp, gt_quota_warn_period)
1273 * HZ)) {
05e0a60d 1274 quota_send_warning(qd->qd_id,
2ec46505 1275 sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
b3b94faa
DT
1276 error = print_message(qd, "warning");
1277 qd->qd_last_warn = jiffies;
1278 }
1279 }
b3b94faa
DT
1280 return error;
1281}
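/*
 * Illustrative outcome of the check above: with 50 blocks of headroom left
 * under the hard limit, a request of ap->target = 100 with ap->min_target =
 * 10 does not fail, because min_target fits inside ap->allowed = 50; the
 * caller is then expected to trim its allocation to ap->allowed.  The same
 * request with no min_target set returns -EDQUOT.
 */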
1282
cd915493 1283void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
7c06b5d6 1284 kuid_t uid, kgid_t gid)
b3b94faa 1285{
b3b94faa 1286 struct gfs2_quota_data *qd;
b58bf407 1287 u32 x;
b54e9a0b 1288 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
b3b94faa 1289
eef46ab7
BP
1290 if ((sdp->sd_args.ar_quota != GFS2_QUOTA_ON &&
1291 sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET) ||
b54e9a0b 1292 gfs2_assert_warn(sdp, change))
b3b94faa 1293 return;
383f01fb 1294 if (ip->i_diskflags & GFS2_DIF_SYSTEM)
b3b94faa
DT
1295 return;
1296
f9615fe3
BP
1297 if (gfs2_assert_withdraw(sdp, ip->i_qadata &&
1298 ip->i_qadata->qa_ref > 0))
1299 return;
b54e9a0b
BP
1300 for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1301 qd = ip->i_qadata->qa_qd[x];
b3b94faa 1302
05e0a60d
EB
1303 if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1304 qid_eq(qd->qd_id, make_kqid_gid(gid))) {
432928c9 1305 do_qc(qd, change, QC_CHANGE);
b3b94faa
DT
1306 }
1307 }
1308}
1309
ceed1723 1310int gfs2_quota_sync(struct super_block *sb, int type)
b3b94faa 1311{
8c42d637 1312 struct gfs2_sbd *sdp = sb->s_fs_info;
b3b94faa 1313 struct gfs2_quota_data **qda;
f3b64b57 1314 unsigned int max_qd = PAGE_SIZE / sizeof(struct gfs2_holder);
b3b94faa
DT
1315 unsigned int num_qd;
1316 unsigned int x;
1317 int error = 0;
1318
b3b94faa
DT
1319 qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
1320 if (!qda)
1321 return -ENOMEM;
1322
e46c772d
SW
1323 mutex_lock(&sdp->sd_quota_sync_mutex);
1324 sdp->sd_quota_sync_gen++;
1325
b3b94faa
DT
1326 do {
1327 num_qd = 0;
1328
1329 for (;;) {
1330 error = qd_fish(sdp, qda + num_qd);
1331 if (error || !qda[num_qd])
1332 break;
1333 if (++num_qd == max_qd)
1334 break;
1335 }
1336
1337 if (num_qd) {
1338 if (!error)
1339 error = do_sync(num_qd, qda);
1340 if (!error)
1341 for (x = 0; x < num_qd; x++)
1342 qda[x]->qd_sync_gen =
1343 sdp->sd_quota_sync_gen;
1344
1345 for (x = 0; x < num_qd; x++)
1346 qd_unlock(qda[x]);
1347 }
1348 } while (!error && num_qd == max_qd);
1349
e46c772d 1350 mutex_unlock(&sdp->sd_quota_sync_mutex);
b3b94faa
DT
1351 kfree(qda);
1352
1353 return error;
1354}
1355
ed87dabc 1356int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
b3b94faa
DT
1357{
1358 struct gfs2_quota_data *qd;
1359 struct gfs2_holder q_gh;
1360 int error;
1361
05e0a60d 1362 error = qd_get(sdp, qid, &qd);
b3b94faa
DT
1363 if (error)
1364 return error;
1365
1366 error = do_glock(qd, FORCE, &q_gh);
1367 if (!error)
1368 gfs2_glock_dq_uninit(&q_gh);
1369
1370 qd_put(qd);
b3b94faa
DT
1371 return error;
1372}
1373
b3b94faa
DT
1374int gfs2_quota_init(struct gfs2_sbd *sdp)
1375{
feaa7bba 1376 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
a2e0f799
SW
1377 u64 size = i_size_read(sdp->sd_qc_inode);
1378 unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
b3b94faa
DT
1379 unsigned int x, slot = 0;
1380 unsigned int found = 0;
c754fbbb 1381 unsigned int hash;
ee2411a8 1382 unsigned int bm_size;
cd915493
SW
1383 u64 dblock;
1384 u32 extlen = 0;
b3b94faa
DT
1385 int error;
1386
a2e0f799 1387 if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
907b9bce 1388 return -EIO;
a2e0f799 1389
b3b94faa 1390 sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
ee2411a8
SW
1391 bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
1392 bm_size *= sizeof(unsigned long);
b3b94faa 1393 error = -ENOMEM;
fcf10d38 1394 sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
ee2411a8 1395 if (sdp->sd_quota_bitmap == NULL)
fcf10d38 1396 sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
88dca4ca 1397 __GFP_ZERO);
b3b94faa
DT
1398 if (!sdp->sd_quota_bitmap)
1399 return error;
1400
b3b94faa
DT
1401 for (x = 0; x < blocks; x++) {
1402 struct buffer_head *bh;
7aed98fb 1403 const struct gfs2_quota_change *qc;
b3b94faa
DT
1404 unsigned int y;
1405
1406 if (!extlen) {
9153dac1
AG
1407 extlen = 32;
1408 error = gfs2_get_extent(&ip->i_inode, x, &dblock, &extlen);
b3b94faa
DT
1409 if (error)
1410 goto fail;
1411 }
b3b94faa 1412 error = -EIO;
7276b3b0
SW
1413 bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
1414 if (!bh)
1415 goto fail;
b3b94faa
DT
1416 if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
1417 brelse(bh);
1418 goto fail;
1419 }
1420
7aed98fb 1421 qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
7276b3b0 1422 for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
b3b94faa 1423 y++, slot++) {
b3b94faa 1424 struct gfs2_quota_data *qd;
7aed98fb
SW
1425 s64 qc_change = be64_to_cpu(qc->qc_change);
1426 u32 qc_flags = be32_to_cpu(qc->qc_flags);
1427 enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
1428 USRQUOTA : GRPQUOTA;
1429 struct kqid qc_id = make_kqid(&init_user_ns, qtype,
1430 be32_to_cpu(qc->qc_id));
1431 qc++;
1432 if (!qc_change)
b3b94faa
DT
1433 continue;
1434
c754fbbb
SW
1435 hash = gfs2_qd_hash(sdp, qc_id);
1436 qd = qd_alloc(hash, sdp, qc_id);
1437 if (qd == NULL) {
b3b94faa
DT
1438 brelse(bh);
1439 goto fail;
1440 }
1441
1442 set_bit(QDF_CHANGE, &qd->qd_flags);
7aed98fb 1443 qd->qd_change = qc_change;
b3b94faa
DT
1444 qd->qd_slot = slot;
1445 qd->qd_slot_count = 1;
b3b94faa 1446
7d80823e 1447 spin_lock(&qd_lock);
ee2411a8 1448 BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
b3b94faa
DT
1449 list_add(&qd->qd_list, &sdp->sd_quota_list);
1450 atomic_inc(&sdp->sd_quota_count);
7d80823e 1451 spin_unlock(&qd_lock);
b3b94faa 1452
c754fbbb
SW
1453 spin_lock_bucket(hash);
1454 hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
1455 spin_unlock_bucket(hash);
1456
b3b94faa
DT
1457 found++;
1458 }
1459
1460 brelse(bh);
1461 dblock++;
1462 extlen--;
1463 }
1464
1465 if (found)
1466 fs_info(sdp, "found %u quota changes\n", found);
1467
1468 return 0;
1469
a91ea69f 1470fail:
b3b94faa
DT
1471 gfs2_quota_cleanup(sdp);
1472 return error;
1473}
1474
b3b94faa
DT
1475void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
1476{
b3b94faa 1477 struct gfs2_quota_data *qd;
961fe342 1478 LIST_HEAD(dispose);
a475c5dd
AG
1479 int count;
1480
1481 BUG_ON(test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));
b3b94faa 1482
7d80823e 1483 spin_lock(&qd_lock);
961fe342 1484 list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
a475c5dd
AG
1485 spin_lock(&qd->qd_lockref.lock);
1486 if (qd->qd_lockref.count != 0) {
1487 spin_unlock(&qd->qd_lockref.lock);
1488 continue;
1489 }
1490 lockref_mark_dead(&qd->qd_lockref);
1491 spin_unlock(&qd->qd_lockref.lock);
1492
2147dbfd 1493 list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
961fe342 1494 list_add(&qd->qd_lru, &dispose);
b3b94faa 1495 }
7d80823e 1496 spin_unlock(&qd_lock);
b3b94faa 1497
faada74a 1498 gfs2_qd_list_dispose(&dispose);
a475c5dd
AG
1499
1500 wait_event_timeout(sdp->sd_kill_wait,
1501 (count = atomic_read(&sdp->sd_quota_count)) == 0,
1502 HZ * 60);
1503
1504 if (count != 0)
1505 fs_err(sdp, "%d left-over quota data objects\n", count);
b3b94faa 1506
3cdcf63e
AV
1507 kvfree(sdp->sd_quota_bitmap);
1508 sdp->sd_quota_bitmap = NULL;
b3b94faa
DT
1509}
1510
37b2c837
SW
1511static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
1512{
1513 if (error == 0 || error == -EROFS)
1514 return;
eb43e660 1515 if (!gfs2_withdrawn(sdp)) {
f34a6135
BP
1516 if (!cmpxchg(&sdp->sd_log_error, 0, error))
1517 fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
942b0cdd
BP
1518 wake_up(&sdp->sd_logd_waitq);
1519 }
37b2c837
SW
1520}
1521
1522static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
8c42d637 1523 int (*fxn)(struct super_block *sb, int type),
37b2c837
SW
1524 unsigned long t, unsigned long *timeo,
1525 unsigned int *new_timeo)
1526{
1527 if (t >= *timeo) {
8c42d637 1528 int error = fxn(sdp->sd_vfs, 0);
37b2c837
SW
1529 quotad_error(sdp, msg, error);
1530 *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
1531 } else {
1532 *timeo -= t;
1533 }
1534}
1535
3d3c10f2
BM
1536void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
1537 if (!sdp->sd_statfs_force_sync) {
1538 sdp->sd_statfs_force_sync = 1;
1539 wake_up(&sdp->sd_quota_wait);
1540 }
1541}
1542
1543
37b2c837
SW
1544/**
1545 * gfs2_quotad - Write cached quota changes into the quota file
c551f66c 1546 * @data: Pointer to GFS2 superblock
37b2c837
SW
1547 *
1548 */
1549
1550int gfs2_quotad(void *data)
1551{
1552 struct gfs2_sbd *sdp = data;
1553 struct gfs2_tune *tune = &sdp->sd_tune;
1554 unsigned long statfs_timeo = 0;
1555 unsigned long quotad_timeo = 0;
1556 unsigned long t = 0;
37b2c837
SW
1557
1558 while (!kthread_should_stop()) {
601ef0d5 1559 if (gfs2_withdrawn(sdp))
fe0690f0
AG
1560 break;
1561
37b2c837 1562 /* Update the master statfs file */
3d3c10f2
BM
1563 if (sdp->sd_statfs_force_sync) {
1564 int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
1565 quotad_error(sdp, "statfs", error);
1566 statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
1567 }
1568 else
1569 quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
1570 &statfs_timeo,
1571 &tune->gt_statfs_quantum);
37b2c837
SW
1572
1573 /* Update quota file */
edd2e9ac 1574 quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
37b2c837
SW
1575 &quotad_timeo, &tune->gt_quota_quantum);
1576
a0acae0e
TH
1577 try_to_freeze();
1578
37b2c837
SW
1579 t = min(quotad_timeo, statfs_timeo);
1580
e4a8b548 1581 t = wait_event_interruptible_timeout(sdp->sd_quota_wait,
fe0690f0
AG
1582 sdp->sd_statfs_force_sync ||
1583 gfs2_withdrawn(sdp) ||
1584 kthread_should_stop(),
e4a8b548
AG
1585 t);
1586
1587 if (sdp->sd_statfs_force_sync)
813e0c46 1588 t = 0;
37b2c837
SW
1589 }
1590
1591 return 0;
1592}
1593
e54b2e2d 1594static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
1d371b5e
SW
1595{
1596 struct gfs2_sbd *sdp = sb->s_fs_info;
1597
e54b2e2d 1598 memset(state, 0, sizeof(*state));
ad6bb90f
CH
1599
1600 switch (sdp->sd_args.ar_quota) {
eef46ab7
BP
1601 case GFS2_QUOTA_QUIET:
1602 fallthrough;
ad6bb90f 1603 case GFS2_QUOTA_ON:
e54b2e2d
JK
1604 state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
1605 state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
df561f66 1606 fallthrough;
ad6bb90f 1607 case GFS2_QUOTA_ACCOUNT:
e54b2e2d
JK
1608 state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED |
1609 QCI_SYSFILE;
1610 state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED |
1611 QCI_SYSFILE;
ad6bb90f
CH
1612 break;
1613 case GFS2_QUOTA_OFF:
1614 break;
1615 }
1d371b5e 1616 if (sdp->sd_quota_inode) {
e54b2e2d
JK
1617 state->s_state[USRQUOTA].ino =
1618 GFS2_I(sdp->sd_quota_inode)->i_no_addr;
1619 state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks;
1d371b5e 1620 }
e54b2e2d
JK
1621 state->s_state[USRQUOTA].nextents = 1; /* unsupported */
1622 state->s_state[GRPQUOTA] = state->s_state[USRQUOTA];
1623 state->s_incoredqs = list_lru_count(&gfs2_qd_lru);
1d371b5e
SW
1624 return 0;
1625}
1626
74a8a103 1627static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
14bf61ff 1628 struct qc_dqblk *fdq)
113d6b3c
SW
1629{
1630 struct gfs2_sbd *sdp = sb->s_fs_info;
1631 struct gfs2_quota_lvb *qlvb;
1632 struct gfs2_quota_data *qd;
1633 struct gfs2_holder q_gh;
1634 int error;
1635
14bf61ff 1636 memset(fdq, 0, sizeof(*fdq));
113d6b3c
SW
1637
1638 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1639 return -ESRCH; /* Crazy XFS error code */
1640
236c64e4
EB
1641 if ((qid.type != USRQUOTA) &&
1642 (qid.type != GRPQUOTA))
113d6b3c
SW
1643 return -EINVAL;
1644
05e0a60d 1645 error = qd_get(sdp, qid, &qd);
113d6b3c
SW
1646 if (error)
1647 return error;
1648 error = do_glock(qd, FORCE, &q_gh);
1649 if (error)
1650 goto out;
1651
4e2f8849 1652 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
14bf61ff
JK
1653 fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
1654 fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
1655 fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
113d6b3c
SW
1656
1657 gfs2_glock_dq_uninit(&q_gh);
1658out:
1659 qd_put(qd);
1660 return error;
1661}
1662
e285c100 1663/* GFS2 only supports a subset of the XFS fields */
14bf61ff 1664#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
e285c100 1665
74a8a103 1666static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
14bf61ff 1667 struct qc_dqblk *fdq)
e285c100
SW
1668{
1669 struct gfs2_sbd *sdp = sb->s_fs_info;
1670 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1671 struct gfs2_quota_data *qd;
1672 struct gfs2_holder q_gh, i_gh;
1673 unsigned int data_blocks, ind_blocks;
1674 unsigned int blocks = 0;
1675 int alloc_required;
e285c100
SW
1676 loff_t offset;
1677 int error;
1678
1679 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1680 return -ESRCH; /* Crazy XFS error code */
1681
236c64e4
EB
1682 if ((qid.type != USRQUOTA) &&
1683 (qid.type != GRPQUOTA))
e285c100 1684 return -EINVAL;
e285c100
SW
1685
1686 if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
1687 return -EINVAL;
e285c100 1688
05e0a60d 1689 error = qd_get(sdp, qid, &qd);
e285c100
SW
1690 if (error)
1691 return error;
1692
2fba46a0 1693 error = gfs2_qa_get(ip);
0a305e49
BP
1694 if (error)
1695 goto out_put;
1696
5955102c 1697 inode_lock(&ip->i_inode);
e285c100
SW
1698 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
1699 if (error)
0a305e49 1700 goto out_unlockput;
e285c100
SW
1701 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1702 if (error)
1703 goto out_q;
1704
1705 /* Check for existing entry, if none then alloc new blocks */
1706 error = update_qd(sdp, qd);
1707 if (error)
1708 goto out_i;
1709
1710 /* If nothing has changed, this is a no-op */
14bf61ff
JK
1711 if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
1712 ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
1713 fdq->d_fieldmask ^= QC_SPC_SOFT;
802ec9b6 1714
14bf61ff
JK
1715 if ((fdq->d_fieldmask & QC_SPC_HARD) &&
1716 ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
1717 fdq->d_fieldmask ^= QC_SPC_HARD;
802ec9b6 1718
14bf61ff
JK
1719 if ((fdq->d_fieldmask & QC_SPACE) &&
1720 ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
1721 fdq->d_fieldmask ^= QC_SPACE;
802ec9b6 1722
e285c100
SW
1723 if (fdq->d_fieldmask == 0)
1724 goto out_i;
1725
1726 offset = qd2offset(qd);
461cb419 1727 alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
e79a46a0
AD
1728 if (gfs2_is_stuffed(ip))
1729 alloc_required = 1;
e285c100 1730 if (alloc_required) {
7b9cff46 1731 struct gfs2_alloc_parms ap = { .aflags = 0, };
e285c100
SW
1732 gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
1733 &data_blocks, &ind_blocks);
564e12b1 1734 blocks = 1 + data_blocks + ind_blocks;
7b9cff46
SW
1735 ap.target = blocks;
1736 error = gfs2_inplace_reserve(ip, &ap);
e285c100 1737 if (error)
564e12b1 1738 goto out_i;
71f890f7 1739 blocks += gfs2_rg_blocks(ip, blocks);
e285c100
SW
1740 }
1741
e79a46a0
AD
1742 /* Some quotas span block boundaries and can update two blocks,
1743 adding an extra block to the transaction to handle such quotas */
1744 error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
e285c100
SW
1745 if (error)
1746 goto out_release;
1747
1748 /* Apply changes */
ee1768e4 1749 error = gfs2_adjust_quota(sdp, offset, 0, qd, fdq);
9cde2898
AD
1750 if (!error)
1751 clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
e285c100
SW
1752
1753 gfs2_trans_end(sdp);
1754out_release:
564e12b1 1755 if (alloc_required)
e285c100 1756 gfs2_inplace_release(ip);
e285c100
SW
1757out_i:
1758 gfs2_glock_dq_uninit(&i_gh);
1759out_q:
1760 gfs2_glock_dq_uninit(&q_gh);
0a305e49 1761out_unlockput:
2fba46a0 1762 gfs2_qa_put(ip);
5955102c 1763 inode_unlock(&ip->i_inode);
0a305e49 1764out_put:
e285c100
SW
1765 qd_put(qd);
1766 return error;
1767}
1768
cc632e7f
SW
1769const struct quotactl_ops gfs2_quotactl_ops = {
1770 .quota_sync = gfs2_quota_sync,
e54b2e2d 1771 .get_state = gfs2_quota_get_state,
b9b2dd36 1772 .get_dqblk = gfs2_get_dqblk,
c472b432 1773 .set_dqblk = gfs2_set_dqblk,
cc632e7f 1774};
c754fbbb
SW
1775
1776void __init gfs2_quota_hash_init(void)
1777{
1778 unsigned i;
1779
1780 for(i = 0; i < GFS2_QD_HASH_SIZE; i++)
1781 INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
1782}