// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space. Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file. This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously. So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota check
 * program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness. "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file. (The default is
 * 60 seconds.) Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit. The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one. This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes with infinite bandwidth) to twice the
 * user's limit. (In practice, the maximum overrun you see should be much
 * less.) A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun. Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */
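
/*
 * Worked example (illustrative, following the bound stated above): with the
 * default quota_scale of one, a user with a 1000-block limit could in theory
 * reach 2000 blocks of usage cluster-wide before every node syncs, since
 * each node may locally accumulate unsynced changes. Raising quota_scale
 * above one tightens that bound at the cost of more frequent syncs.
 */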

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
#include <linux/lockref.h>
#include <linux/list_lru.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/jhash.h>
#include <linux/vmalloc.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"

#define GFS2_QD_HASH_SHIFT	12
#define GFS2_QD_HASH_SIZE	BIT(GFS2_QD_HASH_SHIFT)
#define GFS2_QD_HASH_MASK	(GFS2_QD_HASH_SIZE - 1)

#define QC_CHANGE 0
#define QC_SYNC 1

/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
/*                     -> sd_bitmap_lock                              */
static DEFINE_SPINLOCK(qd_lock);
struct list_lru gfs2_qd_lru;

static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];

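/*
 * Hash both the superblock pointer and the quota ID so that quota data for
 * different filesystems mounted on the same node lands in different buckets
 * of the shared hash table.
 */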
static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
				 const struct kqid qid)
{
	unsigned int h;

	h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
	h = jhash(&qid, sizeof(struct kqid), h);

	return h & GFS2_QD_HASH_MASK;
}

static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&qd_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&qd_hash_table[hash]);
}

static void gfs2_qd_dealloc(struct rcu_head *rcu)
{
	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
	struct gfs2_sbd *sdp = qd->qd_sbd;

	kmem_cache_free(gfs2_quotad_cachep, qd);
	if (atomic_dec_and_test(&sdp->sd_quota_count))
		wake_up(&sdp->sd_kill_wait);
}

static void gfs2_qd_dispose(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&qd_lock);
	list_del(&qd->qd_list);
	spin_unlock(&qd_lock);

	spin_lock_bucket(qd->qd_hash);
	hlist_bl_del_rcu(&qd->qd_hlist);
	spin_unlock_bucket(qd->qd_hash);

	if (!gfs2_withdrawn(sdp)) {
		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_ref);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);
	}

	gfs2_glock_put(qd->qd_gl);
	call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
}

static void gfs2_qd_list_dispose(struct list_head *list)
{
	struct gfs2_quota_data *qd;

	while (!list_empty(list)) {
		qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
		list_del(&qd->qd_lru);

		gfs2_qd_dispose(qd);
	}
}


static enum lru_status gfs2_qd_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *dispose = arg;
	struct gfs2_quota_data *qd =
		list_entry(item, struct gfs2_quota_data, qd_lru);
	enum lru_status status;

	if (!spin_trylock(&qd->qd_lockref.lock))
		return LRU_SKIP;

	status = LRU_SKIP;
	if (qd->qd_lockref.count == 0) {
		lockref_mark_dead(&qd->qd_lockref);
		list_lru_isolate_move(lru, &qd->qd_lru, dispose);
		status = LRU_REMOVED;
	}

	spin_unlock(&qd->qd_lockref.lock);
	return status;
}

static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
					 struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;

	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
				     gfs2_qd_isolate, &dispose);

	gfs2_qd_list_dispose(&dispose);

	return freed;
}

static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
					  struct shrink_control *sc)
{
	return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
}

static struct shrinker *gfs2_qd_shrinker;

int __init gfs2_qd_shrinker_init(void)
{
	gfs2_qd_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "gfs2-qd");
	if (!gfs2_qd_shrinker)
		return -ENOMEM;

	gfs2_qd_shrinker->count_objects = gfs2_qd_shrink_count;
	gfs2_qd_shrinker->scan_objects = gfs2_qd_shrink_scan;

	shrinker_register(gfs2_qd_shrinker);

	return 0;
}

void gfs2_qd_shrinker_exit(void)
{
	shrinker_free(gfs2_qd_shrinker);
}

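/*
 * Quotas live at fixed offsets in the quota file: user and group quotas for
 * the same numeric ID occupy adjacent slots (even index for the user quota,
 * odd index for the group quota).
 */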
static u64 qd2index(struct gfs2_quota_data *qd)
{
	struct kqid qid = qd->qd_id;
	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
		((qid.type == USRQUOTA) ? 0 : 1);
}

static u64 qd2offset(struct gfs2_quota_data *qd)
{
	return qd2index(qd) * sizeof(struct gfs2_quota);
}

static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (!qd)
		return NULL;

	qd->qd_sbd = sdp;
	qd->qd_lockref.count = 0;
	spin_lock_init(&qd->qd_lockref.lock);
	qd->qd_id = qid;
	qd->qd_slot = -1;
	INIT_LIST_HEAD(&qd->qd_lru);
	qd->qd_hash = hash;

	error = gfs2_glock_get(sdp, qd2index(qd),
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	return qd;

fail:
	kmem_cache_free(gfs2_quotad_cachep, qd);
	return NULL;
}

static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
						     const struct gfs2_sbd *sdp,
						     struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
		if (!qid_eq(qd->qd_id, qid))
			continue;
		if (qd->qd_sbd != sdp)
			continue;
		if (lockref_get_not_dead(&qd->qd_lockref)) {
			list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
			return qd;
		}
	}

	return NULL;
}


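/*
 * Look the quota data up under RCU first; if it is not cached, allocate a
 * new entry and re-check under qd_lock in case another task inserted it
 * concurrently.
 */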
static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd, *new_qd;
	unsigned int hash = gfs2_qd_hash(sdp, qid);

	rcu_read_lock();
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	rcu_read_unlock();

	if (qd)
		return 0;

	new_qd = qd_alloc(hash, sdp, qid);
	if (!new_qd)
		return -ENOMEM;

	spin_lock(&qd_lock);
	spin_lock_bucket(hash);
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	if (qd == NULL) {
		new_qd->qd_lockref.count++;
		*qdp = new_qd;
		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
		atomic_inc(&sdp->sd_quota_count);
	}
	spin_unlock_bucket(hash);
	spin_unlock(&qd_lock);

	if (qd) {
		gfs2_glock_put(new_qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, new_qd);
	}

	return 0;
}


static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;
	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
	lockref_get(&qd->qd_lockref);
}

static void qd_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp;

	if (lockref_put_or_lock(&qd->qd_lockref))
		return;

	BUG_ON(__lockref_is_dead(&qd->qd_lockref));
	sdp = qd->qd_sbd;
	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
		lockref_mark_dead(&qd->qd_lockref);
		spin_unlock(&qd->qd_lockref.lock);

		gfs2_qd_dispose(qd);
		return;
	}

	qd->qd_lockref.count = 0;
	list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
	spin_unlock(&qd->qd_lockref.lock);
}

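/*
 * Take a reference on this quota's slot in the per-node quota change file,
 * allocating a free slot from sd_quota_bitmap on first use.
 */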
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;
	unsigned int bit;
	int error = 0;

	spin_lock(&sdp->sd_bitmap_lock);
	if (qd->qd_slot_ref == 0) {
		bit = find_first_zero_bit(sdp->sd_quota_bitmap,
					  sdp->sd_quota_slots);
		if (bit >= sdp->sd_quota_slots) {
			error = -ENOSPC;
			goto out;
		}
		set_bit(bit, sdp->sd_quota_bitmap);
		qd->qd_slot = bit;
	}
	qd->qd_slot_ref++;
out:
	spin_unlock(&sdp->sd_bitmap_lock);
	return error;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_ref);
	qd->qd_slot_ref++;
	spin_unlock(&sdp->sd_bitmap_lock);
}

static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_ref);
	if (!--qd->qd_slot_ref) {
		BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
		qd->qd_slot = -1;
	}
	spin_unlock(&sdp->sd_bitmap_lock);
}

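/*
 * Pin the buffer of the quota change block that holds this quota's slot,
 * leaving qd_bh_qc pointing at the on-disk gfs2_quota_change entry.
 */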
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;
	struct inode *inode = sdp->sd_qc_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	struct iomap iomap = { };
	int error;

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	error = gfs2_iomap_get(inode,
			       (loff_t)block << inode->i_blkbits,
			       i_blocksize(inode), &iomap);
	if (error)
		goto fail;
	error = -ENOENT;
	if (iomap.type != IOMAP_MAPPED)
		goto fail;

	error = gfs2_meta_read(ip->i_gl, iomap.addr >> inode->i_blkbits,
			       DIO_WAIT, 0, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}

static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
			 u64 *sync_gen)
{
	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
		return 0;

	if (!lockref_get_not_dead(&qd->qd_lockref))
		return 0;

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
	set_bit(QDF_LOCKED, &qd->qd_flags);
	qd->qd_change_sync = qd->qd_change;
	slot_hold(qd);
	return 1;
}

static int qd_bh_get_or_undo(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
	int error;

	error = bh_get(qd);
	if (!error)
		return 0;

	clear_bit(QDF_LOCKED, &qd->qd_flags);
	slot_put(qd);
	qd_put(qd);
	return error;
}

static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL, *iter;
	int error;

	*qdp = NULL;

	if (sb_rdonly(sdp->sd_vfs))
		return 0;

	spin_lock(&qd_lock);

	list_for_each_entry(iter, &sdp->sd_quota_list, qd_list) {
		if (qd_check_sync(sdp, iter, &sdp->sd_quota_sync_gen)) {
			qd = iter;
			break;
		}
	}

	spin_unlock(&qd_lock);

	if (qd) {
		error = qd_bh_get_or_undo(sdp, qd);
		if (error)
			return error;
		*qdp = qd;
	}

	return 0;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_sbd, test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	qdsb_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, qid, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}

/**
 * gfs2_qa_get - make sure we have a quota allocation data structure,
 *               if necessary
 * @ip: the inode for this reservation
 */
int gfs2_qa_get(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct inode *inode = &ip->i_inode;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	spin_lock(&inode->i_lock);
	if (ip->i_qadata == NULL) {
		struct gfs2_qadata *tmp;

		spin_unlock(&inode->i_lock);
		tmp = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
		if (!tmp)
			return -ENOMEM;

		spin_lock(&inode->i_lock);
		if (ip->i_qadata == NULL)
			ip->i_qadata = tmp;
		else
			kmem_cache_free(gfs2_qadata_cachep, tmp);
	}
	ip->i_qadata->qa_ref++;
	spin_unlock(&inode->i_lock);
	return 0;
}

void gfs2_qa_put(struct gfs2_inode *ip)
{
	struct inode *inode = &ip->i_inode;

	spin_lock(&inode->i_lock);
	if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) {
		kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
		ip->i_qadata = NULL;
	}
	spin_unlock(&inode->i_lock);
}

int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data **qd;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = gfs2_qa_get(ip);
	if (error)
		return error;

	qd = ip->i_qadata->qa_qd;

	if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) {
		error = -EIO;
		gfs2_qa_put(ip);
		goto out;
	}

	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
	if (error)
		goto out_unhold;
	ip->i_qadata->qa_qd_num++;
	qd++;

	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
	if (error)
		goto out_unhold;
	ip->i_qadata->qa_qd_num++;
	qd++;

	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
	    !uid_eq(uid, ip->i_inode.i_uid)) {
		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
		if (error)
			goto out_unhold;
		ip->i_qadata->qa_qd_num++;
		qd++;
	}

	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
	    !gid_eq(gid, ip->i_inode.i_gid)) {
		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
		if (error)
			goto out_unhold;
		ip->i_qadata->qa_qd_num++;
		qd++;
	}

out_unhold:
	if (error)
		gfs2_quota_unhold(ip);
out:
	return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	u32 x;

	if (ip->i_qadata == NULL)
		return;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qdsb_put(ip->i_qadata->qa_qd[x]);
		ip->i_qadata->qa_qd[x] = NULL;
	}
	ip->i_qadata->qa_qd_num = 0;
	gfs2_qa_put(ip);
}

static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (qid_lt(qd_a->qd_id, qd_b->qd_id))
		return -1;
	if (qid_lt(qd_b->qd_id, qd_a->qd_id))
		return 1;
	return 0;
}

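/*
 * Apply a local quota change of @change blocks to the per-node quota change
 * file. A QC_CHANGE that dirties a clean entry takes qd and slot references;
 * a QC_SYNC that clears the entry drops them again.
 */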
static void do_qc(struct gfs2_quota_data *qd, s64 change, int qc_type)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (qd->qd_id.type == USRQUOTA)
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&qd_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lock);

	if (qc_type == QC_CHANGE) {
		if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
			qd_hold(qd);
			slot_hold(qd);
		}
	} else {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	}

	if (change < 0) /* Reset quiet flag if we freed some blocks */
		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
	mutex_unlock(&sdp->sd_quota_mutex);
}

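/*
 * Write @bytes from @buf into the quota file at folio @index, offset @off,
 * mapping and reading the underlying buffers so the write can be added to
 * the current transaction.
 */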
static int gfs2_write_buf_to_page(struct gfs2_sbd *sdp, unsigned long index,
				  unsigned off, void *buf, unsigned bytes)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct inode *inode = &ip->i_inode;
	struct address_space *mapping = inode->i_mapping;
	struct folio *folio;
	struct buffer_head *bh;
	u64 blk;
	unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
	unsigned to_write = bytes, pg_off = off;

	blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
	boff = off % bsize;

	folio = filemap_grab_folio(mapping, index);
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	bh = folio_buffers(folio);
	if (!bh)
		bh = create_empty_buffers(folio, bsize, 0);

	for (;;) {
		/* Find the beginning block within the folio */
		if (pg_off >= ((bnum * bsize) + bsize)) {
			bh = bh->b_this_page;
			bnum++;
			blk++;
			continue;
		}
		if (!buffer_mapped(bh)) {
			gfs2_block_map(inode, blk, bh, 1);
			if (!buffer_mapped(bh))
				goto unlock_out;
			/* If it's a newly allocated disk block, zero it */
			if (buffer_new(bh))
				folio_zero_range(folio, bnum * bsize,
						 bh->b_size);
		}
		if (folio_test_uptodate(folio))
			set_buffer_uptodate(bh);
		if (bh_read(bh, REQ_META | REQ_PRIO) < 0)
			goto unlock_out;
		gfs2_trans_add_data(ip->i_gl, bh);

		/* If we need to write to the next block as well */
		if (to_write > (bsize - boff)) {
			pg_off += (bsize - boff);
			to_write -= (bsize - boff);
			boff = pg_off % bsize;
			continue;
		}
		break;
	}

	/* Write to the folio, now that we have setup the buffer(s) */
	memcpy_to_folio(folio, off, buf, bytes);
	flush_dcache_folio(folio);
	folio_unlock(folio);
	folio_put(folio);

	return 0;

unlock_out:
	folio_unlock(folio);
	folio_put(folio);
	return -EIO;
}

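/*
 * Write one gfs2_quota record at offset @loc, splitting the write in two
 * when the record straddles a page boundary.
 */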
static int gfs2_write_disk_quota(struct gfs2_sbd *sdp, struct gfs2_quota *qp,
				 loff_t loc)
{
	unsigned long pg_beg;
	unsigned pg_off, nbytes, overflow = 0;
	int error;
	void *ptr;

	nbytes = sizeof(struct gfs2_quota);

	pg_beg = loc >> PAGE_SHIFT;
	pg_off = offset_in_page(loc);

	/* If the quota straddles a page boundary, split the write in two */
	if ((pg_off + nbytes) > PAGE_SIZE)
		overflow = (pg_off + nbytes) - PAGE_SIZE;

	ptr = qp;
	error = gfs2_write_buf_to_page(sdp, pg_beg, pg_off, ptr,
				       nbytes - overflow);
	/* If there's an overflow, write the remaining bytes to the next page */
	if (!error && overflow)
		error = gfs2_write_buf_to_page(sdp, pg_beg + 1, 0,
					       ptr + nbytes - overflow,
					       overflow);
	return error;
}

/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @sdp: The superblock
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */

static int gfs2_adjust_quota(struct gfs2_sbd *sdp, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd,
			     struct qc_dqblk *fdq)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct inode *inode = &ip->i_inode;
	struct gfs2_quota q;
	int err;
	u64 size;

	if (gfs2_is_stuffed(ip)) {
		err = gfs2_unstuff_dinode(ip);
		if (err)
			return err;
	}

	memset(&q, 0, sizeof(struct gfs2_quota));
	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
	if (err < 0)
		return err;

	loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
	be64_add_cpu(&q.qu_value, change);
	if (((s64)be64_to_cpu(q.qu_value)) < 0)
		q.qu_value = 0; /* Never go negative on quota usage */
	qd->qd_qb.qb_value = q.qu_value;
	if (fdq) {
		if (fdq->d_fieldmask & QC_SPC_SOFT) {
			q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
			qd->qd_qb.qb_warn = q.qu_warn;
		}
		if (fdq->d_fieldmask & QC_SPC_HARD) {
			q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
			qd->qd_qb.qb_limit = q.qu_limit;
		}
		if (fdq->d_fieldmask & QC_SPACE) {
			q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
			qd->qd_qb.qb_value = q.qu_value;
		}
	}

	err = gfs2_write_disk_quota(sdp, &q, loc);
	if (!err) {
		size = loc + sizeof(struct gfs2_quota);
		if (size > inode->i_size)
			i_size_write(inode, size);
		inode->i_mtime = inode_set_ctime_current(inode);
		mark_inode_dirty(inode);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

	return err;
}

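/*
 * Flush a batch of pending quota changes into the quota file in a single
 * transaction, then zero the corresponding entries in the per-node quota
 * change file.
 */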
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	unsigned reserved;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	int error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	inode_lock(&ip->i_inode);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out_dq;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_dq;

	for (x = 0; x < num_qd; x++) {
		offset = qd2offset(qda[x]);
		if (gfs2_write_alloc_required(ip, offset,
					      sizeof(struct gfs2_quota)))
			nalloc++;
	}

	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction
	 */
	/* +3 in the end for unstuffing block, inode size update block
	 * and another block in case quota straddles page boundary and
	 * two blocks need to be updated instead of 1 */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
	ap.target = reserved;
	error = gfs2_inplace_reserve(ip, &ap);
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(sdp, offset, qd->qd_change_sync, qd,
					  NULL);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync, QC_SYNC);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_alloc:
	gfs2_glock_dq_uninit(&i_gh);
out_dq:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	inode_unlock(&ip->i_inode);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
		       GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
	if (!error) {
		for (x = 0; x < num_qd; x++)
			qda[x]->qd_sync_gen = sdp->sd_quota_sync_gen;
	}
	return error;
}

static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota q;
	struct gfs2_quota_lvb *qlvb;
	loff_t pos;
	int error;

	memset(&q, 0, sizeof(struct gfs2_quota));
	pos = qd2offset(qd);
	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
	if (error < 0)
		return error;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
	qlvb->__pad = 0;
	qlvb->qb_limit = q.qu_limit;
	qlvb->qb_warn = q.qu_warn;
	qlvb->qb_value = q.qu_value;
	qd->qd_qb = *qlvb;

	return 0;
}

static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	int error;

	gfs2_assert_warn(sdp, sdp == qd->qd_gl->gl_name.ln_sbd);
restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
		force_refresh = FORCE;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		error = update_qd(sdp, qd);
		if (error)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);
		gfs2_glock_dq_uninit(q_gh);
		force_refresh = 0;
		goto restart;
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}

int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	u32 x;
	int error;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON &&
	    sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET)
		return 0;

	error = gfs2_quota_hold(ip, uid, gid);
	if (error)
		return error;

	sort(ip->i_qadata->qa_qd, ip->i_qadata->qa_qd_num,
	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qd = ip->i_qadata->qa_qd[x];
		error = do_glock(qd, NO_FORCE, &ip->i_qadata->qa_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}

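/*
 * Decide whether the locally accumulated change, scaled by quota_scale and
 * the number of journals, would push the cached quota value over its limit
 * and therefore warrants an immediate sync.
 */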
static bool need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;

	if (!qd->qd_qb.qb_limit)
		return false;

	spin_lock(&qd_lock);
	value = qd->qd_change;
	spin_unlock(&qd_lock);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value <= 0)
		return false;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		return false;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		value = div_s64(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			return false;
	}

	return true;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qda[2 * GFS2_MAXQUOTAS];
	unsigned int count = 0;
	u32 x;
	int found;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		return;

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		struct gfs2_quota_data *qd;
		bool sync;

		qd = ip->i_qadata->qa_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
		if (!sync)
			continue;

		spin_lock(&qd_lock);
		found = qd_check_sync(sdp, qd, NULL);
		spin_unlock(&qd_lock);

		if (!found)
			continue;

		if (!qd_bh_get_or_undo(sdp, qd))
			qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

	gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET)
		fs_info(sdp, "quota %s for %s %u\n",
			type,
			(qd->qd_id.type == USRQUOTA) ? "user" : "group",
			from_kqid(&init_user_ns, qd->qd_id));

	return 0;
}

/**
 * gfs2_quota_check - check if allocating new blocks will exceed quota
 * @ip:  The inode for which this check is being performed
 * @uid: The uid to check against
 * @gid: The gid to check against
 * @ap:  The allocation parameters. ap->target contains the requested
 *       blocks. ap->min_target, if set, contains the minimum blks
 *       requested.
 *
 * Returns: 0 on success.
 *                  min_req = ap->min_target ? ap->min_target : ap->target;
 *                  quota must allow at least min_req blks for success and
 *                  ap->allowed is set to the number of blocks allowed
 *
 *          -EDQUOT otherwise, quota violation. ap->allowed is set to number
 *                  of blocks available.
 */
int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
		     struct gfs2_alloc_parms *ap)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	s64 value, warn, limit;
	u32 x;
	int error = 0;

	ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qd = ip->i_qadata->qa_qd[x];

		if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
			continue;

		warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
		limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&qd_lock);
		value += qd->qd_change;
		spin_unlock(&qd_lock);

		if (limit > 0 && (limit - value) < ap->allowed)
			ap->allowed = limit - value;
		/* If we can't meet the target */
		if (limit && limit < (value + (s64)ap->target)) {
			/* If no min_target specified or we don't meet
			 * min_target, return -EDQUOT */
			if (!ap->min_target || ap->min_target > ap->allowed) {
				if (!test_and_set_bit(QDF_QMSG_QUIET,
						      &qd->qd_flags)) {
					print_message(qd, "exceeded");
					quota_send_warning(qd->qd_id,
							   sdp->sd_vfs->s_dev,
							   QUOTA_NL_BHARDWARN);
				}
				error = -EDQUOT;
				break;
			}
		} else if (warn && warn < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp, gt_quota_warn_period)
					 * HZ)) {
			quota_send_warning(qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}
	return error;
}

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       kuid_t uid, kgid_t gid)
{
	struct gfs2_quota_data *qd;
	u32 x;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	if ((sdp->sd_args.ar_quota != GFS2_QUOTA_ON &&
	     sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET) ||
	    gfs2_assert_warn(sdp, change))
		return;
	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
		return;

	if (gfs2_assert_withdraw(sdp, ip->i_qadata &&
				 ip->i_qadata->qa_ref > 0))
		return;
	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qd = ip->i_qadata->qa_qd[x];

		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
			do_qc(qd, change, QC_CHANGE);
		}
	}
}

static bool qd_changed(struct gfs2_sbd *sdp)
{
	struct gfs2_quota_data *qd;
	bool changed = false;

	spin_lock(&qd_lock);
	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
		    !test_bit(QDF_CHANGE, &qd->qd_flags))
			continue;

		changed = true;
		break;
	}
	spin_unlock(&qd_lock);
	return changed;
}

int gfs2_quota_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_data **qda;
	unsigned int max_qd = PAGE_SIZE / sizeof(struct gfs2_holder);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	if (!qd_changed(sdp))
		return 0;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	mutex_lock(&sdp->sd_quota_sync_mutex);
	sdp->sd_quota_sync_gen++;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	mutex_unlock(&sdp->sd_quota_sync_mutex);
	kfree(qda);

	return error;
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);
	return error;
}

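/*
 * At mount time, scan the per-node quota change file and rebuild the
 * in-memory quota data and slot bitmap for entries that still carry
 * unsynced changes from before the last unmount or crash.
 */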
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	u64 size = i_size_read(sdp->sd_qc_inode);
	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	unsigned int hash;
	unsigned int bm_size;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
		return -EIO;

	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
	bm_size *= sizeof(unsigned long);
	error = -ENOMEM;
	sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
	if (sdp->sd_quota_bitmap == NULL)
		sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
						 __GFP_ZERO);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		const struct gfs2_quota_change *qc;
		unsigned int y;

		if (!extlen) {
			extlen = 32;
			error = gfs2_get_extent(&ip->i_inode, x, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_data *qd;
			s64 qc_change = be64_to_cpu(qc->qc_change);
			u32 qc_flags = be32_to_cpu(qc->qc_flags);
			enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
						USRQUOTA : GRPQUOTA;
			struct kqid qc_id = make_kqid(&init_user_ns, qtype,
						      be32_to_cpu(qc->qc_id));
			qc++;
			if (!qc_change)
				continue;

			hash = gfs2_qd_hash(sdp, qc_id);
			qd = qd_alloc(hash, sdp, qc_id);
			if (qd == NULL) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_ref = 1;

			spin_lock(&qd_lock);
			BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&qd_lock);

			spin_lock_bucket(hash);
			hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
			spin_unlock_bucket(hash);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}

void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct gfs2_quota_data *qd;
	LIST_HEAD(dispose);
	int count;

	BUG_ON(test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));

	spin_lock(&qd_lock);
	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		spin_lock(&qd->qd_lockref.lock);
		if (qd->qd_lockref.count != 0) {
			spin_unlock(&qd->qd_lockref.lock);
			continue;
		}
		lockref_mark_dead(&qd->qd_lockref);
		spin_unlock(&qd->qd_lockref.lock);

		list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
		list_add(&qd->qd_lru, &dispose);
	}
	spin_unlock(&qd_lock);

	gfs2_qd_list_dispose(&dispose);

	wait_event_timeout(sdp->sd_kill_wait,
			   (count = atomic_read(&sdp->sd_quota_count)) == 0,
			   HZ * 60);

	if (count != 0)
		fs_err(sdp, "%d left-over quota data objects\n", count);

	kvfree(sdp->sd_quota_bitmap);
	sdp->sd_quota_bitmap = NULL;
}

static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
	if (error == 0 || error == -EROFS)
		return;
	if (!gfs2_withdrawn(sdp)) {
		if (!cmpxchg(&sdp->sd_log_error, 0, error))
			fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
		wake_up(&sdp->sd_logd_waitq);
	}
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
			       int (*fxn)(struct super_block *sb, int type),
			       unsigned long t, unsigned long *timeo,
			       unsigned int *new_timeo)
{
	if (t >= *timeo) {
		int error = fxn(sdp->sd_vfs, 0);
		quotad_error(sdp, msg, error);
		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
	} else {
		*timeo -= t;
	}
}

void gfs2_wake_up_statfs(struct gfs2_sbd *sdp)
{
	if (!sdp->sd_statfs_force_sync) {
		sdp->sd_statfs_force_sync = 1;
		wake_up(&sdp->sd_quota_wait);
	}
}


/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @data: Pointer to GFS2 superblock
 *
 */

int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;

	while (!kthread_should_stop()) {
		if (gfs2_withdrawn(sdp))
			break;

		/* Update the master statfs file */
		if (sdp->sd_statfs_force_sync) {
			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
			quotad_error(sdp, "statfs", error);
			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
		} else {
			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
					   &statfs_timeo,
					   &tune->gt_statfs_quantum);
		}

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		try_to_freeze();

		t = min(quotad_timeo, statfs_timeo);

		t = wait_event_interruptible_timeout(sdp->sd_quota_wait,
				sdp->sd_statfs_force_sync ||
				gfs2_withdrawn(sdp) ||
				kthread_should_stop(),
				t);

		if (sdp->sd_statfs_force_sync)
			t = 0;
	}

	return 0;
}

1611
e54b2e2d 1612static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
1d371b5e
SW
1613{
1614 struct gfs2_sbd *sdp = sb->s_fs_info;
1615
e54b2e2d 1616 memset(state, 0, sizeof(*state));
ad6bb90f
CH
1617
1618 switch (sdp->sd_args.ar_quota) {
eef46ab7
BP
1619 case GFS2_QUOTA_QUIET:
1620 fallthrough;
ad6bb90f 1621 case GFS2_QUOTA_ON:
e54b2e2d
JK
1622 state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
1623 state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
df561f66 1624 fallthrough;
ad6bb90f 1625 case GFS2_QUOTA_ACCOUNT:
e54b2e2d
JK
1626 state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED |
1627 QCI_SYSFILE;
1628 state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED |
1629 QCI_SYSFILE;
ad6bb90f
CH
1630 break;
1631 case GFS2_QUOTA_OFF:
1632 break;
1633 }
1d371b5e 1634 if (sdp->sd_quota_inode) {
e54b2e2d
JK
1635 state->s_state[USRQUOTA].ino =
1636 GFS2_I(sdp->sd_quota_inode)->i_no_addr;
1637 state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks;
1d371b5e 1638 }
e54b2e2d
JK
1639 state->s_state[USRQUOTA].nextents = 1; /* unsupported */
1640 state->s_state[GRPQUOTA] = state->s_state[USRQUOTA];
1641 state->s_incoredqs = list_lru_count(&gfs2_qd_lru);
1d371b5e
SW
1642 return 0;
1643}
1644
74a8a103 1645static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
14bf61ff 1646 struct qc_dqblk *fdq)
113d6b3c
SW
1647{
1648 struct gfs2_sbd *sdp = sb->s_fs_info;
1649 struct gfs2_quota_lvb *qlvb;
1650 struct gfs2_quota_data *qd;
1651 struct gfs2_holder q_gh;
1652 int error;
1653
14bf61ff 1654 memset(fdq, 0, sizeof(*fdq));
113d6b3c
SW
1655
1656 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1657 return -ESRCH; /* Crazy XFS error code */
1658
236c64e4
EB
1659 if ((qid.type != USRQUOTA) &&
1660 (qid.type != GRPQUOTA))
113d6b3c
SW
1661 return -EINVAL;
1662
05e0a60d 1663 error = qd_get(sdp, qid, &qd);
113d6b3c
SW
1664 if (error)
1665 return error;
1666 error = do_glock(qd, FORCE, &q_gh);
1667 if (error)
1668 goto out;
1669
4e2f8849 1670 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
14bf61ff
JK
1671 fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
1672 fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
1673 fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
113d6b3c
SW
1674
1675 gfs2_glock_dq_uninit(&q_gh);
1676out:
1677 qd_put(qd);
1678 return error;
1679}
1680
e285c100 1681/* GFS2 only supports a subset of the XFS fields */
14bf61ff 1682#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
e285c100 1683
74a8a103 1684static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
14bf61ff 1685 struct qc_dqblk *fdq)
e285c100
SW
1686{
1687 struct gfs2_sbd *sdp = sb->s_fs_info;
1688 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1689 struct gfs2_quota_data *qd;
1690 struct gfs2_holder q_gh, i_gh;
1691 unsigned int data_blocks, ind_blocks;
1692 unsigned int blocks = 0;
1693 int alloc_required;
e285c100
SW
1694 loff_t offset;
1695 int error;
1696
1697 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1698 return -ESRCH; /* Crazy XFS error code */
1699
236c64e4
EB
1700 if ((qid.type != USRQUOTA) &&
1701 (qid.type != GRPQUOTA))
e285c100 1702 return -EINVAL;
e285c100
SW
1703
1704 if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
1705 return -EINVAL;
e285c100 1706
05e0a60d 1707 error = qd_get(sdp, qid, &qd);
e285c100
SW
1708 if (error)
1709 return error;
1710
2fba46a0 1711 error = gfs2_qa_get(ip);
0a305e49
BP
1712 if (error)
1713 goto out_put;
1714
5955102c 1715 inode_lock(&ip->i_inode);
e285c100
SW
1716 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
1717 if (error)
0a305e49 1718 goto out_unlockput;
e285c100
SW
1719 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1720 if (error)
1721 goto out_q;
1722
1723 /* Check for existing entry, if none then alloc new blocks */
1724 error = update_qd(sdp, qd);
1725 if (error)
1726 goto out_i;
1727
1728 /* If nothing has changed, this is a no-op */
14bf61ff
JK
1729 if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
1730 ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
1731 fdq->d_fieldmask ^= QC_SPC_SOFT;
802ec9b6 1732
14bf61ff
JK
1733 if ((fdq->d_fieldmask & QC_SPC_HARD) &&
1734 ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
1735 fdq->d_fieldmask ^= QC_SPC_HARD;
802ec9b6 1736
14bf61ff
JK
1737 if ((fdq->d_fieldmask & QC_SPACE) &&
1738 ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
1739 fdq->d_fieldmask ^= QC_SPACE;
802ec9b6 1740
e285c100
SW
1741 if (fdq->d_fieldmask == 0)
1742 goto out_i;
1743
1744 offset = qd2offset(qd);
461cb419 1745 alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
e79a46a0
AD
1746 if (gfs2_is_stuffed(ip))
1747 alloc_required = 1;
e285c100 1748 if (alloc_required) {
7b9cff46 1749 struct gfs2_alloc_parms ap = { .aflags = 0, };
e285c100
SW
1750 gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
1751 &data_blocks, &ind_blocks);
564e12b1 1752 blocks = 1 + data_blocks + ind_blocks;
7b9cff46
SW
1753 ap.target = blocks;
1754 error = gfs2_inplace_reserve(ip, &ap);
e285c100 1755 if (error)
564e12b1 1756 goto out_i;
71f890f7 1757 blocks += gfs2_rg_blocks(ip, blocks);
e285c100
SW
1758 }
1759
e79a46a0
AD
1760 /* Some quotas span block boundaries and can update two blocks,
1761 adding an extra block to the transaction to handle such quotas */
1762 error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
e285c100
SW
1763 if (error)
1764 goto out_release;
1765
1766 /* Apply changes */
ee1768e4 1767 error = gfs2_adjust_quota(sdp, offset, 0, qd, fdq);
9cde2898
AD
1768 if (!error)
1769 clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
e285c100
SW
1770
1771 gfs2_trans_end(sdp);
1772out_release:
564e12b1 1773 if (alloc_required)
e285c100 1774 gfs2_inplace_release(ip);
e285c100
SW
1775out_i:
1776 gfs2_glock_dq_uninit(&i_gh);
1777out_q:
1778 gfs2_glock_dq_uninit(&q_gh);
0a305e49 1779out_unlockput:
2fba46a0 1780 gfs2_qa_put(ip);
5955102c 1781 inode_unlock(&ip->i_inode);
0a305e49 1782out_put:
e285c100
SW
1783 qd_put(qd);
1784 return error;
1785}
1786
cc632e7f
SW
1787const struct quotactl_ops gfs2_quotactl_ops = {
1788 .quota_sync = gfs2_quota_sync,
e54b2e2d 1789 .get_state = gfs2_quota_get_state,
b9b2dd36 1790 .get_dqblk = gfs2_get_dqblk,
c472b432 1791 .set_dqblk = gfs2_set_dqblk,
cc632e7f 1792};
c754fbbb
SW
1793
1794void __init gfs2_quota_hash_init(void)
1795{
1796 unsigned i;
1797
1798 for(i = 0; i < GFS2_QD_HASH_SIZE; i++)
1799 INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
1800}