/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#ifndef _BUCKETS_H
#define _BUCKETS_H

#include "buckets_types.h"
#include "extents.h"
#include "sb-members.h"
static inline size_t sector_to_bucket(const struct bch_dev *ca, sector_t s)
{
	return div_u64(s, ca->mi.bucket_size);
}

static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
{
	return ((sector_t) b) * ca->mi.bucket_size;
}

static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
{
	u32 remainder;

	div_u64_rem(s, ca->mi.bucket_size, &remainder);
	return remainder;
}

static inline size_t sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s,
						 u32 *offset)
{
	return div_u64_rem(s, ca->mi.bucket_size, offset);
}
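
/*
 * Example (illustrative, with a hypothetical bucket_size of 128 sectors):
 * sector 300 lands in bucket 2 at offset 44, since 300 = 2 * 128 + 44:
 *
 *	u32 offset;
 *	size_t b = sector_to_bucket_and_offset(ca, 300, &offset);
 *
 * Here b == 2, offset == 44, and bucket_to_sector(ca, b) == 256.
 */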
#define for_each_bucket(_b, _buckets)				\
	for (_b = (_buckets)->b + (_buckets)->first_bucket;	\
	     _b < (_buckets)->b + (_buckets)->nbuckets; _b++)
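
/*
 * Example (illustrative): counting the valid buckets in a bucket_array -
 * the cursor visits indices [first_bucket, nbuckets):
 *
 *	struct bucket *b;
 *	size_t nr = 0;
 *
 *	for_each_bucket(b, buckets)
 *		nr++;
 */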
/*
 * We need to cram a spinlock in a single byte, because that's what we have left
 * in struct bucket, and we care about the size of these - during fsck, we need
 * in memory state for every single bucket on every device.
 *
 * We used to do
 *	while (xchg(&b->lock, 1)) cpu_relax();
 * but, it turns out not all architectures support xchg on a single byte.
 *
 * So now we use bit_spin_lock(), with fun games since we can't burn a whole
 * ulong for this - we just need to make sure the lock bit always ends up in the
 * same byte.
 */

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define BUCKET_LOCK_BITNR	0
#else
#define BUCKET_LOCK_BITNR	(BITS_PER_LONG - 1)
#endif
union ulong_byte_assert {
	ulong	ulong;
	u8	byte;
};

static inline void bucket_unlock(struct bucket *b)
{
	/* Assert that the lock bit lands in the first byte of the word: */
	BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);

	clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &b->lock);
	wake_up_bit((void *) &b->lock, BUCKET_LOCK_BITNR);
}
static inline void bucket_lock(struct bucket *b)
{
	wait_on_bit_lock((void *) &b->lock, BUCKET_LOCK_BITNR,
			 TASK_UNINTERRUPTIBLE);
}
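
/*
 * Example (illustrative): the byte-sized lock guards a bucket's in-memory
 * state while it's updated:
 *
 *	bucket_lock(b);
 *	... update the bucket's sector counts, etc. ...
 *	bucket_unlock(b);
 */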
static inline struct bucket_array *gc_bucket_array(struct bch_dev *ca)
{
	return rcu_dereference_check(ca->buckets_gc,
				     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
				     lockdep_is_held(&ca->fs->gc_lock) ||
				     lockdep_is_held(&ca->bucket_lock));
}

static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
{
	struct bucket_array *buckets = gc_bucket_array(ca);

	BUG_ON(b < buckets->first_bucket || b >= buckets->nbuckets);
	return buckets->b + b;
}
static inline struct bucket_gens *bucket_gens(struct bch_dev *ca)
{
	return rcu_dereference_check(ca->bucket_gens,
				     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
				     lockdep_is_held(&ca->fs->gc_lock) ||
				     lockdep_is_held(&ca->bucket_lock));
}

static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
{
	struct bucket_gens *gens = bucket_gens(ca);

	BUG_ON(b < gens->first_bucket || b >= gens->nbuckets);
	return gens->b + b;
}
static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
				   const struct bch_extent_ptr *ptr)
{
	return sector_to_bucket(ca, ptr->offset);
}

static inline struct bpos PTR_BUCKET_POS(const struct bch_fs *c,
					 const struct bch_extent_ptr *ptr)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);

	return POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
}
static inline struct bpos PTR_BUCKET_POS_OFFSET(const struct bch_fs *c,
						const struct bch_extent_ptr *ptr,
						u32 *bucket_offset)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);

	return POS(ptr->dev, sector_to_bucket_and_offset(ca, ptr->offset, bucket_offset));
}

static inline struct bucket *PTR_GC_BUCKET(struct bch_dev *ca,
					   const struct bch_extent_ptr *ptr)
{
	return gc_bucket(ca, PTR_BUCKET_NR(ca, ptr));
}
static inline enum bch_data_type ptr_data_type(const struct bkey *k,
					       const struct bch_extent_ptr *ptr)
{
	if (bkey_is_btree_ptr(k))
		return BCH_DATA_btree;

	return ptr->cached ? BCH_DATA_cached : BCH_DATA_user;
}
static inline s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
{
	EBUG_ON(sectors < 0);

	return crc_is_compressed(p.crc)
		? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
				   p.crc.uncompressed_size)
		: sectors;
}
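
/*
 * Example (illustrative, hypothetical values): an extent whose crc entry
 * has compressed_size == 32 and uncompressed_size == 128 is compressed
 * 4:1; if 64 of its sectors are referenced, it accounts for
 * DIV_ROUND_UP(64 * 32, 128) == 16 disk sectors.
 */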
static inline int gen_cmp(u8 a, u8 b)
{
	return (s8) (a - b);
}

static inline int gen_after(u8 a, u8 b)
{
	int r = gen_cmp(a, b);

	return r > 0 ? r : 0;
}
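
/*
 * Bucket generations wrap mod 256, so ordering uses the signed difference.
 * Example (illustrative): gen_cmp(1, 255) == (s8) 2 == 2, so gen 1 is
 * considered newer than gen 255; gen_after(1, 255) returns 2.
 */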
/*
 * ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated.
 */
static inline u8 ptr_stale(struct bch_dev *ca,
			   const struct bch_extent_ptr *ptr)
{
	u8 ret;

	rcu_read_lock();
	ret = gen_after(*bucket_gen(ca, PTR_BUCKET_NR(ca, ptr)), ptr->gen);
	rcu_read_unlock();

	return ret;
}

/* Device usage: */
void bch2_dev_usage_read_fast(struct bch_dev *, struct bch_dev_usage *);

static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
{
	struct bch_dev_usage ret;

	bch2_dev_usage_read_fast(ca, &ret);
	return ret;
}

void bch2_dev_usage_init(struct bch_dev *);
void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev_usage *);
static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
{
	s64 reserved = 0;

	switch (watermark) {
	case BCH_WATERMARK_NR:
		BUG();
	case BCH_WATERMARK_stripe:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case BCH_WATERMARK_normal:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case BCH_WATERMARK_copygc:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case BCH_WATERMARK_btree:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case BCH_WATERMARK_btree_copygc:
	case BCH_WATERMARK_reclaim:
	case BCH_WATERMARK_interior_updates:
		break;
	}

	return reserved;
}
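
/*
 * The cases in bch2_dev_buckets_reserved() deliberately fall through, so
 * the reservation accumulates from the top down. Example (illustrative,
 * with hypothetical nbuckets == 640 and nr_btree_reserve == 4):
 * BCH_WATERMARK_stripe reserves 10 + 10 + 4 + 4 = 28 buckets, while
 * BCH_WATERMARK_btree reserves only 4.
 */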
static inline u64 dev_buckets_free(struct bch_dev *ca,
				   struct bch_dev_usage usage,
				   enum bch_watermark watermark)
{
	return max_t(s64, 0,
		     usage.d[BCH_DATA_free].buckets -
		     ca->nr_open_buckets -
		     bch2_dev_buckets_reserved(ca, watermark));
}

static inline u64 __dev_buckets_available(struct bch_dev *ca,
					  struct bch_dev_usage usage,
					  enum bch_watermark watermark)
{
	return max_t(s64, 0,
		     usage.d[BCH_DATA_free].buckets
		     + usage.d[BCH_DATA_cached].buckets
		     + usage.d[BCH_DATA_need_gc_gens].buckets
		     + usage.d[BCH_DATA_need_discard].buckets
		     - ca->nr_open_buckets
		     - bch2_dev_buckets_reserved(ca, watermark));
}

static inline u64 dev_buckets_available(struct bch_dev *ca,
					enum bch_watermark watermark)
{
	return __dev_buckets_available(ca, bch2_dev_usage_read(ca), watermark);
}
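
/*
 * Example (illustrative, hypothetical counts): with 100 free, 20 cached,
 * 5 need_gc_gens and 3 need_discard buckets, 8 open buckets and 28
 * reserved, __dev_buckets_available() returns 100+20+5+3 - 8 - 28 = 92,
 * while dev_buckets_free() counts only the free pool: 100 - 8 - 28 = 64.
 */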
/* Filesystem usage: */
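
/*
 * struct bch_fs_usage ends in a flexible array with one u64 counter per
 * replicas entry, so sizes here are computed in units of u64 and include
 * c->replicas.nr trailing counters.
 */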
static inline unsigned __fs_usage_u64s(unsigned nr_replicas)
{
	return sizeof(struct bch_fs_usage) / sizeof(u64) + nr_replicas;
}

static inline unsigned fs_usage_u64s(struct bch_fs *c)
{
	return __fs_usage_u64s(READ_ONCE(c->replicas.nr));
}

static inline unsigned __fs_usage_online_u64s(unsigned nr_replicas)
{
	return sizeof(struct bch_fs_usage_online) / sizeof(u64) + nr_replicas;
}

static inline unsigned fs_usage_online_u64s(struct bch_fs *c)
{
	return __fs_usage_online_u64s(READ_ONCE(c->replicas.nr));
}

static inline unsigned dev_usage_u64s(void)
{
	return sizeof(struct bch_dev_usage) / sizeof(u64);
}
u64 bch2_fs_usage_read_one(struct bch_fs *, u64 *);

struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *);

void bch2_fs_usage_acc_to_base(struct bch_fs *, unsigned);

void bch2_fs_usage_to_text(struct printbuf *,
			   struct bch_fs *, struct bch_fs_usage_online *);

u64 bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage_online *);

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *);

void bch2_dev_usage_update(struct bch_fs *, struct bch_dev *,
			   const struct bch_alloc_v4 *,
			   const struct bch_alloc_v4 *, u64, bool);
void bch2_dev_usage_update_m(struct bch_fs *, struct bch_dev *,
			     struct bucket *, struct bucket *);
/* key/bucket marking: */

static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
						unsigned journal_seq,
						bool gc)
{
	percpu_rwsem_assert_held(&c->mark_lock);
	BUG_ON(!gc && !journal_seq);

	return this_cpu_ptr(gc
			    ? c->usage_gc
			    : c->usage[journal_seq & JOURNAL_BUF_MASK]);
}
int bch2_update_replicas(struct bch_fs *, struct bkey_s_c,
			 struct bch_replicas_entry_v1 *, s64,
			 unsigned, bool);
int bch2_update_replicas_list(struct btree_trans *,
			 struct bch_replicas_entry_v1 *, s64);
int bch2_update_cached_sectors_list(struct btree_trans *, unsigned, s64);
int bch2_replicas_deltas_realloc(struct btree_trans *, unsigned);

void bch2_fs_usage_initialize(struct bch_fs *);

int bch2_check_bucket_ref(struct btree_trans *, struct bkey_s_c,
			  const struct bch_extent_ptr *,
			  s64, enum bch_data_type, u8, u8, u32);

int bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
			      size_t, enum bch_data_type, unsigned,
			      struct gc_pos, unsigned);

int bch2_trigger_extent(struct btree_trans *, enum btree_id, unsigned,
			struct bkey_s_c, struct bkey_s, unsigned);
int bch2_trigger_reservation(struct btree_trans *, enum btree_id, unsigned,
			  struct bkey_s_c, struct bkey_s, unsigned);
#define trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags)\
({												\
	int ret = 0;										\
												\
	if (_old.k->type)									\
		ret = _fn(_trans, _btree_id, _level, _old, _flags & ~BTREE_TRIGGER_INSERT);	\
	if (!ret && _new.k->type)								\
		ret = _fn(_trans, _btree_id, _level, _new.s_c, _flags & ~BTREE_TRIGGER_OVERWRITE);\
	ret;											\
})
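
/*
 * Example (illustrative): a trigger whose overwrite and insert handling is
 * symmetric can run one helper twice - first for the key being overwritten,
 * then for the new key (the helper name here is hypothetical):
 *
 *	return trigger_run_overwrite_then_insert(__my_trigger, trans,
 *						 btree_id, level, old, new, flags);
 */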
void bch2_trans_account_disk_usage_change(struct btree_trans *);

void bch2_trans_fs_usage_revert(struct btree_trans *, struct replicas_delta_list *);
int bch2_trans_fs_usage_apply(struct btree_trans *, struct replicas_delta_list *);

int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *,
				    size_t, enum bch_data_type, unsigned);
int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *);
int bch2_trans_mark_dev_sbs(struct bch_fs *);
static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)
{
	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
	u64 b_offset	= bucket_to_sector(ca, b);
	u64 b_end	= bucket_to_sector(ca, b + 1);
	unsigned i;

	if (!b)
		return true;

	for (i = 0; i < layout->nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout->sb_offset[i]);
		u64 end = offset + (1 << layout->sb_max_size_bits);

		/* Does the bucket [b_offset, b_end) intersect [offset, end)? */
		if (!(offset >= b_end || end <= b_offset))
			return true;
	}

	return false;
}
static inline const char *bch2_data_type_str(enum bch_data_type type)
{
	return type < BCH_DATA_NR
		? __bch2_data_types[type]
		: "(invalid data type)";
}

static inline void bch2_prt_data_type(struct printbuf *out, enum bch_data_type type)
{
	if (type < BCH_DATA_NR)
		prt_str(out, __bch2_data_types[type]);
	else
		prt_printf(out, "(invalid data type %u)", type);
}
/* disk reservations: */

static inline void bch2_disk_reservation_put(struct bch_fs *c,
					     struct disk_reservation *res)
{
	if (res->sectors) {
		this_cpu_sub(*c->online_reserved, res->sectors);
		res->sectors = 0;
	}
}
#define BCH_DISK_RESERVATION_NOFAIL		(1 << 0)

int __bch2_disk_reservation_add(struct bch_fs *,
				struct disk_reservation *,
				u64, int);
static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
					    u64 sectors, int flags)
{
#ifdef __KERNEL__
	u64 old, new;

	do {
		old = this_cpu_read(c->pcpu->sectors_available);
		if (sectors > old)
			return __bch2_disk_reservation_add(c, res, sectors, flags);

		new = old - sectors;
	} while (this_cpu_cmpxchg(c->pcpu->sectors_available, old, new) != old);

	this_cpu_add(*c->online_reserved, sectors);
	res->sectors += sectors;
	return 0;
#else
	return __bch2_disk_reservation_add(c, res, sectors, flags);
#endif
}
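
/*
 * Fast path sketch: each CPU keeps a local pool of pre-reserved sectors in
 * c->pcpu->sectors_available, so most reservations are a lockless cmpxchg
 * against that pool; only when it can't cover the request do we fall back
 * to __bch2_disk_reservation_add() to refill from the global counters.
 */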
static inline struct disk_reservation
bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
{
	return (struct disk_reservation) {
		.sectors	= 0,
		.gen		= c->capacity_gen,
		.nr_replicas	= nr_replicas,
	};
}
static inline int bch2_disk_reservation_get(struct bch_fs *c,
					    struct disk_reservation *res,
					    u64 sectors, unsigned nr_replicas,
					    int flags)
{
	*res = bch2_disk_reservation_init(c, nr_replicas);

	return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
}
#define RESERVE_FACTOR	6

static inline u64 avail_factor(u64 r)
{
	return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
}
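
/*
 * avail_factor() scales capacity by 64/65 (with RESERVE_FACTOR == 6),
 * holding back ~1.5% as slack. Example (illustrative):
 * avail_factor(650) = (650 << 6) / 65 = 640.
 */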
int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
void bch2_dev_buckets_free(struct bch_dev *);
int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);

#endif /* _BUCKETS_H */