	a->io_time[1]		= swab64(a->io_time[1]);
	a->stripe		= swab32(a->stripe);
	a->nr_external_backpointers = swab32(a->nr_external_backpointers);
-	a->fragmentation_lru	= swab64(a->fragmentation_lru);
	a->stripe_sectors	= swab32(a->stripe_sectors);

	bps = alloc_v4_backpointers(a);
{
	struct bch_alloc_v4 _a;
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);
+	struct bch_dev *ca = c ? bch2_dev_bucket_tryget_noerror(c, k.k->p) : NULL;

	prt_newline(out);
	printbuf_indent_add(out, 2);
	prt_printf(out, "stripe_redundancy %u\n", a->stripe_redundancy);
	prt_printf(out, "io_time[READ] %llu\n", a->io_time[READ]);
	prt_printf(out, "io_time[WRITE] %llu\n", a->io_time[WRITE]);
-	prt_printf(out, "fragmentation %llu\n", a->fragmentation_lru);
+
+	if (ca)
+		prt_printf(out, "fragmentation %llu\n", alloc_lru_idx_fragmentation(*a, ca));
	prt_printf(out, "bp_start %llu\n", BCH_ALLOC_V4_BACKPOINTERS_START(a));
	printbuf_indent_sub(out, 2);
+
+	bch2_dev_put(ca);
}
void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
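/*
 * Reviewer note: with the stored field gone, every reader recomputes the
 * index via alloc_lru_idx_fragmentation(). A sketch of that helper,
 * reconstructed from how it's used in this patch (not quoted verbatim from
 * alloc_background.h): return 0 for buckets copygc can't move, else scale
 * dirty sectors to a fixed-point fraction of the bucket size, so emptier
 * buckets sort as better evacuation candidates.
 */
static inline u64 alloc_lru_idx_fragmentation_sketch(struct bch_alloc_v4 a,
						     struct bch_dev *ca)
{
	if (!data_type_movable(a.data_type) ||
	    !bch2_bucket_sectors_fragmented(ca, a))
		return 0;

	/* clamp so a corrupt dirty_sectors count can't overflow the index */
	u64 d = min_t(u64, bch2_bucket_sectors_dirty(a), ca->mi.bucket_size);

	return div_u64(d * (1ULL << 31), ca->mi.bucket_size);
}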
				goto err;
		}

-		new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a, ca);
-		if (old_a->fragmentation_lru != new_a->fragmentation_lru) {
+		old_lru = alloc_lru_idx_fragmentation(*old_a, ca);
+		new_lru = alloc_lru_idx_fragmentation(*new_a, ca);
+		if (old_lru != new_lru) {
			ret = bch2_lru_change(trans,
					      BCH_LRU_FRAGMENTATION_START,
					      bucket_to_u64(new.k->p),
-					      old_a->fragmentation_lru, new_a->fragmentation_lru);
+					      old_lru, new_lru);
			if (ret)
				goto err;
		}
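/*
 * Reviewer note: old_lru/new_lru are presumably the u64 temporaries already
 * used for the read-LRU update earlier in the function; their declarations
 * sit outside this hunk. A sketch of what bch2_lru_change() amounts to,
 * assuming the bch2_lru_del()/bch2_lru_set() helpers from lru.h (simplified,
 * not the verbatim implementation): LRU entries are keyed by their time, so
 * repositioning a bucket means delete at the old time, insert at the new.
 */
static int lru_change_sketch(struct btree_trans *trans, u16 lru_id,
			     u64 dev_bucket, u64 old_time, u64 new_time)
{
	if (old_time == new_time)	/* position unchanged, nothing to do */
		return 0;

	return  bch2_lru_del(trans, lru_id, dev_bucket, old_time) ?:
		bch2_lru_set(trans, lru_id, dev_bucket, new_time);
}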
	if (ret)
		return ret;

+	struct bch_dev *ca = bch2_dev_tryget_noerror(c, alloc_k.k->p.inode);
+	if (!ca)
+		return 0;
+
	a = bch2_alloc_to_v4(alloc_k, &a_convert);

-	if (a->fragmentation_lru) {
+	u64 lru_idx = alloc_lru_idx_fragmentation(*a, ca);
+	if (lru_idx) {
		ret = bch2_lru_check_set(trans, BCH_LRU_FRAGMENTATION_START,
-					 a->fragmentation_lru,
-					 alloc_k, last_flushed);
+					 lru_idx, alloc_k, last_flushed);
		if (ret)
-			return ret;
+			goto err;
	}

	if (a->data_type != BCH_DATA_cached)
-		return 0;
+		goto err;

	if (fsck_err_on(!a->io_time[READ],
			trans, alloc_key_cached_but_read_time_zero,
		goto err;
err:
fsck_err:
+	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}
	__u32			stripe;
	__u32			nr_external_backpointers;
	/* end of fields in original version of alloc_v4 */
-	__u64			fragmentation_lru;
+	__u64			_fragmentation_lru; /* obsolete */
	__u32			stripe_sectors;
	__u32			pad;
} __packed __aligned(8);
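/*
 * Note: bch_alloc_v4 is an on-disk layout, so the field can't simply be
 * deleted; that would shift stripe_sectors/pad and reinterpret existing
 * keys. Renaming it to _fragmentation_lru keeps the bytes in place while
 * marking them dead. A hypothetical compile-time guard for that invariant
 * (not part of the patch):
 */
_Static_assert(offsetof(struct bch_alloc_v4, stripe_sectors) ==
	       offsetof(struct bch_alloc_v4, _fragmentation_lru) + sizeof(__u64),
	       "_fragmentation_lru must stay as padding; later fields may not move");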
		return ret;
	}

-	gc.fragmentation_lru = alloc_lru_idx_fragmentation(gc, ca);
-
	if (fsck_err_on(new.data_type != gc.data_type,
			trans, alloc_key_data_type_wrong,
			"bucket %llu:%llu gen %u has wrong data_type"
	copy_bucket_field(alloc_key_cached_sectors_wrong,	cached_sectors);
	copy_bucket_field(alloc_key_stripe_wrong,		stripe);
	copy_bucket_field(alloc_key_stripe_redundancy_wrong,	stripe_redundancy);
-	copy_bucket_field(alloc_key_fragmentation_lru_wrong,	fragmentation_lru);
#undef copy_bucket_field

	if (!bch2_alloc_v4_cmp(*old, new))
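/*
 * Reviewer note: once the index is derived on demand there is no stored
 * copy for GC to disagree with, so this check loses its trigger (and
 * alloc_key_fragmentation_lru_wrong effectively becomes unused). For
 * context, copy_bucket_field expands roughly as below (simplified sketch,
 * not the verbatim macro):
 */
#define copy_bucket_field_sketch(_errtype, _f)				\
	if (fsck_err_on(new._f != gc._f, trans, _errtype,		\
			"bucket has wrong " #_f				\
			": got %llu, should be %llu",			\
			(u64) new._f, (u64) gc._f))			\
		new._f = gc._f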
	u64 idx;
	int ret;

-	if (fsck_err_on(!bch2_dev_bucket_exists(c, alloc_pos),
+	struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, alloc_pos);
+
+	if (fsck_err_on(!ca,
			trans, lru_entry_to_invalid_bucket,
			"lru key points to nonexistent device:bucket %llu:%llu",
			alloc_pos.inode, alloc_pos.offset))
		idx = alloc_lru_idx_read(*a);
		break;
	case BCH_LRU_fragmentation:
-		idx = a->fragmentation_lru;
+		idx = alloc_lru_idx_fragmentation(*a, ca);
		break;
	}
err:
fsck_err:
	bch2_trans_iter_exit(trans, &iter);
+	bch2_dev_put(ca);
	printbuf_exit(&buf2);
	printbuf_exit(&buf1);
	return ret;
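/*
 * Reviewer note: the fsck_err_on(!ca) repair path falls through to these
 * same err/fsck_err labels, so bch2_dev_put() can be reached with ca ==
 * NULL; that's only correct because bch2_dev_put(), like kfree(), treats
 * NULL as a no-op. Caller-side sketch of the lifetime rule this patch
 * applies throughout (hypothetical function, real helpers):
 */
static int check_with_dev_ref_sketch(struct bch_fs *c, struct bpos pos)
{
	struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, pos);
	int ret = 0;

	if (!ca)		/* device/bucket gone: repair or skip */
		goto out;

	/* ... use ca only while the reference is held ... */
out:
	bch2_dev_put(ca);	/* no-op when ca == NULL */
	return ret;
}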
	a = bch2_alloc_to_v4(k, &a_convert);
	dirty_sectors = bch2_bucket_sectors_dirty(*a);
	bucket_size = ca->mi.bucket_size;
-	fragmentation = a->fragmentation_lru;
+	fragmentation = alloc_lru_idx_fragmentation(*a, ca);

	ret = bch2_btree_write_buffer_tryflush(trans);
	bch_err_msg(c, ret, "flushing btree write buffer");
static int bch2_bucket_is_movable(struct btree_trans *trans,
				  struct move_bucket *b, u64 time)
{
+	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_alloc_v4 _a;
	if (ret)
		return ret;

+	struct bch_dev *ca = bch2_dev_tryget(c, k.k->p.inode);
+	if (!ca)
+		goto out;
+
	a = bch2_alloc_to_v4(k, &_a);
	b->k.gen	= a->gen;
	b->sectors	= bch2_bucket_sectors_dirty(*a);
+	u64 lru_idx	= alloc_lru_idx_fragmentation(*a, ca);

-	ret = data_type_movable(a->data_type) &&
-		a->fragmentation_lru &&
-		a->fragmentation_lru <= time;
+	ret = lru_idx && lru_idx <= time;

+	bch2_dev_put(ca);
+out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
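/*
 * Reviewer note: dropping the explicit data_type_movable() test is safe
 * only because alloc_lru_idx_fragmentation() already returns 0 for
 * non-movable buckets (see the sketch near the top of this patch), so the
 * old and new predicates are equivalent:
 */
static bool bucket_is_movable_sketch(const struct bch_alloc_v4 *a,
				     struct bch_dev *ca, u64 time)
{
	u64 lru_idx = alloc_lru_idx_fragmentation(*a, ca);	/* 0 if unmovable */

	return lru_idx && lru_idx <= time;
}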
	x(alloc_key_data_type_inconsistency,	101,	0)		\
	x(alloc_key_to_missing_dev_bucket,	102,	0)		\
	x(alloc_key_cached_inconsistency,	103,	0)		\
-	x(alloc_key_cached_but_read_time_zero,	104,	0)		\
-	x(alloc_key_to_missing_lru_entry,	105,	0)		\
+	x(alloc_key_cached_but_read_time_zero,	104,	FSCK_AUTOFIX)	\
+	x(alloc_key_to_missing_lru_entry,	105,	FSCK_AUTOFIX)	\
	x(alloc_key_data_type_wrong,		106,	FSCK_AUTOFIX)	\
	x(alloc_key_gen_wrong,			107,	FSCK_AUTOFIX)	\
	x(alloc_key_dirty_sectors_wrong,	108,	FSCK_AUTOFIX)	\
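/*
 * Note: FSCK_AUTOFIX lets fsck repair these errors without prompting,
 * reasonable now that the LRU checks above re-derive the index rather than
 * trusting a stored field. For context, the x() list is consumed roughly
 * like this (simplified sketch of sb-errors_format.h):
 */
enum bch_sb_error_id_sketch {
#define x(t, n, ...) BCH_FSCK_ERR_##t = n,
	BCH_SB_ERRS()
#undef x
};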