// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "disk_groups.h"
#include "inode.h"
#include "io.h"
#include "journal_reclaim.h"
#include "keylist.h"
#include "move.h"
#include "replicas.h"
#include "super-io.h"
#include "trace.h"

#include <linux/ioprio.h>
#include <linux/kthread.h>

#define SECTORS_IN_FLIGHT_PER_DEVICE	2048

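/*
 * Tracks a single extent being moved: the read of the old copy, then the
 * write of that data to its new location. One of these is allocated per
 * extent by bch2_move_extent(); both bios share bi_inline_vecs.
 */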
struct moving_io {
	struct list_head	list;
	struct closure		cl;
	bool			read_completed;

	unsigned		read_sectors;
	unsigned		write_sectors;

	struct bch_read_bio	rbio;

	struct migrate_write	write;
	/* Must be last since it is variable size */
	struct bio_vec		bi_inline_vecs[];
};

struct moving_context {
	/* Closure for waiting on all reads and writes to complete */
	struct closure		cl;

	struct bch_move_stats	*stats;

	struct list_head	reads;

	/* in flight sectors: */
	atomic_t		read_sectors;
	atomic_t		write_sectors;

	wait_queue_head_t	wait;
};

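/*
 * Index update path for data moves: called when the write of the moved data
 * completes. Re-looks up the original extent and splices in pointers to the
 * new copy, unless the extent was overwritten while the move was in flight,
 * in which case the key no longer matches and we count it as raced.
 */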
static int bch2_migrate_index_update(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct btree_trans trans;
	struct btree_iter *iter;
	struct migrate_write *m =
		container_of(op, struct migrate_write, op);
	struct keylist *keys = &op->insert_keys;
	struct bkey_buf _new, _insert;
	int ret = 0;

	bch2_bkey_buf_init(&_new);
	bch2_bkey_buf_init(&_insert);
	bch2_bkey_buf_realloc(&_insert, c, U8_MAX);

	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);

	iter = bch2_trans_get_iter(&trans, m->btree_id,
				   bkey_start_pos(&bch2_keylist_front(keys)->k),
				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	while (1) {
		struct bkey_s_c k;
		struct bkey_i *insert;
		struct bkey_i_extent *new;
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;
		bool did_work = false;
		bool extending = false, should_check_enospc;
		s64 i_sectors_delta = 0, disk_sectors_delta = 0;

		bch2_trans_reset(&trans, 0);

		k = bch2_btree_iter_peek_slot(iter);
		ret = bkey_err(k);
		if (ret)
			goto err;

		new = bkey_i_to_extent(bch2_keylist_front(keys));

		if (bversion_cmp(k.k->version, new->k.version) ||
		    !bch2_bkey_matches_ptr(c, k, m->ptr, m->offset))
			goto nomatch;

		bkey_reassemble(_insert.k, k);
		insert = _insert.k;

		bch2_bkey_buf_copy(&_new, c, bch2_keylist_front(keys));
		new = bkey_i_to_extent(_new.k);
		bch2_cut_front(iter->pos, &new->k_i);

		bch2_cut_front(iter->pos, insert);
		bch2_cut_back(new->k.p, insert);
		bch2_cut_back(insert->k.p, &new->k_i);

		if (m->data_cmd == DATA_REWRITE) {
			struct bch_extent_ptr *new_ptr, *old_ptr = (void *)
				bch2_bkey_has_device(bkey_i_to_s_c(insert),
						     m->data_opts.rewrite_dev);
			if (!old_ptr)
				goto nomatch;

			if (old_ptr->cached)
				extent_for_each_ptr(extent_i_to_s(new), new_ptr)
					new_ptr->cached = true;

			bch2_bkey_drop_ptr(bkey_i_to_s(insert), old_ptr);
		}

		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry) {
			if (bch2_bkey_has_device(bkey_i_to_s_c(insert), p.ptr.dev)) {
				/*
				 * raced with another move op? extent already
				 * has a pointer to the device we just wrote
				 * data to
				 */
				continue;
			}

			bch2_extent_ptr_decoded_append(insert, &p);
			did_work = true;
		}

		if (!did_work)
			goto nomatch;

		bch2_bkey_narrow_crcs(insert,
				(struct bch_extent_crc_unpacked) { 0 });
		bch2_extent_normalize(c, bkey_i_to_s(insert));
		bch2_bkey_mark_replicas_cached(c, bkey_i_to_s(insert),
					       op->opts.background_target,
					       op->opts.data_replicas);

		ret = bch2_sum_sector_overwrites(&trans, iter, insert,
						 &extending,
						 &should_check_enospc,
						 &i_sectors_delta,
						 &disk_sectors_delta);
		if (ret)
			goto err;

		if (disk_sectors_delta > (s64) op->res.sectors) {
			ret = bch2_disk_reservation_add(c, &op->res,
						disk_sectors_delta - op->res.sectors,
						!should_check_enospc
						? BCH_DISK_RESERVATION_NOFAIL : 0);
			if (ret)
				goto out;
		}

		bch2_trans_update(&trans, iter, insert, 0);

		ret = bch2_trans_commit(&trans, &op->res,
				op_journal_seq(op),
				BTREE_INSERT_NOFAIL|
				m->data_opts.btree_insert_flags);
err:
		if (!ret)
			atomic_long_inc(&c->extent_migrate_done);
		if (ret == -EINTR)
			ret = 0;
		if (ret)
			break;
next:
		while (bkey_cmp(iter->pos, bch2_keylist_front(keys)->k.p) >= 0) {
			bch2_keylist_pop_front(keys);
			if (bch2_keylist_empty(keys))
				goto out;
		}
		continue;
nomatch:
		if (m->ctxt) {
			BUG_ON(k.k->p.offset <= iter->pos.offset);
			atomic64_inc(&m->ctxt->stats->keys_raced);
			atomic64_add(k.k->p.offset - iter->pos.offset,
				     &m->ctxt->stats->sectors_raced);
		}
		atomic_long_inc(&c->extent_migrate_raced);
		trace_move_race(&new->k);
		bch2_btree_iter_next_slot(iter);
		goto next;
	}
out:
	bch2_trans_exit(&trans);
	bch2_bkey_buf_exit(&_insert, c);
	bch2_bkey_buf_exit(&_new, c);
	BUG_ON(ret == -EINTR);
	return ret;
}

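/*
 * Fill in the parts of the write op that depend on which replica the read
 * came from: the pointer and offset the data was read at (so the index
 * update can verify the extent still points there), the checksum state,
 * version, and encryption nonce.
 */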
void bch2_migrate_read_done(struct migrate_write *m, struct bch_read_bio *rbio)
{
	/* write bio must own pages: */
	BUG_ON(!m->op.wbio.bio.bi_vcnt);

	m->ptr		= rbio->pick.ptr;
	m->offset	= rbio->pos.offset - rbio->pick.crc.offset;
	m->op.devs_have	= rbio->devs_have;
	m->op.pos	= rbio->pos;
	m->op.version	= rbio->version;
	m->op.crc	= rbio->pick.crc;
	m->op.wbio.bio.bi_iter.bi_size = m->op.crc.compressed_size << 9;

	if (bch2_csum_type_is_encryption(m->op.crc.csum_type)) {
		m->op.nonce	= m->op.crc.nonce + m->op.crc.offset;
		m->op.csum_type = m->op.crc.csum_type;
	}

	if (m->data_cmd == DATA_REWRITE)
		bch2_dev_list_drop_dev(&m->op.devs_have, m->data_opts.rewrite_dev);
}

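/*
 * Set up the write half of a data move: initialize the write op, point its
 * index update hook at bch2_migrate_index_update(), and take any disk
 * reservation the data_cmd requires up front.
 */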
int bch2_migrate_write_init(struct bch_fs *c, struct migrate_write *m,
			    struct write_point_specifier wp,
			    struct bch_io_opts io_opts,
			    enum data_cmd data_cmd,
			    struct data_opts data_opts,
			    enum btree_id btree_id,
			    struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	int ret;

	m->btree_id	= btree_id;
	m->data_cmd	= data_cmd;
	m->data_opts	= data_opts;
	m->nr_ptrs_reserved = 0;

	bch2_write_op_init(&m->op, c, io_opts);

	if (!bch2_bkey_is_incompressible(k))
		m->op.compression_type =
			bch2_compression_opt_to_type[io_opts.background_compression ?:
						     io_opts.compression];
	else
		m->op.incompressible = true;
	m->op.target		= data_opts.target;
	m->op.write_point	= wp;

	if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE) {
		m->op.alloc_reserve = RESERVE_MOVINGGC;
		m->op.flags |= BCH_WRITE_ALLOC_NOWAIT;
	} else {
		/* XXX: this should probably be passed in */
		m->op.flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS;
	}

	m->op.flags |= BCH_WRITE_PAGES_STABLE|
		BCH_WRITE_PAGES_OWNED|
		BCH_WRITE_DATA_ENCODED|
		BCH_WRITE_FROM_INTERNAL;

	m->op.nr_replicas	= data_opts.nr_replicas;
	m->op.nr_replicas_required = data_opts.nr_replicas;
	m->op.index_update_fn	= bch2_migrate_index_update;

	switch (data_cmd) {
	case DATA_ADD_REPLICAS: {
		/*
		 * DATA_ADD_REPLICAS is used for moving data to a different
		 * device in the background, and due to compression the new copy
		 * might take up more space than the old copy:
		 */
#if 0
		int nr = (int) io_opts.data_replicas -
			bch2_bkey_nr_ptrs_allocated(k);
#endif
		int nr = (int) io_opts.data_replicas;

		if (nr > 0) {
			m->op.nr_replicas = m->nr_ptrs_reserved = nr;

			ret = bch2_disk_reservation_get(c, &m->op.res,
					k.k->size, m->op.nr_replicas, 0);
			if (ret)
				return ret;
		}
		break;
	}
	case DATA_REWRITE: {
		unsigned compressed_sectors = 0;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			if (p.ptr.dev == data_opts.rewrite_dev &&
			    !p.ptr.cached &&
			    crc_is_compressed(p.crc))
				compressed_sectors += p.crc.compressed_size;

		if (compressed_sectors) {
			ret = bch2_disk_reservation_add(c, &m->op.res,
					k.k->size * m->op.nr_replicas,
					BCH_DISK_RESERVATION_NOFAIL);
			if (ret)
				return ret;
		}
		break;
	}
	case DATA_PROMOTE:
		m->op.flags	|= BCH_WRITE_ALLOC_NOWAIT;
		m->op.flags	|= BCH_WRITE_CACHED;
		break;
	default:
		BUG();
	}

	return 0;
}

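/*
 * Completion path for a moving_io: once move_read_endio() marks the read
 * complete, do_pending_writes() kicks off move_write(), which starts the
 * write; move_write_done() runs when it finishes, and move_free() releases
 * the pages and disk reservation.
 */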
static void move_free(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct moving_context *ctxt = io->write.ctxt;
	struct bvec_iter_all iter;
	struct bio_vec *bv;

	bch2_disk_reservation_put(io->write.op.c, &io->write.op.res);

	bio_for_each_segment_all(bv, &io->write.op.wbio.bio, iter)
		if (bv->bv_page)
			__free_page(bv->bv_page);

	wake_up(&ctxt->wait);

	kfree(io);
}

static void move_write_done(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);

	atomic_sub(io->write_sectors, &io->write.ctxt->write_sectors);
	closure_return_with_destructor(cl, move_free);
}

static void move_write(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);

	if (unlikely(io->rbio.bio.bi_status || io->rbio.hole)) {
		closure_return_with_destructor(cl, move_free);
		return;
	}

	bch2_migrate_read_done(&io->write, &io->rbio);

	atomic_add(io->write_sectors, &io->write.ctxt->write_sectors);
	closure_call(&io->write.op.cl, bch2_write, NULL, cl);
	continue_at(cl, move_write_done, NULL);
}

static inline struct moving_io *next_pending_write(struct moving_context *ctxt)
{
	struct moving_io *io =
		list_first_entry_or_null(&ctxt->reads, struct moving_io, list);

	return io && io->read_completed ? io : NULL;
}

static void move_read_endio(struct bio *bio)
{
	struct moving_io *io = container_of(bio, struct moving_io, rbio.bio);
	struct moving_context *ctxt = io->write.ctxt;

	atomic_sub(io->read_sectors, &ctxt->read_sectors);
	io->read_completed = true;

	if (next_pending_write(ctxt))
		wake_up(&ctxt->wait);

	closure_put(&ctxt->cl);
}

static void do_pending_writes(struct moving_context *ctxt)
{
	struct moving_io *io;

	while ((io = next_pending_write(ctxt))) {
		list_del(&io->list);
		closure_call(&io->cl, move_write, NULL, &ctxt->cl);
	}
}

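/*
 * Like wait_event(), but kicks off any writes whose reads have completed
 * before each check of the condition: the move path itself is what issues
 * the writes, so progress can't depend on another thread doing it.
 */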
#define move_ctxt_wait_event(_ctxt, _cond)			\
do {								\
	do_pending_writes(_ctxt);				\
								\
	if (_cond)						\
		break;						\
	__wait_event((_ctxt)->wait,				\
		     next_pending_write(_ctxt) || (_cond));	\
} while (1)

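/*
 * Sleep until the count of in-flight write sectors changes or drops to zero,
 * i.e. until at least one write completes; used as backpressure when a
 * moving_io can't be allocated.
 */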
static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt)
{
	unsigned sectors_pending = atomic_read(&ctxt->write_sectors);

	move_ctxt_wait_event(ctxt,
		!atomic_read(&ctxt->write_sectors) ||
		atomic_read(&ctxt->write_sectors) != sectors_pending);
}

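/*
 * Queue one extent for moving: throttle on in-flight sectors, allocate and
 * initialize a moving_io, then kick off the read. The write is started later
 * by do_pending_writes() once the read completes.
 */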
static int bch2_move_extent(struct btree_trans *trans,
			    struct moving_context *ctxt,
			    struct write_point_specifier wp,
			    struct bch_io_opts io_opts,
			    enum btree_id btree_id,
			    struct bkey_s_c k,
			    enum data_cmd data_cmd,
			    struct data_opts data_opts)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct moving_io *io;
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned sectors = k.k->size, pages;
	int ret = -ENOMEM;

	move_ctxt_wait_event(ctxt,
		atomic_read(&ctxt->write_sectors) <
		SECTORS_IN_FLIGHT_PER_DEVICE);

	move_ctxt_wait_event(ctxt,
		atomic_read(&ctxt->read_sectors) <
		SECTORS_IN_FLIGHT_PER_DEVICE);

	/* write path might have to decompress data: */
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		sectors = max_t(unsigned, sectors, p.crc.uncompressed_size);

	pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
	io = kzalloc(sizeof(struct moving_io) +
		     sizeof(struct bio_vec) * pages, GFP_KERNEL);
	if (!io)
		goto err;

	io->write.ctxt		= ctxt;
	io->read_sectors	= k.k->size;
	io->write_sectors	= k.k->size;

	bio_init(&io->write.op.wbio.bio, NULL, io->bi_inline_vecs, pages, 0);
	bio_set_prio(&io->write.op.wbio.bio,
		     IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	if (bch2_bio_alloc_pages(&io->write.op.wbio.bio, sectors << 9,
				 GFP_KERNEL))
		goto err_free;

	io->rbio.c		= c;
	io->rbio.opts		= io_opts;
	bio_init(&io->rbio.bio, NULL, io->bi_inline_vecs, pages, 0);
	io->rbio.bio.bi_vcnt = pages;
	bio_set_prio(&io->rbio.bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
	io->rbio.bio.bi_iter.bi_size = sectors << 9;

	io->rbio.bio.bi_opf		= REQ_OP_READ;
	io->rbio.bio.bi_iter.bi_sector	= bkey_start_offset(k.k);
	io->rbio.bio.bi_end_io		= move_read_endio;

	ret = bch2_migrate_write_init(c, &io->write, wp, io_opts,
				      data_cmd, data_opts, btree_id, k);
	if (ret)
		goto err_free_pages;

	atomic64_inc(&ctxt->stats->keys_moved);
	atomic64_add(k.k->size, &ctxt->stats->sectors_moved);

	trace_move_extent(k.k);

	atomic_add(io->read_sectors, &ctxt->read_sectors);
	list_add_tail(&io->list, &ctxt->reads);

	/*
	 * dropped by move_read_endio() - guards against use after free of
	 * ctxt when doing wakeup
	 */
	closure_get(&ctxt->cl);
	bch2_read_extent(trans, &io->rbio, k, 0,
			 BCH_READ_NODECODE|
			 BCH_READ_LAST_FRAGMENT);
	return 0;
err_free_pages:
	bio_free_pages(&io->write.op.wbio.bio);
err_free:
	kfree(io);
err:
	trace_move_alloc_fail(k.k);
	return ret;
}

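/*
 * Walk one btree from @start to @end, applying @pred to each extent to
 * decide whether and how to move it. Handles rate limiting, freezing,
 * kthread stop requests, and per-inode IO options.
 */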
static int __bch2_move_data(struct bch_fs *c,
			    struct moving_context *ctxt,
			    struct bch_ratelimit *rate,
			    struct write_point_specifier wp,
			    struct bpos start,
			    struct bpos end,
			    move_pred_fn pred, void *arg,
			    struct bch_move_stats *stats,
			    enum btree_id btree_id)
{
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
	struct bkey_buf sk;
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	struct data_opts data_opts;
	enum data_cmd data_cmd;
	u64 delay, cur_inum = U64_MAX;
	int ret = 0, ret2;

	bch2_bkey_buf_init(&sk);
	bch2_trans_init(&trans, c, 0, 0);

	stats->data_type = BCH_DATA_user;
	stats->btree_id	= btree_id;
	stats->pos	= POS_MIN;

	iter = bch2_trans_get_iter(&trans, btree_id, start,
				   BTREE_ITER_PREFETCH);

	if (rate)
		bch2_ratelimit_reset(rate);

	while (1) {
		do {
			delay = rate ? bch2_ratelimit_delay(rate) : 0;

			if (delay) {
				bch2_trans_unlock(&trans);
				set_current_state(TASK_INTERRUPTIBLE);
			}

			if (kthread && (ret = kthread_should_stop())) {
				__set_current_state(TASK_RUNNING);
				goto out;
			}

			if (delay)
				schedule_timeout(delay);

			if (unlikely(freezing(current))) {
				bch2_trans_unlock(&trans);
				move_ctxt_wait_event(ctxt, list_empty(&ctxt->reads));
				try_to_freeze();
			}
		} while (delay);
peek:
		k = bch2_btree_iter_peek(iter);

		stats->pos = iter->pos;

		if (!k.k)
			break;
		ret = bkey_err(k);
		if (ret)
			break;
		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
			break;

		if (!bkey_extent_is_direct_data(k.k))
			goto next_nondata;

		if (btree_id == BTREE_ID_EXTENTS &&
		    cur_inum != k.k->p.inode) {
			struct bch_inode_unpacked inode;

			/* don't hold btree locks while looking up inode: */
			bch2_trans_unlock(&trans);

			io_opts = bch2_opts_to_inode_opts(c->opts);
			if (!bch2_inode_find_by_inum(c, k.k->p.inode, &inode))
				bch2_io_opts_apply(&io_opts, bch2_inode_opts_get(&inode));
			cur_inum = k.k->p.inode;
			goto peek;
		}

		switch ((data_cmd = pred(c, arg, k, &io_opts, &data_opts))) {
		case DATA_SKIP:
			goto next;
		case DATA_SCRUB:
			BUG();
		case DATA_ADD_REPLICAS:
		case DATA_REWRITE:
		case DATA_PROMOTE:
			break;
		default:
			BUG();
		}

		/* unlock before doing IO: */
		bch2_bkey_buf_reassemble(&sk, c, k);
		k = bkey_i_to_s_c(sk.k);
		bch2_trans_unlock(&trans);

		ret2 = bch2_move_extent(&trans, ctxt, wp, io_opts, btree_id, k,
					data_cmd, data_opts);
		if (ret2) {
			if (ret2 == -EINTR) {
				bch2_trans_reset(&trans, 0);
				bch2_trans_cond_resched(&trans);
				continue;
			}

			if (ret2 == -ENOMEM) {
				/* memory allocation failure, wait for some IO to finish */
				bch2_move_ctxt_wait_for_io(ctxt);
				continue;
			}

			/* XXX signal failure */
			goto next;
		}

		if (rate)
			bch2_ratelimit_increment(rate, k.k->size);
next:
		atomic64_add(k.k->size * bch2_bkey_nr_ptrs_allocated(k),
			     &stats->sectors_seen);
next_nondata:
		bch2_btree_iter_next(iter);
		bch2_trans_cond_resched(&trans);
	}
out:
	ret = bch2_trans_exit(&trans) ?: ret;
	bch2_bkey_buf_exit(&sk, c);

	return ret;
}

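/*
 * Move all extents in [start, end) that @pred selects, in both the extents
 * and reflink btrees, then wait for every read and write to drain before
 * returning.
 */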
int bch2_move_data(struct bch_fs *c,
		   struct bch_ratelimit *rate,
		   struct write_point_specifier wp,
		   struct bpos start,
		   struct bpos end,
		   move_pred_fn pred, void *arg,
		   struct bch_move_stats *stats)
{
	struct moving_context ctxt = { .stats = stats };
	int ret;

	closure_init_stack(&ctxt.cl);
	INIT_LIST_HEAD(&ctxt.reads);
	init_waitqueue_head(&ctxt.wait);

	stats->data_type = BCH_DATA_user;

	ret =   __bch2_move_data(c, &ctxt, rate, wp, start, end,
				 pred, arg, stats, BTREE_ID_EXTENTS) ?:
		__bch2_move_data(c, &ctxt, rate, wp, start, end,
				 pred, arg, stats, BTREE_ID_REFLINK);

	move_ctxt_wait_event(&ctxt, list_empty(&ctxt.reads));
	closure_sync(&ctxt.cl);

	EBUG_ON(atomic_read(&ctxt.write_sectors));

	trace_move_data(c,
			atomic64_read(&stats->sectors_moved),
			atomic64_read(&stats->keys_moved));

	return ret;
}

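/*
 * Walk every node of every btree and rewrite the ones @pred selects; btree
 * nodes are moved via bch2_btree_node_rewrite() rather than through the
 * extent move path.
 */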
static int bch2_move_btree(struct bch_fs *c,
			   move_pred_fn pred,
			   void *arg,
			   struct bch_move_stats *stats)
{
	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
	struct btree_trans trans;
	struct btree_iter *iter;
	struct btree *b;
	unsigned id;
	struct data_opts data_opts;
	enum data_cmd cmd;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	stats->data_type = BCH_DATA_btree;

	for (id = 0; id < BTREE_ID_NR; id++) {
		stats->btree_id = id;

		for_each_btree_node(&trans, iter, id, POS_MIN,
				    BTREE_ITER_PREFETCH, b) {
			stats->pos = iter->pos;

			switch ((cmd = pred(c, arg,
					    bkey_i_to_s_c(&b->key),
					    &io_opts, &data_opts))) {
			case DATA_SKIP:
				goto next;
			case DATA_SCRUB:
				BUG();
			case DATA_ADD_REPLICAS:
			case DATA_REWRITE:
				break;
			default:
				BUG();
			}

			ret = bch2_btree_node_rewrite(c, iter,
					b->data->keys.seq, 0) ?: ret;
next:
			bch2_trans_cond_resched(&trans);
		}

		ret = bch2_trans_iter_free(&trans, iter) ?: ret;
	}

	bch2_trans_exit(&trans);

	return ret;
}

#if 0
static enum data_cmd scrub_pred(struct bch_fs *c, void *arg,
				struct bkey_s_c k,
				struct bch_io_opts *io_opts,
				struct data_opts *data_opts)
{
	return DATA_SCRUB;
}
#endif

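/*
 * Select keys whose durability is below the configured replica count, so
 * bch2_data_job() can bring them back up to the desired redundancy.
 */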
static enum data_cmd rereplicate_pred(struct bch_fs *c, void *arg,
				      struct bkey_s_c k,
				      struct bch_io_opts *io_opts,
				      struct data_opts *data_opts)
{
	unsigned nr_good = bch2_bkey_durability(c, k);
	unsigned replicas = 0;

	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
		replicas = c->opts.metadata_replicas;
		break;
	case KEY_TYPE_extent:
		replicas = io_opts->data_replicas;
		break;
	}

	if (!nr_good || nr_good >= replicas)
		return DATA_SKIP;

	data_opts->target		= 0;
	data_opts->nr_replicas		= 1;
	data_opts->btree_insert_flags	= 0;
	return DATA_ADD_REPLICAS;
}

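/*
 * Select keys with a pointer on the device being evacuated, so that copy is
 * rewritten onto other devices.
 */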
static enum data_cmd migrate_pred(struct bch_fs *c, void *arg,
				  struct bkey_s_c k,
				  struct bch_io_opts *io_opts,
				  struct data_opts *data_opts)
{
	struct bch_ioctl_data *op = arg;

	if (!bch2_bkey_has_device(k, op->migrate.dev))
		return DATA_SKIP;

	data_opts->target		= 0;
	data_opts->nr_replicas		= 1;
	data_opts->btree_insert_flags	= 0;
	data_opts->rewrite_dev		= op->migrate.dev;
	return DATA_REWRITE;
}

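/*
 * Run a BCH_IOCTL_DATA job: flush journal pins for the affected device(s),
 * rewrite btree nodes, then move user data, garbage collecting the replicas
 * table as each stage completes.
 */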
int bch2_data_job(struct bch_fs *c,
		  struct bch_move_stats *stats,
		  struct bch_ioctl_data op)
{
	int ret = 0;

	switch (op.op) {
	case BCH_DATA_OP_REREPLICATE:
		stats->data_type = BCH_DATA_journal;
		ret = bch2_journal_flush_device_pins(&c->journal, -1);

		ret = bch2_move_btree(c, rereplicate_pred, c, stats) ?: ret;

		closure_wait_event(&c->btree_interior_update_wait,
				   !bch2_btree_interior_updates_nr_pending(c));

		ret = bch2_replicas_gc2(c) ?: ret;

		ret = bch2_move_data(c, NULL,
				     writepoint_hashed((unsigned long) current),
				     op.start,
				     op.end,
				     rereplicate_pred, c, stats) ?: ret;
		ret = bch2_replicas_gc2(c) ?: ret;
		break;
	case BCH_DATA_OP_MIGRATE:
		if (op.migrate.dev >= c->sb.nr_devices)
			return -EINVAL;

		stats->data_type = BCH_DATA_journal;
		ret = bch2_journal_flush_device_pins(&c->journal, op.migrate.dev);

		ret = bch2_move_btree(c, migrate_pred, &op, stats) ?: ret;
		ret = bch2_replicas_gc2(c) ?: ret;

		ret = bch2_move_data(c, NULL,
				     writepoint_hashed((unsigned long) current),
				     op.start,
				     op.end,
				     migrate_pred, &op, stats) ?: ret;
		ret = bch2_replicas_gc2(c) ?: ret;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}