// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
#include "inode.h"
#include "io.h"
#include "journal_reclaim.h"
#include "keylist.h"
#include "move.h"
#include "replicas.h"
#include "super-io.h"
#include "trace.h"

#include <linux/ioprio.h>
#include <linux/kthread.h>

#define SECTORS_IN_FLIGHT_PER_DEVICE	2048

struct moving_io {
	struct list_head	list;
	struct closure		cl;
	bool			read_completed;

	unsigned		read_sectors;
	unsigned		write_sectors;

	struct bch_read_bio	rbio;

	struct migrate_write	write;
	/* Must be last since it is variable size */
	struct bio_vec		bi_inline_vecs[0];
};

struct moving_context {
	/* Closure for waiting on all reads and writes to complete */
	struct closure		cl;

	struct bch_move_stats	*stats;

	struct list_head	reads;

	/* in flight sectors: */
	atomic_t		read_sectors;
	atomic_t		write_sectors;

	wait_queue_head_t	wait;
};

static int bch2_migrate_index_update(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct migrate_write *m =
		container_of(op, struct migrate_write, op);
	struct keylist *keys = &op->insert_keys;
	struct btree_iter iter;
	int ret = 0;

	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
			     bkey_start_pos(&bch2_keylist_front(keys)->k),
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	while (1) {
		struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
		struct bkey_i_extent *insert, *new =
			bkey_i_to_extent(bch2_keylist_front(keys));
		BKEY_PADDED(k) _new, _insert;
		struct bch_extent_ptr *ptr;
		struct bch_extent_crc_unpacked crc;
		bool did_work = false;
		int nr;

		if (btree_iter_err(k)) {
			ret = bch2_btree_iter_unlock(&iter);
			break;
		}

		if (bversion_cmp(k.k->version, new->k.version) ||
		    !bkey_extent_is_data(k.k) ||
		    !bch2_extent_matches_ptr(c, bkey_s_c_to_extent(k),
					     m->ptr, m->offset))
			goto nomatch;

		if (m->data_cmd == DATA_REWRITE &&
		    !bch2_extent_has_device(bkey_s_c_to_extent(k),
					    m->data_opts.rewrite_dev))
			goto nomatch;

		bkey_reassemble(&_insert.k, k);
		insert = bkey_i_to_extent(&_insert.k);

		bkey_copy(&_new.k, bch2_keylist_front(keys));
		new = bkey_i_to_extent(&_new.k);

		bch2_cut_front(iter.pos, &insert->k_i);
		bch2_cut_back(new->k.p, &insert->k);
		bch2_cut_back(insert->k.p, &new->k);

		if (m->data_cmd == DATA_REWRITE) {
			ptr = (struct bch_extent_ptr *)
				bch2_extent_has_device(extent_i_to_s_c(insert),
						       m->data_opts.rewrite_dev);
			bch2_extent_drop_ptr(extent_i_to_s(insert), ptr);
		}

		extent_for_each_ptr_crc(extent_i_to_s(new), ptr, crc) {
			if (bch2_extent_has_device(extent_i_to_s_c(insert), ptr->dev)) {
				/*
				 * raced with another move op? extent already
				 * has a pointer to the device we just wrote
				 * data to
				 */
				continue;
			}

			bch2_extent_crc_append(insert, crc);
			extent_ptr_append(insert, *ptr);
			did_work = true;
		}

		if (!did_work)
			goto nomatch;

		bch2_extent_narrow_crcs(insert,
				(struct bch_extent_crc_unpacked) { 0 });
		bch2_extent_normalize(c, extent_i_to_s(insert).s);
		bch2_extent_mark_replicas_cached(c, extent_i_to_s(insert),
						 op->opts.background_target,
						 op->opts.data_replicas);

		/*
		 * It's possible we race, and for whatever reason the extent now
		 * has fewer replicas than when we last looked at it - meaning
		 * we need to get a disk reservation here:
		 */
		nr = bch2_extent_nr_dirty_ptrs(bkey_i_to_s_c(&insert->k_i)) -
			(bch2_extent_nr_dirty_ptrs(k) + m->nr_ptrs_reserved);
		if (nr > 0) {
			/*
			 * can't call bch2_disk_reservation_add() with btree
			 * locks held, at least not without a song and dance
			 */
			bch2_btree_iter_unlock(&iter);

			ret = bch2_disk_reservation_add(c, &op->res,
					keylist_sectors(keys) * nr, 0);
			if (ret)
				goto out;

			m->nr_ptrs_reserved += nr;
			goto next;
		}

		ret = bch2_mark_bkey_replicas(c, BCH_DATA_USER,
					      extent_i_to_s_c(insert).s_c);
		if (ret)
			break;

		ret = bch2_btree_insert_at(c, &op->res,
				op_journal_seq(op),
				BTREE_INSERT_ATOMIC|
				BTREE_INSERT_NOFAIL|
				BTREE_INSERT_USE_RESERVE|
				m->data_opts.btree_insert_flags,
				BTREE_INSERT_ENTRY(&iter, &insert->k_i));
		if (!ret)
			atomic_long_inc(&c->extent_migrate_done);
		if (ret == -EINTR)
			ret = 0;
		if (ret)
			break;
next:
		while (bkey_cmp(iter.pos, bch2_keylist_front(keys)->k.p) >= 0) {
			bch2_keylist_pop_front(keys);
			if (bch2_keylist_empty(keys))
				goto out;
		}

		bch2_cut_front(iter.pos, bch2_keylist_front(keys));
		continue;
nomatch:
		if (m->ctxt)
			atomic64_add(k.k->p.offset - iter.pos.offset,
				     &m->ctxt->stats->sectors_raced);
		atomic_long_inc(&c->extent_migrate_raced);
		trace_move_race(&new->k);
		bch2_btree_iter_next_slot(&iter);
		goto next;
	}
out:
	bch2_btree_iter_unlock(&iter);
	return ret;
}

void bch2_migrate_read_done(struct migrate_write *m, struct bch_read_bio *rbio)
{
	/* write bio must own pages: */
	BUG_ON(!m->op.wbio.bio.bi_vcnt);

	m->ptr		= rbio->pick.ptr;
	m->offset	= rbio->pos.offset - rbio->pick.crc.offset;
	m->op.devs_have	= rbio->devs_have;
	m->op.pos	= rbio->pos;
	m->op.version	= rbio->version;
	m->op.crc	= rbio->pick.crc;
	m->op.wbio.bio.bi_iter.bi_size = m->op.crc.compressed_size << 9;

	if (bch2_csum_type_is_encryption(m->op.crc.csum_type)) {
		m->op.nonce	= m->op.crc.nonce + m->op.crc.offset;
		m->op.csum_type = m->op.crc.csum_type;
	}

	if (m->data_cmd == DATA_REWRITE)
		bch2_dev_list_drop_dev(&m->op.devs_have, m->data_opts.rewrite_dev);
}

int bch2_migrate_write_init(struct bch_fs *c, struct migrate_write *m,
			    struct write_point_specifier wp,
			    struct bch_io_opts io_opts,
			    enum data_cmd data_cmd,
			    struct data_opts data_opts,
			    struct bkey_s_c k)
{
	int ret;

	m->data_cmd	= data_cmd;
	m->data_opts	= data_opts;
	m->nr_ptrs_reserved = 0;

	bch2_write_op_init(&m->op, c, io_opts);
	m->op.compression_type =
		bch2_compression_opt_to_type[io_opts.background_compression ?:
					     io_opts.compression];
	m->op.target	= data_opts.target;
	m->op.write_point = wp;

	if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE)
		m->op.alloc_reserve = RESERVE_MOVINGGC;

	m->op.flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS|
		BCH_WRITE_PAGES_STABLE|
		BCH_WRITE_PAGES_OWNED|
		BCH_WRITE_DATA_ENCODED|
		BCH_WRITE_NOMARK_REPLICAS;

	m->op.nr_replicas	= 1;
	m->op.nr_replicas_required = 1;
	m->op.index_update_fn	= bch2_migrate_index_update;

	switch (data_cmd) {
	case DATA_ADD_REPLICAS: {
		int nr = (int) io_opts.data_replicas -
			bch2_extent_nr_dirty_ptrs(k);

		if (nr > 0) {
			m->op.nr_replicas = m->nr_ptrs_reserved = nr;

			ret = bch2_disk_reservation_get(c, &m->op.res,
					k.k->size, m->op.nr_replicas, 0);
			if (ret)
				return ret;
		}
		break;
	}
	case DATA_REWRITE:
		break;
	case DATA_PROMOTE:
		m->op.flags	|= BCH_WRITE_ALLOC_NOWAIT;
		m->op.flags	|= BCH_WRITE_CACHED;
		break;
	default:
		BUG();
	}

	return 0;
}

static void move_free(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct moving_context *ctxt = io->write.ctxt;
	struct bvec_iter_all iter;
	struct bio_vec *bv;

	bch2_disk_reservation_put(io->write.op.c, &io->write.op.res);

	bio_for_each_segment_all(bv, &io->write.op.wbio.bio, iter)
		if (bv->bv_page)
			__free_page(bv->bv_page);

	wake_up(&ctxt->wait);

	kfree(io);
}

static void move_write_done(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);

	atomic_sub(io->write_sectors, &io->write.ctxt->write_sectors);
	closure_return_with_destructor(cl, move_free);
}

static void move_write(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);

	if (unlikely(io->rbio.bio.bi_status || io->rbio.hole)) {
		closure_return_with_destructor(cl, move_free);
		return;
	}

	bch2_migrate_read_done(&io->write, &io->rbio);

	atomic_add(io->write_sectors, &io->write.ctxt->write_sectors);
	closure_call(&io->write.op.cl, bch2_write, NULL, cl);
	continue_at(cl, move_write_done, NULL);
}

static inline struct moving_io *next_pending_write(struct moving_context *ctxt)
{
	struct moving_io *io =
		list_first_entry_or_null(&ctxt->reads, struct moving_io, list);

	return io && io->read_completed ? io : NULL;
}

static void move_read_endio(struct bio *bio)
{
	struct moving_io *io = container_of(bio, struct moving_io, rbio.bio);
	struct moving_context *ctxt = io->write.ctxt;

	atomic_sub(io->read_sectors, &ctxt->read_sectors);
	io->read_completed = true;

	if (next_pending_write(ctxt))
		wake_up(&ctxt->wait);

	closure_put(&ctxt->cl);
}

static void do_pending_writes(struct moving_context *ctxt)
{
	struct moving_io *io;

	while ((io = next_pending_write(ctxt))) {
		list_del(&io->list);
		closure_call(&io->cl, move_write, NULL, &ctxt->cl);
	}
}

#define move_ctxt_wait_event(_ctxt, _cond)			\
do {								\
	do_pending_writes(_ctxt);				\
								\
	if (_cond)						\
		break;						\
	__wait_event((_ctxt)->wait,				\
		     next_pending_write(_ctxt) || (_cond));	\
} while (1)

static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt)
{
	unsigned sectors_pending = atomic_read(&ctxt->write_sectors);

	move_ctxt_wait_event(ctxt,
		!atomic_read(&ctxt->write_sectors) ||
		atomic_read(&ctxt->write_sectors) != sectors_pending);
}

static int bch2_move_extent(struct bch_fs *c,
			    struct moving_context *ctxt,
			    struct write_point_specifier wp,
			    struct bch_io_opts io_opts,
			    struct bkey_s_c_extent e,
			    enum data_cmd data_cmd,
			    struct data_opts data_opts)
{
	struct moving_io *io;
	const struct bch_extent_ptr *ptr;
	struct bch_extent_crc_unpacked crc;
	unsigned sectors = e.k->size, pages;
	int ret = -ENOMEM;

	move_ctxt_wait_event(ctxt,
		atomic_read(&ctxt->write_sectors) <
		SECTORS_IN_FLIGHT_PER_DEVICE);

	move_ctxt_wait_event(ctxt,
		atomic_read(&ctxt->read_sectors) <
		SECTORS_IN_FLIGHT_PER_DEVICE);

	/* write path might have to decompress data: */
	extent_for_each_ptr_crc(e, ptr, crc)
		sectors = max_t(unsigned, sectors, crc.uncompressed_size);

	pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
	io = kzalloc(sizeof(struct moving_io) +
		     sizeof(struct bio_vec) * pages, GFP_KERNEL);
	if (!io)
		goto err;

	io->write.ctxt		= ctxt;
	io->read_sectors	= e.k->size;
	io->write_sectors	= e.k->size;

	bio_init(&io->write.op.wbio.bio, NULL, io->bi_inline_vecs, pages, 0);
	bio_set_prio(&io->write.op.wbio.bio,
		     IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	if (bch2_bio_alloc_pages(&io->write.op.wbio.bio, sectors << 9,
				 GFP_KERNEL))
		goto err_free;

	io->rbio.opts = io_opts;
	bio_init(&io->rbio.bio, NULL, io->bi_inline_vecs, pages, 0);
	io->rbio.bio.bi_vcnt = pages;
	bio_set_prio(&io->rbio.bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
	io->rbio.bio.bi_iter.bi_size = sectors << 9;

	io->rbio.bio.bi_opf		= REQ_OP_READ;
	io->rbio.bio.bi_iter.bi_sector	= bkey_start_offset(e.k);
	io->rbio.bio.bi_end_io		= move_read_endio;

	ret = bch2_migrate_write_init(c, &io->write, wp, io_opts,
				      data_cmd, data_opts, e.s_c);
	if (ret)
		goto err_free_pages;

	atomic64_inc(&ctxt->stats->keys_moved);
	atomic64_add(e.k->size, &ctxt->stats->sectors_moved);

	trace_move_extent(e.k);

	atomic_add(io->read_sectors, &ctxt->read_sectors);
	list_add_tail(&io->list, &ctxt->reads);

	/*
	 * dropped by move_read_endio() - guards against use after free of
	 * ctxt when doing wakeup
	 */
	closure_get(&ctxt->cl);
	bch2_read_extent(c, &io->rbio, e.s_c,
			 BCH_READ_NODECODE|
			 BCH_READ_LAST_FRAGMENT);
	return 0;
err_free_pages:
	bio_free_pages(&io->write.op.wbio.bio);
err_free:
	kfree(io);
err:
	trace_move_alloc_fail(e.k);
	return ret;
}

int bch2_move_data(struct bch_fs *c,
		   struct bch_ratelimit *rate,
		   struct write_point_specifier wp,
		   struct bpos start,
		   struct bpos end,
		   move_pred_fn pred, void *arg,
		   struct bch_move_stats *stats)
{
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	struct moving_context ctxt = { .stats = stats };
	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
	BKEY_PADDED(k) tmp;
	struct bkey_s_c k;
	struct bkey_s_c_extent e;
	struct data_opts data_opts;
	enum data_cmd data_cmd;
	u64 delay, cur_inum = U64_MAX;
	int ret = 0, ret2;

	closure_init_stack(&ctxt.cl);
	INIT_LIST_HEAD(&ctxt.reads);
	init_waitqueue_head(&ctxt.wait);

	stats->data_type = BCH_DATA_USER;
	bch2_btree_iter_init(&stats->iter, c, BTREE_ID_EXTENTS, start,
			     BTREE_ITER_PREFETCH);

	if (rate)
		bch2_ratelimit_reset(rate);

	while (1) {
		do {
			delay = rate ? bch2_ratelimit_delay(rate) : 0;

			if (delay) {
				bch2_btree_iter_unlock(&stats->iter);
				set_current_state(TASK_INTERRUPTIBLE);
			}

			if (kthread && (ret = kthread_should_stop())) {
				__set_current_state(TASK_RUNNING);
				goto out;
			}

			if (delay)
				schedule_timeout(delay);

			if (unlikely(freezing(current))) {
				bch2_btree_iter_unlock(&stats->iter);
				move_ctxt_wait_event(&ctxt, list_empty(&ctxt.reads));
				try_to_freeze();
			}
		} while (delay);
peek:
		k = bch2_btree_iter_peek(&stats->iter);
		if (!k.k)
			break;
		ret = btree_iter_err(k);
		if (ret)
			break;
		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
			break;

		if (!bkey_extent_is_data(k.k))
			goto next_nondata;

		e = bkey_s_c_to_extent(k);

		if (cur_inum != k.k->p.inode) {
			struct bch_inode_unpacked inode;

			/* don't hold btree locks while looking up inode: */
			bch2_btree_iter_unlock(&stats->iter);

			io_opts = bch2_opts_to_inode_opts(c->opts);
			if (!bch2_inode_find_by_inum(c, k.k->p.inode, &inode))
				bch2_io_opts_apply(&io_opts, bch2_inode_opts_get(&inode));
			cur_inum = k.k->p.inode;
			goto peek;
		}

		switch ((data_cmd = pred(c, arg, BKEY_TYPE_EXTENTS, e,
					 &io_opts, &data_opts))) {
		case DATA_SKIP:
			goto next;
		case DATA_SCRUB:
			BUG();
		case DATA_ADD_REPLICAS:
		case DATA_REWRITE:
		case DATA_PROMOTE:
			break;
		default:
			BUG();
		}

		/* unlock before doing IO: */
		bkey_reassemble(&tmp.k, k);
		k = bkey_i_to_s_c(&tmp.k);
		bch2_btree_iter_unlock(&stats->iter);

		ret2 = bch2_move_extent(c, &ctxt, wp, io_opts,
					bkey_s_c_to_extent(k),
					data_cmd, data_opts);
		if (ret2) {
			if (ret2 == -ENOMEM) {
				/* memory allocation failure, wait for some IO to finish */
				bch2_move_ctxt_wait_for_io(&ctxt);
				continue;
			}

			/* XXX signal failure */
			goto next;
		}

		if (rate)
			bch2_ratelimit_increment(rate, k.k->size);
next:
		atomic64_add(k.k->size * bch2_extent_nr_dirty_ptrs(k),
			     &stats->sectors_seen);
next_nondata:
		bch2_btree_iter_next(&stats->iter);
		bch2_btree_iter_cond_resched(&stats->iter);
	}
out:
	bch2_btree_iter_unlock(&stats->iter);

	move_ctxt_wait_event(&ctxt, list_empty(&ctxt.reads));
	closure_sync(&ctxt.cl);

	EBUG_ON(atomic_read(&ctxt.write_sectors));

	trace_move_data(c,
			atomic64_read(&stats->sectors_moved),
			atomic64_read(&stats->keys_moved));

	return ret;
}

static int bch2_gc_data_replicas(struct bch_fs *c)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	mutex_lock(&c->replicas_gc_lock);
	bch2_replicas_gc_start(c, (1 << BCH_DATA_USER)|(1 << BCH_DATA_CACHED));

	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS_MIN,
			   BTREE_ITER_PREFETCH, k) {
		ret = bch2_mark_bkey_replicas(c, BCH_DATA_USER, k);
		if (ret)
			break;
	}
	ret = bch2_btree_iter_unlock(&iter) ?: ret;

	bch2_replicas_gc_end(c, ret);
	mutex_unlock(&c->replicas_gc_lock);

	return ret;
}

static int bch2_gc_btree_replicas(struct bch_fs *c)
{
	struct btree_iter iter;
	struct btree *b;
	unsigned id;
	int ret = 0;

	mutex_lock(&c->replicas_gc_lock);
	bch2_replicas_gc_start(c, 1 << BCH_DATA_BTREE);

	for (id = 0; id < BTREE_ID_NR; id++) {
		for_each_btree_node(&iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) {
			ret = bch2_mark_bkey_replicas(c, BCH_DATA_BTREE,
						      bkey_i_to_s_c(&b->key));

			bch2_btree_iter_cond_resched(&iter);
		}

		ret = bch2_btree_iter_unlock(&iter) ?: ret;
	}

	bch2_replicas_gc_end(c, ret);
	mutex_unlock(&c->replicas_gc_lock);

	return ret;
}

static int bch2_move_btree(struct bch_fs *c,
			   move_pred_fn pred,
			   void *arg,
			   struct bch_move_stats *stats)
{
	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
	struct btree *b;
	unsigned id;
	struct data_opts data_opts;
	enum data_cmd cmd;
	int ret = 0;

	stats->data_type = BCH_DATA_BTREE;

	for (id = 0; id < BTREE_ID_NR; id++) {
		for_each_btree_node(&stats->iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) {
			switch ((cmd = pred(c, arg, BKEY_TYPE_BTREE,
					    bkey_i_to_s_c_extent(&b->key),
					    &io_opts,
					    &data_opts))) {
			case DATA_SKIP:
				goto next;
			case DATA_SCRUB:
				BUG();
			case DATA_ADD_REPLICAS:
			case DATA_REWRITE:
				break;
			default:
				BUG();
			}

			ret = bch2_btree_node_rewrite(c, &stats->iter,
						      b->data->keys.seq, 0) ?: ret;
next:
			bch2_btree_iter_cond_resched(&stats->iter);
		}

		ret = bch2_btree_iter_unlock(&stats->iter) ?: ret;
	}

	return ret;
}

#if 0
static enum data_cmd scrub_pred(struct bch_fs *c, void *arg,
				enum bkey_type type,
				struct bkey_s_c_extent e,
				struct bch_io_opts *io_opts,
				struct data_opts *data_opts)
{
	return DATA_SCRUB;
}
#endif

static enum data_cmd rereplicate_pred(struct bch_fs *c, void *arg,
				      enum bkey_type type,
				      struct bkey_s_c_extent e,
				      struct bch_io_opts *io_opts,
				      struct data_opts *data_opts)
{
	unsigned nr_good = bch2_extent_durability(c, e);
	unsigned replicas = type == BKEY_TYPE_BTREE
		? c->opts.metadata_replicas
		: io_opts->data_replicas;

	if (!nr_good || nr_good >= replicas)
		return DATA_SKIP;

	data_opts->target		= 0;
	data_opts->btree_insert_flags	= 0;
	return DATA_ADD_REPLICAS;
}

static enum data_cmd migrate_pred(struct bch_fs *c, void *arg,
				  enum bkey_type type,
				  struct bkey_s_c_extent e,
				  struct bch_io_opts *io_opts,
				  struct data_opts *data_opts)
{
	struct bch_ioctl_data *op = arg;

	if (!bch2_extent_has_device(e, op->migrate.dev))
		return DATA_SKIP;

	data_opts->target		= 0;
	data_opts->btree_insert_flags	= 0;
	data_opts->rewrite_dev		= op->migrate.dev;
	return DATA_REWRITE;
}

int bch2_data_job(struct bch_fs *c,
		  struct bch_move_stats *stats,
		  struct bch_ioctl_data op)
{
	int ret = 0;

	switch (op.op) {
	case BCH_DATA_OP_REREPLICATE:
		stats->data_type = BCH_DATA_JOURNAL;
		ret = bch2_journal_flush_device_pins(&c->journal, -1);

		ret = bch2_move_btree(c, rereplicate_pred, c, stats) ?: ret;
		ret = bch2_gc_btree_replicas(c) ?: ret;

		ret = bch2_move_data(c, NULL,
				     writepoint_hashed((unsigned long) current),
				     op.start,
				     op.end,
				     rereplicate_pred, c, stats) ?: ret;
		ret = bch2_gc_data_replicas(c) ?: ret;
		break;
	case BCH_DATA_OP_MIGRATE:
		if (op.migrate.dev >= c->sb.nr_devices)
			return -EINVAL;

		stats->data_type = BCH_DATA_JOURNAL;
		ret = bch2_journal_flush_device_pins(&c->journal, op.migrate.dev);

		ret = bch2_move_btree(c, migrate_pred, &op, stats) ?: ret;
		ret = bch2_gc_btree_replicas(c) ?: ret;

		ret = bch2_move_data(c, NULL,
				     writepoint_hashed((unsigned long) current),
				     op.start,
				     op.end,
				     migrate_pred, &op, stats) ?: ret;
		ret = bch2_gc_data_replicas(c) ?: ret;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}