// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
#include "disk_groups.h"
#include "inode.h"
#include "io.h"
#include "journal_reclaim.h"
#include "keylist.h"
#include "move.h"
#include "replicas.h"
#include "super-io.h"
#include "trace.h"

#include <linux/ioprio.h>
#include <linux/kthread.h>

#define SECTORS_IN_FLIGHT_PER_DEVICE	2048

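/*
 * State for a single extent being moved: the read bio and its
 * completion status, the write op it feeds, in-flight sector counts
 * for throttling, and the bvec array shared by both bios.
 */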
struct moving_io {
	struct list_head	list;
	struct closure		cl;
	bool			read_completed;

	unsigned		read_sectors;
	unsigned		write_sectors;

	struct bch_read_bio	rbio;

	struct migrate_write	write;
	/* Must be last since it is variable size */
	struct bio_vec		bi_inline_vecs[0];
};

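/*
 * State shared by all the moves issued in one bch2_move_data() pass:
 * the list of in-flight reads, and the in-flight sector counts used
 * to throttle new IO.
 */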
struct moving_context {
	/* Closure for waiting on all reads and writes to complete */
	struct closure		cl;

	struct bch_move_stats	*stats;

	struct list_head	reads;

	/* in flight sectors: */
	atomic_t		read_sectors;
	atomic_t		write_sectors;

	wait_queue_head_t	wait;
};

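/*
 * Index update hook for the move write path: splice the pointers for
 * the newly written replicas into the original extent, unless the
 * extent was overwritten or moved while the data was in flight - in
 * which case the write is counted as raced and dropped.
 */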
static int bch2_migrate_index_update(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct migrate_write *m =
		container_of(op, struct migrate_write, op);
	struct keylist *keys = &op->insert_keys;
	struct btree_iter iter;
	int ret = 0;

	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
			     bkey_start_pos(&bch2_keylist_front(keys)->k),
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	while (1) {
		struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
		struct bkey_i_extent *insert, *new =
			bkey_i_to_extent(bch2_keylist_front(keys));
		BKEY_PADDED(k) _new, _insert;
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;
		bool did_work = false;
		int nr;

		if (btree_iter_err(k)) {
			ret = bch2_btree_iter_unlock(&iter);
			break;
		}

		if (bversion_cmp(k.k->version, new->k.version) ||
		    !bkey_extent_is_data(k.k) ||
		    !bch2_extent_matches_ptr(c, bkey_s_c_to_extent(k),
					     m->ptr, m->offset))
			goto nomatch;

		if (m->data_cmd == DATA_REWRITE &&
		    !bch2_extent_has_device(bkey_s_c_to_extent(k),
					    m->data_opts.rewrite_dev))
			goto nomatch;

		bkey_reassemble(&_insert.k, k);
		insert = bkey_i_to_extent(&_insert.k);

		bkey_copy(&_new.k, bch2_keylist_front(keys));
		new = bkey_i_to_extent(&_new.k);

		bch2_cut_front(iter.pos, &insert->k_i);
		bch2_cut_back(new->k.p, &insert->k);
		bch2_cut_back(insert->k.p, &new->k);

		if (m->data_cmd == DATA_REWRITE)
			bch2_bkey_drop_device(extent_i_to_s(insert).s,
					      m->data_opts.rewrite_dev);

		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry) {
			if (bch2_extent_has_device(extent_i_to_s_c(insert), p.ptr.dev)) {
				/*
				 * raced with another move op? extent already
				 * has a pointer to the device we just wrote
				 * data to
				 */
				continue;
			}

			bch2_extent_ptr_decoded_append(insert, &p);
			did_work = true;
		}

		if (!did_work)
			goto nomatch;

		bch2_extent_narrow_crcs(insert,
				(struct bch_extent_crc_unpacked) { 0 });
		bch2_extent_normalize(c, extent_i_to_s(insert).s);
		bch2_extent_mark_replicas_cached(c, extent_i_to_s(insert),
						 op->opts.background_target,
						 op->opts.data_replicas);

		/*
		 * If we're not fully overwriting @k, and it's compressed, we
		 * need a reservation for all the pointers in @insert
		 */
		nr = bch2_bkey_nr_dirty_ptrs(bkey_i_to_s_c(&insert->k_i)) -
			m->nr_ptrs_reserved;

		if (insert->k.size < k.k->size &&
		    bch2_extent_is_compressed(k) &&
		    nr > 0) {
			/*
			 * can't call bch2_disk_reservation_add() with btree
			 * locks held, at least not without a song and dance
			 */
			bch2_btree_iter_unlock(&iter);

			ret = bch2_disk_reservation_add(c, &op->res,
					keylist_sectors(keys) * nr, 0);
			if (ret)
				goto out;

			m->nr_ptrs_reserved += nr;
			goto next;
		}

		ret = bch2_btree_insert_at(c, &op->res,
				op_journal_seq(op),
				BTREE_INSERT_ATOMIC|
				BTREE_INSERT_NOFAIL|
				BTREE_INSERT_USE_RESERVE|
				m->data_opts.btree_insert_flags,
				BTREE_INSERT_ENTRY(&iter, &insert->k_i));
		if (!ret)
			atomic_long_inc(&c->extent_migrate_done);
		if (ret == -EINTR)
			ret = 0;
		if (ret)
			break;
next:
		while (bkey_cmp(iter.pos, bch2_keylist_front(keys)->k.p) >= 0) {
			bch2_keylist_pop_front(keys);
			if (bch2_keylist_empty(keys))
				goto out;
		}

		bch2_cut_front(iter.pos, bch2_keylist_front(keys));
		continue;
nomatch:
		if (m->ctxt)
			atomic64_add(k.k->p.offset - iter.pos.offset,
				     &m->ctxt->stats->sectors_raced);
		atomic_long_inc(&c->extent_migrate_raced);
		trace_move_race(&new->k);
		bch2_btree_iter_next_slot(&iter);
		goto next;
	}
out:
	bch2_btree_iter_unlock(&iter);
	return ret;
}

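/*
 * Called once the read completes: fill in the parts of the write op
 * that depend on what was actually read - source pointer, position,
 * version, and checksum/encryption state.
 */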
void bch2_migrate_read_done(struct migrate_write *m, struct bch_read_bio *rbio)
{
	/* write bio must own pages: */
	BUG_ON(!m->op.wbio.bio.bi_vcnt);

	m->ptr		= rbio->pick.ptr;
	m->offset	= rbio->pos.offset - rbio->pick.crc.offset;
	m->op.devs_have	= rbio->devs_have;
	m->op.pos	= rbio->pos;
	m->op.version	= rbio->version;
	m->op.crc	= rbio->pick.crc;
	m->op.wbio.bio.bi_iter.bi_size = m->op.crc.compressed_size << 9;

	if (bch2_csum_type_is_encryption(m->op.crc.csum_type)) {
		m->op.nonce	= m->op.crc.nonce + m->op.crc.offset;
		m->op.csum_type = m->op.crc.csum_type;
	}

	if (m->data_cmd == DATA_REWRITE)
		bch2_dev_list_drop_dev(&m->op.devs_have, m->data_opts.rewrite_dev);
}

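/*
 * Set up the write op for a data move: compression and target options,
 * replica counts, and whatever disk reservation the given data_cmd
 * will need up front.
 */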
int bch2_migrate_write_init(struct bch_fs *c, struct migrate_write *m,
			    struct write_point_specifier wp,
			    struct bch_io_opts io_opts,
			    enum data_cmd data_cmd,
			    struct data_opts data_opts,
			    struct bkey_s_c k)
{
	int ret;

	m->data_cmd	= data_cmd;
	m->data_opts	= data_opts;
	m->nr_ptrs_reserved = 0;

	bch2_write_op_init(&m->op, c, io_opts);
	m->op.compression_type =
		bch2_compression_opt_to_type[io_opts.background_compression ?:
					     io_opts.compression];
	m->op.target	= data_opts.target;
	m->op.write_point = wp;

	if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE)
		m->op.alloc_reserve = RESERVE_MOVINGGC;

	m->op.flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS|
		BCH_WRITE_PAGES_STABLE|
		BCH_WRITE_PAGES_OWNED|
		BCH_WRITE_DATA_ENCODED;

	m->op.nr_replicas	= 1;
	m->op.nr_replicas_required = 1;
	m->op.index_update_fn	= bch2_migrate_index_update;

	switch (data_cmd) {
	case DATA_ADD_REPLICAS: {
		/*
		 * DATA_ADD_REPLICAS is used for moving data to a different
		 * device in the background, and due to compression the new
		 * copy might take up more space than the old copy:
		 */
#if 0
		int nr = (int) io_opts.data_replicas -
			bch2_bkey_nr_dirty_ptrs(k);
#endif
		int nr = (int) io_opts.data_replicas;

		if (nr > 0) {
			m->op.nr_replicas = m->nr_ptrs_reserved = nr;

			ret = bch2_disk_reservation_get(c, &m->op.res,
					k.k->size, m->op.nr_replicas, 0);
			if (ret)
				return ret;
		}
		break;
	}
	case DATA_REWRITE: {
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;
		unsigned compressed_sectors = 0;

		extent_for_each_ptr_decode(bkey_s_c_to_extent(k), p, entry)
			if (!p.ptr.cached &&
			    p.crc.compression_type != BCH_COMPRESSION_NONE &&
			    bch2_dev_in_target(c, p.ptr.dev, data_opts.target))
				compressed_sectors += p.crc.compressed_size;

		if (compressed_sectors) {
			ret = bch2_disk_reservation_add(c, &m->op.res,
					compressed_sectors,
					BCH_DISK_RESERVATION_NOFAIL);
			if (ret)
				return ret;
		}
		break;
	}
	case DATA_PROMOTE:
		m->op.flags |= BCH_WRITE_ALLOC_NOWAIT;
		m->op.flags |= BCH_WRITE_CACHED;
		break;
	default:
		BUG();
	}

	return 0;
}

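/*
 * Final cleanup once both the read and the write have completed:
 * release the disk reservation and bounce pages, wake anyone throttled
 * on in-flight IO, and free the moving_io.
 */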
static void move_free(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct moving_context *ctxt = io->write.ctxt;
	struct bvec_iter_all iter;
	struct bio_vec *bv;

	bch2_disk_reservation_put(io->write.op.c, &io->write.op.res);

	bio_for_each_segment_all(bv, &io->write.op.wbio.bio, iter)
		if (bv->bv_page)
			__free_page(bv->bv_page);

	wake_up(&ctxt->wait);

	kfree(io);
}

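/*
 * Write completed: drop its sectors from the in-flight count, then
 * tear everything down via move_free().
 */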
static void move_write_done(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);

	atomic_sub(io->write_sectors, &io->write.ctxt->write_sectors);
	closure_return_with_destructor(cl, move_free);
}

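/*
 * Called (via closure) once the read has completed: bail out if the
 * read failed or hit a hole, otherwise kick off the write.
 */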
static void move_write(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);

	if (unlikely(io->rbio.bio.bi_status || io->rbio.hole)) {
		closure_return_with_destructor(cl, move_free);
		return;
	}

	bch2_migrate_read_done(&io->write, &io->rbio);

	atomic_add(io->write_sectors, &io->write.ctxt->write_sectors);
	closure_call(&io->write.op.cl, bch2_write, NULL, cl);
	continue_at(cl, move_write_done, NULL);
}

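/*
 * Writes are started in the order the reads were issued: return the
 * oldest queued read iff it has completed.
 */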
static inline struct moving_io *next_pending_write(struct moving_context *ctxt)
{
	struct moving_io *io =
		list_first_entry_or_null(&ctxt->reads, struct moving_io, list);

	return io && io->read_completed ? io : NULL;
}

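/*
 * Read completion: update the in-flight counts, and wake the mover if
 * this read unblocked a pending write.
 */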
static void move_read_endio(struct bio *bio)
{
	struct moving_io *io = container_of(bio, struct moving_io, rbio.bio);
	struct moving_context *ctxt = io->write.ctxt;

	atomic_sub(io->read_sectors, &ctxt->read_sectors);
	io->read_completed = true;

	if (next_pending_write(ctxt))
		wake_up(&ctxt->wait);

	closure_put(&ctxt->cl);
}

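/*
 * Start the write for every read at the head of the queue that has
 * completed.
 */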
static void do_pending_writes(struct moving_context *ctxt)
{
	struct moving_io *io;

	while ((io = next_pending_write(ctxt))) {
		list_del(&io->list);
		closure_call(&io->cl, move_write, NULL, &ctxt->cl);
	}
}

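/*
 * Like wait_event(), but issues any pending writes before sleeping and
 * again on each wakeup caused by a read completing.
 */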
#define move_ctxt_wait_event(_ctxt, _cond)			\
do {								\
	do_pending_writes(_ctxt);				\
								\
	if (_cond)						\
		break;						\
	__wait_event((_ctxt)->wait,				\
		     next_pending_write(_ctxt) || (_cond));	\
} while (1)

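/*
 * Wait until some write IO completes: i.e. until either nothing is in
 * flight or the in-flight sector count has changed.
 */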
static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt)
{
	unsigned sectors_pending = atomic_read(&ctxt->write_sectors);

	move_ctxt_wait_event(ctxt,
		!atomic_read(&ctxt->write_sectors) ||
		atomic_read(&ctxt->write_sectors) != sectors_pending);
}

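/*
 * Allocate a moving_io for one extent, issue the read, and queue it so
 * the write is started when the read completes. Throttles against the
 * per-context in-flight limits first.
 */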
static int bch2_move_extent(struct bch_fs *c,
			    struct moving_context *ctxt,
			    struct write_point_specifier wp,
			    struct bch_io_opts io_opts,
			    struct bkey_s_c_extent e,
			    enum data_cmd data_cmd,
			    struct data_opts data_opts)
{
	struct moving_io *io;
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned sectors = e.k->size, pages;
	int ret = -ENOMEM;

	move_ctxt_wait_event(ctxt,
		atomic_read(&ctxt->write_sectors) <
		SECTORS_IN_FLIGHT_PER_DEVICE);

	move_ctxt_wait_event(ctxt,
		atomic_read(&ctxt->read_sectors) <
		SECTORS_IN_FLIGHT_PER_DEVICE);

	/* write path might have to decompress data: */
	extent_for_each_ptr_decode(e, p, entry)
		sectors = max_t(unsigned, sectors, p.crc.uncompressed_size);

	pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
	io = kzalloc(sizeof(struct moving_io) +
		     sizeof(struct bio_vec) * pages, GFP_KERNEL);
	if (!io)
		goto err;

	io->write.ctxt		= ctxt;
	io->read_sectors	= e.k->size;
	io->write_sectors	= e.k->size;

	bio_init(&io->write.op.wbio.bio, NULL, io->bi_inline_vecs, pages, 0);
	bio_set_prio(&io->write.op.wbio.bio,
		     IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	if (bch2_bio_alloc_pages(&io->write.op.wbio.bio, sectors << 9,
				 GFP_KERNEL))
		goto err_free;

	io->rbio.opts = io_opts;
	bio_init(&io->rbio.bio, NULL, io->bi_inline_vecs, pages, 0);
	io->rbio.bio.bi_vcnt = pages;
	bio_set_prio(&io->rbio.bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
	io->rbio.bio.bi_iter.bi_size = sectors << 9;

	io->rbio.bio.bi_opf		= REQ_OP_READ;
	io->rbio.bio.bi_iter.bi_sector	= bkey_start_offset(e.k);
	io->rbio.bio.bi_end_io		= move_read_endio;

	ret = bch2_migrate_write_init(c, &io->write, wp, io_opts,
				      data_cmd, data_opts, e.s_c);
	if (ret)
		goto err_free_pages;

	atomic64_inc(&ctxt->stats->keys_moved);
	atomic64_add(e.k->size, &ctxt->stats->sectors_moved);

	trace_move_extent(e.k);

	atomic_add(io->read_sectors, &ctxt->read_sectors);
	list_add_tail(&io->list, &ctxt->reads);

	/*
	 * dropped by move_read_endio() - guards against use after free of
	 * ctxt when doing wakeup
	 */
	closure_get(&ctxt->cl);
	bch2_read_extent(c, &io->rbio, e.s_c,
			 BCH_READ_NODECODE|
			 BCH_READ_LAST_FRAGMENT);
	return 0;
err_free_pages:
	bio_free_pages(&io->write.op.wbio.bio);
err_free:
	kfree(io);
err:
	trace_move_alloc_fail(e.k);
	return ret;
}

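/*
 * Walk extents from @start to @end, calling @pred on each to decide
 * what (if anything) to do with it, and moving the data accordingly.
 * Honours @rate for throttling, and refreshes per-inode IO options
 * whenever the scan crosses an inode boundary.
 */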
int bch2_move_data(struct bch_fs *c,
		   struct bch_ratelimit *rate,
		   struct write_point_specifier wp,
		   struct bpos start,
		   struct bpos end,
		   move_pred_fn pred, void *arg,
		   struct bch_move_stats *stats)
{
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	struct moving_context ctxt = { .stats = stats };
	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
	BKEY_PADDED(k) tmp;
	struct bkey_s_c k;
	struct data_opts data_opts;
	enum data_cmd data_cmd;
	u64 delay, cur_inum = U64_MAX;
	int ret = 0, ret2;

	closure_init_stack(&ctxt.cl);
	INIT_LIST_HEAD(&ctxt.reads);
	init_waitqueue_head(&ctxt.wait);

	stats->data_type = BCH_DATA_USER;
	bch2_btree_iter_init(&stats->iter, c, BTREE_ID_EXTENTS, start,
			     BTREE_ITER_PREFETCH);

	if (rate)
		bch2_ratelimit_reset(rate);

	while (1) {
		do {
			delay = rate ? bch2_ratelimit_delay(rate) : 0;

			if (delay) {
				bch2_btree_iter_unlock(&stats->iter);
				set_current_state(TASK_INTERRUPTIBLE);
			}

			if (kthread && (ret = kthread_should_stop())) {
				__set_current_state(TASK_RUNNING);
				goto out;
			}

			if (delay)
				schedule_timeout(delay);

			if (unlikely(freezing(current))) {
				bch2_btree_iter_unlock(&stats->iter);
				move_ctxt_wait_event(&ctxt, list_empty(&ctxt.reads));
				try_to_freeze();
			}
		} while (delay);
peek:
		k = bch2_btree_iter_peek(&stats->iter);
		if (!k.k)
			break;
		ret = btree_iter_err(k);
		if (ret)
			break;
		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
			break;

		if (!bkey_extent_is_data(k.k))
			goto next_nondata;

		if (cur_inum != k.k->p.inode) {
			struct bch_inode_unpacked inode;

			/* don't hold btree locks while looking up inode: */
			bch2_btree_iter_unlock(&stats->iter);

			io_opts = bch2_opts_to_inode_opts(c->opts);
			if (!bch2_inode_find_by_inum(c, k.k->p.inode, &inode))
				bch2_io_opts_apply(&io_opts, bch2_inode_opts_get(&inode));
			cur_inum = k.k->p.inode;
			goto peek;
		}

		switch ((data_cmd = pred(c, arg, k, &io_opts, &data_opts))) {
		case DATA_SKIP:
			goto next;
		case DATA_SCRUB:
			BUG();
		case DATA_ADD_REPLICAS:
		case DATA_REWRITE:
		case DATA_PROMOTE:
			break;
		default:
			BUG();
		}

		/* unlock before doing IO: */
		bkey_reassemble(&tmp.k, k);
		k = bkey_i_to_s_c(&tmp.k);
		bch2_btree_iter_unlock(&stats->iter);

		ret2 = bch2_move_extent(c, &ctxt, wp, io_opts,
					bkey_s_c_to_extent(k),
					data_cmd, data_opts);
		if (ret2) {
			if (ret2 == -ENOMEM) {
				/* memory allocation failure, wait for some IO to finish */
				bch2_move_ctxt_wait_for_io(&ctxt);
				continue;
			}

			/* XXX signal failure */
			goto next;
		}

		if (rate)
			bch2_ratelimit_increment(rate, k.k->size);
next:
		atomic64_add(k.k->size * bch2_bkey_nr_dirty_ptrs(k),
			     &stats->sectors_seen);
next_nondata:
		bch2_btree_iter_next(&stats->iter);
		bch2_btree_iter_cond_resched(&stats->iter);
	}
out:
	bch2_btree_iter_unlock(&stats->iter);

	move_ctxt_wait_event(&ctxt, list_empty(&ctxt.reads));
	closure_sync(&ctxt.cl);

	EBUG_ON(atomic_read(&ctxt.write_sectors));

	trace_move_data(c,
			atomic64_read(&stats->sectors_moved),
			atomic64_read(&stats->keys_moved));

	return ret;
}

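/*
 * Rebuild the replicas table for user/cached data: mark the replicas
 * entry for every extent as in use, then drop the entries that weren't
 * marked.
 */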
static int bch2_gc_data_replicas(struct bch_fs *c)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	mutex_lock(&c->replicas_gc_lock);
	bch2_replicas_gc_start(c, (1 << BCH_DATA_USER)|(1 << BCH_DATA_CACHED));

	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS_MIN,
			   BTREE_ITER_PREFETCH, k) {
		ret = bch2_mark_bkey_replicas(c, k);
		if (ret)
			break;
	}
	ret = bch2_btree_iter_unlock(&iter) ?: ret;

	bch2_replicas_gc_end(c, ret);
	mutex_unlock(&c->replicas_gc_lock);

	return ret;
}

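/* Same as above, but for btree node pointers: */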
static int bch2_gc_btree_replicas(struct bch_fs *c)
{
	struct btree_iter iter;
	struct btree *b;
	unsigned id;
	int ret = 0;

	mutex_lock(&c->replicas_gc_lock);
	bch2_replicas_gc_start(c, 1 << BCH_DATA_BTREE);

	for (id = 0; id < BTREE_ID_NR; id++) {
		for_each_btree_node(&iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) {
			ret = bch2_mark_bkey_replicas(c, bkey_i_to_s_c(&b->key));

			bch2_btree_iter_cond_resched(&iter);
		}

		ret = bch2_btree_iter_unlock(&iter) ?: ret;
	}

	bch2_replicas_gc_end(c, ret);
	mutex_unlock(&c->replicas_gc_lock);

	return ret;
}

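/*
 * Btree counterpart of bch2_move_data(): walk every node of every
 * btree and rewrite the ones @pred selects.
 */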
static int bch2_move_btree(struct bch_fs *c,
			   move_pred_fn pred,
			   void *arg,
			   struct bch_move_stats *stats)
{
	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
	struct btree *b;
	unsigned id;
	struct data_opts data_opts;
	enum data_cmd cmd;
	int ret = 0;

	stats->data_type = BCH_DATA_BTREE;

	for (id = 0; id < BTREE_ID_NR; id++) {
		for_each_btree_node(&stats->iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) {
			switch ((cmd = pred(c, arg,
					    bkey_i_to_s_c(&b->key),
					    &io_opts, &data_opts))) {
			case DATA_SKIP:
				goto next;
			case DATA_SCRUB:
				BUG();
			case DATA_ADD_REPLICAS:
			case DATA_REWRITE:
				break;
			default:
				BUG();
			}

			ret = bch2_btree_node_rewrite(c, &stats->iter,
					b->data->keys.seq, 0) ?: ret;
next:
			bch2_btree_iter_cond_resched(&stats->iter);
		}

		ret = bch2_btree_iter_unlock(&stats->iter) ?: ret;
	}

	return ret;
}

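/* Placeholder: scrub isn't implemented yet - DATA_SCRUB currently BUG()s: */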
#if 0
static enum data_cmd scrub_pred(struct bch_fs *c, void *arg,
				struct bkey_s_c k,
				struct bch_io_opts *io_opts,
				struct data_opts *data_opts)
{
	return DATA_SCRUB;
}
#endif

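/*
 * Select keys that have fewer good replicas than their target replica
 * count, so bch2_data_job() can re-replicate them.
 */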
static enum data_cmd rereplicate_pred(struct bch_fs *c, void *arg,
				      struct bkey_s_c k,
				      struct bch_io_opts *io_opts,
				      struct data_opts *data_opts)
{
	unsigned nr_good = bch2_bkey_durability(c, k);
	unsigned replicas = 0;

	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
		replicas = c->opts.metadata_replicas;
		break;
	case KEY_TYPE_extent:
		replicas = io_opts->data_replicas;
		break;
	}

	if (!nr_good || nr_good >= replicas)
		return DATA_SKIP;

	data_opts->target		= 0;
	data_opts->btree_insert_flags	= 0;
	return DATA_ADD_REPLICAS;
}

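/*
 * Select keys with a pointer to the device being evacuated, so their
 * data gets rewritten elsewhere.
 */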
static enum data_cmd migrate_pred(struct bch_fs *c, void *arg,
				  struct bkey_s_c k,
				  struct bch_io_opts *io_opts,
				  struct data_opts *data_opts)
{
	struct bch_ioctl_data *op = arg;

	if (!bch2_bkey_has_device(k, op->migrate.dev))
		return DATA_SKIP;

	data_opts->target		= 0;
	data_opts->btree_insert_flags	= 0;
	data_opts->rewrite_dev		= op->migrate.dev;
	return DATA_REWRITE;
}

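/*
 * Entry point for data jobs requested via ioctl: runs the requested
 * operation over the journal, then btree nodes, then user data.
 */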
int bch2_data_job(struct bch_fs *c,
		  struct bch_move_stats *stats,
		  struct bch_ioctl_data op)
{
	int ret = 0;

	switch (op.op) {
	case BCH_DATA_OP_REREPLICATE:
		stats->data_type = BCH_DATA_JOURNAL;
		ret = bch2_journal_flush_device_pins(&c->journal, -1);

		ret = bch2_move_btree(c, rereplicate_pred, c, stats) ?: ret;
		ret = bch2_gc_btree_replicas(c) ?: ret;

		ret = bch2_move_data(c, NULL,
				     writepoint_hashed((unsigned long) current),
				     op.start,
				     op.end,
				     rereplicate_pred, c, stats) ?: ret;
		ret = bch2_gc_data_replicas(c) ?: ret;
		break;
	case BCH_DATA_OP_MIGRATE:
		if (op.migrate.dev >= c->sb.nr_devices)
			return -EINVAL;

		stats->data_type = BCH_DATA_JOURNAL;
		ret = bch2_journal_flush_device_pins(&c->journal, op.migrate.dev);

		ret = bch2_move_btree(c, migrate_pred, &op, stats) ?: ret;
		ret = bch2_gc_btree_replicas(c) ?: ret;

		ret = bch2_move_data(c, NULL,
				     writepoint_hashed((unsigned long) current),
				     op.start,
				     op.end,
				     migrate_pred, &op, stats) ?: ret;
		ret = bch2_gc_data_replicas(c) ?: ret;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}