// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_on_stack.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "disk_groups.h"
#include "inode.h"
#include "io.h"
#include "journal_reclaim.h"
#include "keylist.h"
#include "move.h"
#include "replicas.h"
#include "super-io.h"
#include "trace.h"

#include <linux/ioprio.h>
#include <linux/kthread.h>

#define SECTORS_IN_FLIGHT_PER_DEVICE	2048

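/*
 * Tracks one extent being moved: the read into memory (rbio) and the write
 * back out (write.op). Allocated per extent and freed by move_free() once the
 * write completes (or the read fails).
 */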
struct moving_io {
	struct list_head	list;
	struct closure		cl;
	bool			read_completed;

	unsigned		read_sectors;
	unsigned		write_sectors;

	struct bch_read_bio	rbio;

	struct migrate_write	write;
	/* Must be last since it is variable size */
	struct bio_vec		bi_inline_vecs[0];
};

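/*
 * Shared state for one move operation: the list of in-flight reads, in-flight
 * sector counters used for throttling, and a waitqueue to block on when too
 * much IO is outstanding.
 */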
struct moving_context {
	/* Closure for waiting on all reads and writes to complete */
	struct closure		cl;

	struct bch_move_stats	*stats;

	struct list_head	reads;

	/* in flight sectors: */
	atomic_t		read_sectors;
	atomic_t		write_sectors;

	wait_queue_head_t	wait;
};

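/*
 * Index update for data moves, run via op->index_update_fn once the new copy
 * has been written: re-read the existing extent, check it still matches what
 * we read from (same version, same pointer), splice the newly written
 * pointers in, and commit. If the extent changed underneath us, count it as a
 * race and skip ahead rather than fail.
 */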
static int bch2_migrate_index_update(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct btree_trans trans;
	struct btree_iter *iter;
	struct migrate_write *m =
		container_of(op, struct migrate_write, op);
	struct keylist *keys = &op->insert_keys;
	int ret = 0;

	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);

	iter = bch2_trans_get_iter(&trans, m->btree_id,
				   bkey_start_pos(&bch2_keylist_front(keys)->k),
				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	while (1) {
		struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
		struct bkey_i *insert;
		struct bkey_i_extent *new =
			bkey_i_to_extent(bch2_keylist_front(keys));
		BKEY_PADDED(k) _new, _insert;
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;
		bool did_work = false;
		int nr;

		ret = bkey_err(k);
		if (ret)
			break;

		if (bversion_cmp(k.k->version, new->k.version) ||
		    !bch2_bkey_matches_ptr(c, k, m->ptr, m->offset))
			goto nomatch;

		if (m->data_cmd == DATA_REWRITE &&
		    !bch2_bkey_has_device(k, m->data_opts.rewrite_dev))
			goto nomatch;

		bkey_reassemble(&_insert.k, k);
		insert = &_insert.k;

		bkey_copy(&_new.k, bch2_keylist_front(keys));
		new = bkey_i_to_extent(&_new.k);
		bch2_cut_front(iter->pos, &new->k_i);

		bch2_cut_front(iter->pos, insert);
		bch2_cut_back(new->k.p, insert);
		bch2_cut_back(insert->k.p, &new->k_i);

		if (m->data_cmd == DATA_REWRITE)
			bch2_bkey_drop_device(bkey_i_to_s(insert),
					      m->data_opts.rewrite_dev);

		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry) {
			if (bch2_bkey_has_device(bkey_i_to_s_c(insert), p.ptr.dev)) {
				/*
				 * raced with another move op? extent already
				 * has a pointer to the device we just wrote
				 * data to
				 */
				continue;
			}

			bch2_extent_ptr_decoded_append(insert, &p);
			did_work = true;
		}

		if (!did_work)
			goto nomatch;

		bch2_bkey_narrow_crcs(insert,
				(struct bch_extent_crc_unpacked) { 0 });
		bch2_extent_normalize(c, bkey_i_to_s(insert));
		bch2_bkey_mark_replicas_cached(c, bkey_i_to_s(insert),
					       op->opts.background_target,
					       op->opts.data_replicas);

		/*
		 * If we're not fully overwriting @k, and it's compressed, we
		 * need a reservation for all the pointers in @insert
		 */
		nr = bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(insert)) -
			m->nr_ptrs_reserved;

		if (insert->k.size < k.k->size &&
		    bch2_bkey_sectors_compressed(k) &&
		    nr > 0) {
			ret = bch2_disk_reservation_add(c, &op->res,
					keylist_sectors(keys) * nr, 0);
			if (ret)
				goto out;

			m->nr_ptrs_reserved += nr;
			goto next;
		}

		bch2_trans_update(&trans, iter, insert, 0);

		ret = bch2_trans_commit(&trans, &op->res,
				op_journal_seq(op),
				BTREE_INSERT_NOFAIL|
				BTREE_INSERT_USE_RESERVE|
				m->data_opts.btree_insert_flags);
		if (!ret)
			atomic_long_inc(&c->extent_migrate_done);
		if (ret == -EINTR)
			ret = 0;
		if (ret)
			break;
next:
		while (bkey_cmp(iter->pos, bch2_keylist_front(keys)->k.p) >= 0) {
			bch2_keylist_pop_front(keys);
			if (bch2_keylist_empty(keys))
				goto out;
		}
		continue;
nomatch:
		if (m->ctxt)
			atomic64_add(k.k->p.offset - iter->pos.offset,
				     &m->ctxt->stats->sectors_raced);
		atomic_long_inc(&c->extent_migrate_raced);
		trace_move_race(&new->k);
		bch2_btree_iter_next_slot(iter);
		goto next;
	}
out:
	bch2_trans_exit(&trans);
	BUG_ON(ret == -EINTR);
	return ret;
}

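/*
 * Called once the read has completed: carry the read's checksum/compression
 * state (crc), version and position over to the write op, so the data can be
 * written back out without being re-encoded (see BCH_WRITE_DATA_ENCODED in
 * bch2_migrate_write_init()).
 */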
void bch2_migrate_read_done(struct migrate_write *m, struct bch_read_bio *rbio)
{
	/* write bio must own pages: */
	BUG_ON(!m->op.wbio.bio.bi_vcnt);

	m->ptr = rbio->pick.ptr;
	m->offset = rbio->pos.offset - rbio->pick.crc.offset;
	m->op.devs_have = rbio->devs_have;
	m->op.pos = rbio->pos;
	m->op.version = rbio->version;
	m->op.crc = rbio->pick.crc;
	m->op.wbio.bio.bi_iter.bi_size = m->op.crc.compressed_size << 9;

	if (bch2_csum_type_is_encryption(m->op.crc.csum_type)) {
		m->op.nonce = m->op.crc.nonce + m->op.crc.offset;
		m->op.csum_type = m->op.crc.csum_type;
	}

	if (m->data_cmd == DATA_REWRITE)
		bch2_dev_list_drop_dev(&m->op.devs_have, m->data_opts.rewrite_dev);
}

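/*
 * Set up the write half of a data move. The disk reservation taken here
 * depends on the operation: adding replicas reserves space for the new copies
 * up front; rewrites only reserve for compressed pointers on the target
 * (presumably because the replacement copy may not compress as well);
 * promotes write cached data and never block on allocation.
 */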
int bch2_migrate_write_init(struct bch_fs *c, struct migrate_write *m,
			    struct write_point_specifier wp,
			    struct bch_io_opts io_opts,
			    enum data_cmd data_cmd,
			    struct data_opts data_opts,
			    enum btree_id btree_id,
			    struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	int ret;

	m->btree_id = btree_id;
	m->data_cmd = data_cmd;
	m->data_opts = data_opts;
	m->nr_ptrs_reserved = 0;

	bch2_write_op_init(&m->op, c, io_opts);

	if (!bch2_bkey_is_incompressible(k))
		m->op.compression_type =
			bch2_compression_opt_to_type[io_opts.background_compression ?:
						     io_opts.compression];
	else
		m->op.incompressible = true;

	m->op.target = data_opts.target;
	m->op.write_point = wp;

	if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE)
		m->op.alloc_reserve = RESERVE_MOVINGGC;

	m->op.flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS|
		BCH_WRITE_PAGES_STABLE|
		BCH_WRITE_PAGES_OWNED|
		BCH_WRITE_DATA_ENCODED;

	m->op.nr_replicas = 1;
	m->op.nr_replicas_required = 1;
	m->op.index_update_fn = bch2_migrate_index_update;

	switch (data_cmd) {
	case DATA_ADD_REPLICAS: {
		/*
		 * DATA_ADD_REPLICAS is used for moving data to a different
		 * device in the background, and due to compression the new
		 * copy might take up more space than the old copy:
		 */
#if 0
		int nr = (int) io_opts.data_replicas -
			bch2_bkey_nr_ptrs_allocated(k);
#endif
		int nr = (int) io_opts.data_replicas;

		if (nr > 0) {
			m->op.nr_replicas = m->nr_ptrs_reserved = nr;

			ret = bch2_disk_reservation_get(c, &m->op.res,
					k.k->size, m->op.nr_replicas, 0);
			if (ret)
				return ret;
		}
		break;
	}
	case DATA_REWRITE: {
		unsigned compressed_sectors = 0;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			if (!p.ptr.cached &&
			    crc_is_compressed(p.crc) &&
			    bch2_dev_in_target(c, p.ptr.dev, data_opts.target))
				compressed_sectors += p.crc.compressed_size;

		if (compressed_sectors) {
			ret = bch2_disk_reservation_add(c, &m->op.res,
					compressed_sectors,
					BCH_DISK_RESERVATION_NOFAIL);
			if (ret)
				return ret;
		}
		break;
	}
	case DATA_PROMOTE:
		m->op.flags |= BCH_WRITE_ALLOC_NOWAIT;
		m->op.flags |= BCH_WRITE_CACHED;
		break;
	default:
		BUG();
	}

	return 0;
}

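/*
 * Write-side completion path: move_write() kicks off the write once the read
 * has the data in memory, move_write_done() drops the in-flight write sector
 * count, and move_free() releases the reservation, the pages and the
 * moving_io itself.
 */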
static void move_free(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct moving_context *ctxt = io->write.ctxt;
	struct bvec_iter_all iter;
	struct bio_vec *bv;

	bch2_disk_reservation_put(io->write.op.c, &io->write.op.res);

	bio_for_each_segment_all(bv, &io->write.op.wbio.bio, iter)
		if (bv->bv_page)
			__free_page(bv->bv_page);

	wake_up(&ctxt->wait);

	kfree(io);
}

static void move_write_done(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);

	atomic_sub(io->write_sectors, &io->write.ctxt->write_sectors);
	closure_return_with_destructor(cl, move_free);
}

static void move_write(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);

	if (unlikely(io->rbio.bio.bi_status || io->rbio.hole)) {
		closure_return_with_destructor(cl, move_free);
		return;
	}

	bch2_migrate_read_done(&io->write, &io->rbio);

	atomic_add(io->write_sectors, &io->write.ctxt->write_sectors);
	closure_call(&io->write.op.cl, bch2_write, NULL, cl);
	continue_at(cl, move_write_done, NULL);
}

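/*
 * Reads may complete out of order, but writes are issued in the order the
 * reads were submitted: only the moving_io at the head of the list is
 * eligible, and only once its read has completed.
 */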
static inline struct moving_io *next_pending_write(struct moving_context *ctxt)
{
	struct moving_io *io =
		list_first_entry_or_null(&ctxt->reads, struct moving_io, list);

	return io && io->read_completed ? io : NULL;
}

static void move_read_endio(struct bio *bio)
{
	struct moving_io *io = container_of(bio, struct moving_io, rbio.bio);
	struct moving_context *ctxt = io->write.ctxt;

	atomic_sub(io->read_sectors, &ctxt->read_sectors);
	io->read_completed = true;

	if (next_pending_write(ctxt))
		wake_up(&ctxt->wait);

	closure_put(&ctxt->cl);
}

static void do_pending_writes(struct moving_context *ctxt)
{
	struct moving_io *io;

	while ((io = next_pending_write(ctxt))) {
		list_del(&io->list);
		closure_call(&io->cl, move_write, NULL, &ctxt->cl);
	}
}

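/*
 * Flush any writes whose reads have completed, then wait until either the
 * condition becomes true or another read finishes and gives us more writes to
 * issue.
 */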
#define move_ctxt_wait_event(_ctxt, _cond)			\
do {								\
	do_pending_writes(_ctxt);				\
								\
	if (_cond)						\
		break;						\
	__wait_event((_ctxt)->wait,				\
		     next_pending_write(_ctxt) || (_cond));	\
} while (1)

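/*
 * Wait for outstanding writes to make progress: returns once all writes have
 * drained or the in-flight write sector count has changed, i.e. at least one
 * write was issued or completed in the meantime.
 */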
static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt)
{
	unsigned sectors_pending = atomic_read(&ctxt->write_sectors);

	move_ctxt_wait_event(ctxt,
		!atomic_read(&ctxt->write_sectors) ||
		atomic_read(&ctxt->write_sectors) != sectors_pending);
}

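/*
 * Start moving a single extent: throttle against the in-flight limits,
 * allocate a moving_io sized for the (possibly decompressed) data, set up the
 * write, and issue the read. BCH_READ_NODECODE keeps the data in its on-disk
 * encoded form, matching BCH_WRITE_DATA_ENCODED on the write side.
 */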
static int bch2_move_extent(struct bch_fs *c,
			    struct moving_context *ctxt,
			    struct write_point_specifier wp,
			    struct bch_io_opts io_opts,
			    enum btree_id btree_id,
			    struct bkey_s_c k,
			    enum data_cmd data_cmd,
			    struct data_opts data_opts)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct moving_io *io;
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned sectors = k.k->size, pages;
	int ret = -ENOMEM;

	move_ctxt_wait_event(ctxt,
		atomic_read(&ctxt->write_sectors) <
		SECTORS_IN_FLIGHT_PER_DEVICE);

	move_ctxt_wait_event(ctxt,
		atomic_read(&ctxt->read_sectors) <
		SECTORS_IN_FLIGHT_PER_DEVICE);

	/* write path might have to decompress data: */
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		sectors = max_t(unsigned, sectors, p.crc.uncompressed_size);

	pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
	io = kzalloc(sizeof(struct moving_io) +
		     sizeof(struct bio_vec) * pages, GFP_KERNEL);
	if (!io)
		goto err;

	io->write.ctxt = ctxt;
	io->read_sectors = k.k->size;
	io->write_sectors = k.k->size;

	bio_init(&io->write.op.wbio.bio, NULL, io->bi_inline_vecs, pages, 0);
	bio_set_prio(&io->write.op.wbio.bio,
		     IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	if (bch2_bio_alloc_pages(&io->write.op.wbio.bio, sectors << 9,
				 GFP_KERNEL))
		goto err_free;

	io->rbio.c = c;
	io->rbio.opts = io_opts;
	bio_init(&io->rbio.bio, NULL, io->bi_inline_vecs, pages, 0);
	io->rbio.bio.bi_vcnt = pages;
	bio_set_prio(&io->rbio.bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
	io->rbio.bio.bi_iter.bi_size = sectors << 9;

	io->rbio.bio.bi_opf = REQ_OP_READ;
	io->rbio.bio.bi_iter.bi_sector = bkey_start_offset(k.k);
	io->rbio.bio.bi_end_io = move_read_endio;

	ret = bch2_migrate_write_init(c, &io->write, wp, io_opts,
				      data_cmd, data_opts, btree_id, k);
	if (ret)
		goto err_free_pages;

	atomic64_inc(&ctxt->stats->keys_moved);
	atomic64_add(k.k->size, &ctxt->stats->sectors_moved);

	trace_move_extent(k.k);

	atomic_add(io->read_sectors, &ctxt->read_sectors);
	list_add_tail(&io->list, &ctxt->reads);

	/*
	 * dropped by move_read_endio() - guards against use after free of
	 * ctxt when doing wakeup
	 */
	closure_get(&ctxt->cl);
	bch2_read_extent(c, &io->rbio, k, 0,
			 BCH_READ_NODECODE|
			 BCH_READ_LAST_FRAGMENT);
	return 0;
err_free_pages:
	bio_free_pages(&io->write.op.wbio.bio);
err_free:
	kfree(io);
err:
	trace_move_alloc_fail(k.k);
	return ret;
}

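/*
 * Main data move loop for one btree: walk keys from @start to @end,
 * ratelimiting and checking for freeze/kthread stop as we go; for extents,
 * look up the owning inode's IO options; ask the predicate what to do with
 * each key and hand matching extents to bch2_move_extent(). -ENOMEM from the
 * move means an allocation failed, so wait for some IO to complete and retry.
 */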
static int __bch2_move_data(struct bch_fs *c,
			    struct moving_context *ctxt,
			    struct bch_ratelimit *rate,
			    struct write_point_specifier wp,
			    struct bpos start,
			    struct bpos end,
			    move_pred_fn pred, void *arg,
			    struct bch_move_stats *stats,
			    enum btree_id btree_id)
{
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
	struct bkey_on_stack sk;
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	struct data_opts data_opts;
	enum data_cmd data_cmd;
	u64 delay, cur_inum = U64_MAX;
	int ret = 0, ret2;

	bkey_on_stack_init(&sk);
	bch2_trans_init(&trans, c, 0, 0);

	stats->data_type = BCH_DATA_USER;
	stats->btree_id = btree_id;
	stats->pos = POS_MIN;

	iter = bch2_trans_get_iter(&trans, btree_id, start,
				   BTREE_ITER_PREFETCH);

	if (rate)
		bch2_ratelimit_reset(rate);

	while (1) {
		do {
			delay = rate ? bch2_ratelimit_delay(rate) : 0;

			if (delay) {
				bch2_trans_unlock(&trans);
				set_current_state(TASK_INTERRUPTIBLE);
			}

			if (kthread && (ret = kthread_should_stop())) {
				__set_current_state(TASK_RUNNING);
				goto out;
			}

			if (delay)
				schedule_timeout(delay);

			if (unlikely(freezing(current))) {
				bch2_trans_unlock(&trans);
				move_ctxt_wait_event(ctxt, list_empty(&ctxt->reads));
				try_to_freeze();
			}
		} while (delay);
peek:
		k = bch2_btree_iter_peek(iter);

		stats->pos = iter->pos;

		if (!k.k)
			break;
		ret = bkey_err(k);
		if (ret)
			break;
		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
			break;

		if (!bkey_extent_is_direct_data(k.k))
			goto next_nondata;

		if (btree_id == BTREE_ID_EXTENTS &&
		    cur_inum != k.k->p.inode) {
			struct bch_inode_unpacked inode;

			/* don't hold btree locks while looking up inode: */
			bch2_trans_unlock(&trans);

			io_opts = bch2_opts_to_inode_opts(c->opts);
			if (!bch2_inode_find_by_inum(c, k.k->p.inode, &inode))
				bch2_io_opts_apply(&io_opts, bch2_inode_opts_get(&inode));
			cur_inum = k.k->p.inode;
			goto peek;
		}

		switch ((data_cmd = pred(c, arg, k, &io_opts, &data_opts))) {
		case DATA_SKIP:
			goto next;
		case DATA_SCRUB:
			BUG();
		case DATA_ADD_REPLICAS:
		case DATA_REWRITE:
		case DATA_PROMOTE:
			break;
		default:
			BUG();
		}

		/* unlock before doing IO: */
		bkey_on_stack_reassemble(&sk, c, k);
		k = bkey_i_to_s_c(sk.k);
		bch2_trans_unlock(&trans);

		ret2 = bch2_move_extent(c, ctxt, wp, io_opts, btree_id, k,
					data_cmd, data_opts);
		if (ret2) {
			if (ret2 == -ENOMEM) {
				/* memory allocation failure, wait for some IO to finish */
				bch2_move_ctxt_wait_for_io(ctxt);
				continue;
			}

			/* XXX signal failure */
			goto next;
		}

		if (rate)
			bch2_ratelimit_increment(rate, k.k->size);
next:
		atomic64_add(k.k->size * bch2_bkey_nr_ptrs_allocated(k),
			     &stats->sectors_seen);
next_nondata:
		bch2_btree_iter_next(iter);
		bch2_trans_cond_resched(&trans);
	}
out:
	ret = bch2_trans_exit(&trans) ?: ret;
	bkey_on_stack_exit(&sk, c);

	return ret;
}

int bch2_move_data(struct bch_fs *c,
		   struct bch_ratelimit *rate,
		   struct write_point_specifier wp,
		   struct bpos start,
		   struct bpos end,
		   move_pred_fn pred, void *arg,
		   struct bch_move_stats *stats)
{
	struct moving_context ctxt = { .stats = stats };
	int ret;

	closure_init_stack(&ctxt.cl);
	INIT_LIST_HEAD(&ctxt.reads);
	init_waitqueue_head(&ctxt.wait);

	stats->data_type = BCH_DATA_USER;

	ret =   __bch2_move_data(c, &ctxt, rate, wp, start, end,
				 pred, arg, stats, BTREE_ID_EXTENTS) ?:
		__bch2_move_data(c, &ctxt, rate, wp, start, end,
				 pred, arg, stats, BTREE_ID_REFLINK);

	move_ctxt_wait_event(&ctxt, list_empty(&ctxt.reads));
	closure_sync(&ctxt.cl);

	EBUG_ON(atomic_read(&ctxt.write_sectors));

	trace_move_data(c,
			atomic64_read(&stats->sectors_moved),
			atomic64_read(&stats->keys_moved));

	return ret;
}

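/*
 * Like the data move loop, but for btree nodes: walk every node in every
 * btree, and rewrite the ones the predicate selects.
 */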
static int bch2_move_btree(struct bch_fs *c,
			   move_pred_fn pred,
			   void *arg,
			   struct bch_move_stats *stats)
{
	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
	struct btree_trans trans;
	struct btree_iter *iter;
	struct btree *b;
	unsigned id;
	struct data_opts data_opts;
	enum data_cmd cmd;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	stats->data_type = BCH_DATA_BTREE;

	for (id = 0; id < BTREE_ID_NR; id++) {
		stats->btree_id = id;

		for_each_btree_node(&trans, iter, id, POS_MIN,
				    BTREE_ITER_PREFETCH, b) {
			stats->pos = iter->pos;

			switch ((cmd = pred(c, arg,
					    bkey_i_to_s_c(&b->key),
					    &io_opts, &data_opts))) {
			case DATA_SKIP:
				goto next;
			case DATA_SCRUB:
				BUG();
			case DATA_ADD_REPLICAS:
			case DATA_REWRITE:
				break;
			default:
				BUG();
			}

			ret = bch2_btree_node_rewrite(c, iter,
					b->data->keys.seq, 0) ?: ret;
next:
			bch2_trans_cond_resched(&trans);
		}

		ret = bch2_trans_iter_free(&trans, iter) ?: ret;
	}

	bch2_trans_exit(&trans);

	return ret;
}

#if 0
static enum data_cmd scrub_pred(struct bch_fs *c, void *arg,
				struct bkey_s_c k,
				struct bch_io_opts *io_opts,
				struct data_opts *data_opts)
{
	return DATA_SCRUB;
}
#endif

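/*
 * Predicates for bch2_data_job(): given a key, decide whether to skip it,
 * add replicas, or rewrite it off a given device.
 */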
static enum data_cmd rereplicate_pred(struct bch_fs *c, void *arg,
				      struct bkey_s_c k,
				      struct bch_io_opts *io_opts,
				      struct data_opts *data_opts)
{
	unsigned nr_good = bch2_bkey_durability(c, k);
	unsigned replicas = 0;

	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
		replicas = c->opts.metadata_replicas;
		break;
	case KEY_TYPE_extent:
		replicas = io_opts->data_replicas;
		break;
	}

	if (!nr_good || nr_good >= replicas)
		return DATA_SKIP;

	data_opts->target = 0;
	data_opts->btree_insert_flags = 0;
	return DATA_ADD_REPLICAS;
}

static enum data_cmd migrate_pred(struct bch_fs *c, void *arg,
				  struct bkey_s_c k,
				  struct bch_io_opts *io_opts,
				  struct data_opts *data_opts)
{
	struct bch_ioctl_data *op = arg;

	if (!bch2_bkey_has_device(k, op->migrate.dev))
		return DATA_SKIP;

	data_opts->target = 0;
	data_opts->btree_insert_flags = 0;
	data_opts->rewrite_dev = op->migrate.dev;
	return DATA_REWRITE;
}

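/*
 * Entry point for the data job ioctl: rereplicate flushes journal pins and
 * rewrites both btree and user data to restore the configured replication
 * level; migrate rewrites everything that lives on the given device,
 * typically so the device can be evacuated before removal.
 */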
int bch2_data_job(struct bch_fs *c,
		  struct bch_move_stats *stats,
		  struct bch_ioctl_data op)
{
	int ret = 0;

	switch (op.op) {
	case BCH_DATA_OP_REREPLICATE:
		stats->data_type = BCH_DATA_JOURNAL;
		ret = bch2_journal_flush_device_pins(&c->journal, -1);

		ret = bch2_move_btree(c, rereplicate_pred, c, stats) ?: ret;

		while (1) {
			closure_wait_event(&c->btree_interior_update_wait,
					   !bch2_btree_interior_updates_nr_pending(c) ||
					   c->btree_roots_dirty);
			if (!bch2_btree_interior_updates_nr_pending(c))
				break;
			bch2_journal_meta(&c->journal);
		}

		ret = bch2_replicas_gc2(c) ?: ret;

		ret = bch2_move_data(c, NULL,
				     writepoint_hashed((unsigned long) current),
				     op.start,
				     op.end,
				     rereplicate_pred, c, stats) ?: ret;
		ret = bch2_replicas_gc2(c) ?: ret;
		break;
	case BCH_DATA_OP_MIGRATE:
		if (op.migrate.dev >= c->sb.nr_devices)
			return -EINVAL;

		stats->data_type = BCH_DATA_JOURNAL;
		ret = bch2_journal_flush_device_pins(&c->journal, op.migrate.dev);

		ret = bch2_move_btree(c, migrate_pred, &op, stats) ?: ret;
		ret = bch2_replicas_gc2(c) ?: ret;

		ret = bch2_move_data(c, NULL,
				     writepoint_hashed((unsigned long) current),
				     op.start,
				     op.end,
				     migrate_pred, &op, stats) ?: ret;
		ret = bch2_replicas_gc2(c) ?: ret;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}