// SPDX-License-Identifier: GPL-2.0

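/*
 * Generic data movement for bcachefs: extents are read, written out to a
 * new location, and then the index is updated to point at the new copy -
 * unless the extent was overwritten while the move was in flight.
 */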
#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_on_stack.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "disk_groups.h"
#include "inode.h"
#include "io.h"
#include "journal_reclaim.h"
#include "keylist.h"
#include "move.h"
#include "replicas.h"
#include "super-io.h"
#include "trace.h"

#include <linux/ioprio.h>
#include <linux/kthread.h>

#define SECTORS_IN_FLIGHT_PER_DEVICE	2048

struct moving_io {
	struct list_head	list;
	struct closure		cl;
	bool			read_completed;

	unsigned		read_sectors;
	unsigned		write_sectors;

	struct bch_read_bio	rbio;

	struct migrate_write	write;
	/* Must be last since it is variable size */
	struct bio_vec		bi_inline_vecs[0];
};

struct moving_context {
	/* Closure for waiting on all reads and writes to complete */
	struct closure		cl;

	struct bch_move_stats	*stats;

	struct list_head	reads;

	/* in flight sectors: */
	atomic_t		read_sectors;
	atomic_t		write_sectors;

	wait_queue_head_t	wait;
};

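/*
 * Index update path for data moves, called when a move's write completes:
 * for each slot that still matches the extent we read from (same version,
 * still pointing at the location we read), splice the newly written
 * pointers into the existing extent. Extents overwritten in the meantime
 * are counted as raced and skipped.
 */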
static int bch2_migrate_index_update(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct btree_trans trans;
	struct btree_iter *iter;
	struct migrate_write *m =
		container_of(op, struct migrate_write, op);
	struct keylist *keys = &op->insert_keys;
	int ret = 0;

	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);

	iter = bch2_trans_get_iter(&trans, m->btree_id,
				   bkey_start_pos(&bch2_keylist_front(keys)->k),
				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	while (1) {
		struct bkey_s_c k;
		struct bkey_i *insert;
		struct bkey_i_extent *new;
		BKEY_PADDED(k) _new, _insert;
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;
		bool did_work = false;
		int nr;

		bch2_trans_reset(&trans, 0);

		k = bch2_btree_iter_peek_slot(iter);
		ret = bkey_err(k);
		if (ret) {
			if (ret == -EINTR)
				continue;
			break;
		}

		new = bkey_i_to_extent(bch2_keylist_front(keys));

		if (bversion_cmp(k.k->version, new->k.version) ||
		    !bch2_bkey_matches_ptr(c, k, m->ptr, m->offset))
			goto nomatch;

		if (m->data_cmd == DATA_REWRITE &&
		    !bch2_bkey_has_device(k, m->data_opts.rewrite_dev))
			goto nomatch;

		bkey_reassemble(&_insert.k, k);
		insert = &_insert.k;

		bkey_copy(&_new.k, bch2_keylist_front(keys));
		new = bkey_i_to_extent(&_new.k);
		bch2_cut_front(iter->pos, &new->k_i);

		bch2_cut_front(iter->pos,	insert);
		bch2_cut_back(new->k.p,		insert);
		bch2_cut_back(insert->k.p,	&new->k_i);

		if (m->data_cmd == DATA_REWRITE)
			bch2_bkey_drop_device(bkey_i_to_s(insert),
					      m->data_opts.rewrite_dev);

		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry) {
			if (bch2_bkey_has_device(bkey_i_to_s_c(insert), p.ptr.dev)) {
				/*
				 * raced with another move op? extent already
				 * has a pointer to the device we just wrote
				 * data to
				 */
				continue;
			}

			bch2_extent_ptr_decoded_append(insert, &p);
			did_work = true;
		}

		if (!did_work)
			goto nomatch;

		bch2_bkey_narrow_crcs(insert,
				(struct bch_extent_crc_unpacked) { 0 });
		bch2_extent_normalize(c, bkey_i_to_s(insert));
		bch2_bkey_mark_replicas_cached(c, bkey_i_to_s(insert),
					       op->opts.background_target,
					       op->opts.data_replicas);

		/*
		 * If we're not fully overwriting @k, and it's compressed, we
		 * need a reservation for all the pointers in @insert
		 */
		nr = bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(insert)) -
			m->nr_ptrs_reserved;

		if (insert->k.size < k.k->size &&
		    bch2_bkey_sectors_compressed(k) &&
		    nr > 0) {
			ret = bch2_disk_reservation_add(c, &op->res,
					keylist_sectors(keys) * nr, 0);
			if (ret)
				goto out;

			m->nr_ptrs_reserved += nr;
			goto next;
		}

		bch2_trans_update(&trans, iter, insert, 0);

		ret = bch2_trans_commit(&trans, &op->res,
				op_journal_seq(op),
				BTREE_INSERT_NOFAIL|
				BTREE_INSERT_USE_RESERVE|
				m->data_opts.btree_insert_flags);
		if (!ret)
			atomic_long_inc(&c->extent_migrate_done);
		if (ret == -EINTR)
			ret = 0;
		if (ret)
			break;
next:
		while (bkey_cmp(iter->pos, bch2_keylist_front(keys)->k.p) >= 0) {
			bch2_keylist_pop_front(keys);
			if (bch2_keylist_empty(keys))
				goto out;
		}
		continue;
nomatch:
		if (m->ctxt) {
			BUG_ON(k.k->p.offset <= iter->pos.offset);
			atomic64_inc(&m->ctxt->stats->keys_raced);
			atomic64_add(k.k->p.offset - iter->pos.offset,
				     &m->ctxt->stats->sectors_raced);
		}
		atomic_long_inc(&c->extent_migrate_raced);
		trace_move_race(&new->k);
		bch2_btree_iter_next_slot(iter);
		goto next;
	}
out:
	bch2_trans_exit(&trans);
	BUG_ON(ret == -EINTR);
	return ret;
}

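/*
 * Called once the read half of a move completes: copy the location,
 * version and checksum information from the read into the write op, so the
 * data can be written back out exactly as it was read (the read was done
 * with BCH_READ_NODECODE, i.e. the data is still encoded).
 */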
void bch2_migrate_read_done(struct migrate_write *m, struct bch_read_bio *rbio)
{
	/* write bio must own pages: */
	BUG_ON(!m->op.wbio.bio.bi_vcnt);

	m->ptr		= rbio->pick.ptr;
	m->offset	= rbio->pos.offset - rbio->pick.crc.offset;
	m->op.devs_have	= rbio->devs_have;
	m->op.pos	= rbio->pos;
	m->op.version	= rbio->version;
	m->op.crc	= rbio->pick.crc;
	m->op.wbio.bio.bi_iter.bi_size = m->op.crc.compressed_size << 9;

	if (bch2_csum_type_is_encryption(m->op.crc.csum_type)) {
		m->op.nonce	= m->op.crc.nonce + m->op.crc.offset;
		m->op.csum_type = m->op.crc.csum_type;
	}

	if (m->data_cmd == DATA_REWRITE)
		bch2_dev_list_drop_dev(&m->op.devs_have, m->data_opts.rewrite_dev);
}

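/*
 * Set up the write half of a move: initialize the write op from the io
 * options and data command, and take any disk reservation needed up front
 * (extra replicas for DATA_ADD_REPLICAS, space for a temporary second copy
 * of compressed data for DATA_REWRITE).
 */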
int bch2_migrate_write_init(struct bch_fs *c, struct migrate_write *m,
			    struct write_point_specifier wp,
			    struct bch_io_opts io_opts,
			    enum data_cmd data_cmd,
			    struct data_opts data_opts,
			    enum btree_id btree_id,
			    struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	int ret;

	m->btree_id	= btree_id;
	m->data_cmd	= data_cmd;
	m->data_opts	= data_opts;
	m->nr_ptrs_reserved = 0;

	bch2_write_op_init(&m->op, c, io_opts);

	if (!bch2_bkey_is_incompressible(k))
		m->op.compression_type =
			bch2_compression_opt_to_type[io_opts.background_compression ?:
						     io_opts.compression];
	else
		m->op.incompressible = true;

	m->op.target	= data_opts.target;
	m->op.write_point = wp;

	if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE) {
		m->op.alloc_reserve = RESERVE_MOVINGGC;
		m->op.flags |= BCH_WRITE_ALLOC_NOWAIT;
	} else {
		/* XXX: this should probably be passed in */
		m->op.flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS;
	}

	m->op.flags |= BCH_WRITE_PAGES_STABLE|
		BCH_WRITE_PAGES_OWNED|
		BCH_WRITE_DATA_ENCODED|
		BCH_WRITE_FROM_INTERNAL;

	m->op.nr_replicas	= 1;
	m->op.nr_replicas_required = 1;
	m->op.index_update_fn	= bch2_migrate_index_update;

	switch (data_cmd) {
	case DATA_ADD_REPLICAS: {
		/*
		 * DATA_ADD_REPLICAS is used for moving data to a different
		 * device in the background, and due to compression the new copy
		 * might take up more space than the old copy:
		 */
#if 0
		int nr = (int) io_opts.data_replicas -
			bch2_bkey_nr_ptrs_allocated(k);
#endif
		int nr = (int) io_opts.data_replicas;

		if (nr > 0) {
			m->op.nr_replicas = m->nr_ptrs_reserved = nr;

			ret = bch2_disk_reservation_get(c, &m->op.res,
					k.k->size, m->op.nr_replicas, 0);
			if (ret)
				return ret;
		}
		break;
	}
	case DATA_REWRITE: {
		unsigned compressed_sectors = 0;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			if (!p.ptr.cached &&
			    crc_is_compressed(p.crc) &&
			    bch2_dev_in_target(c, p.ptr.dev, data_opts.target))
				compressed_sectors += p.crc.compressed_size;

		if (compressed_sectors) {
			ret = bch2_disk_reservation_add(c, &m->op.res,
					compressed_sectors,
					BCH_DISK_RESERVATION_NOFAIL);
			if (ret)
				return ret;
		}
		break;
	}
	case DATA_PROMOTE:
		m->op.flags	|= BCH_WRITE_ALLOC_NOWAIT;
		m->op.flags	|= BCH_WRITE_CACHED;
		break;
	default:
		BUG();
	}

	return 0;
}

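/*
 * Each moving_io goes through a small closure state machine: the read
 * completes in move_read_endio(), move_write() then resubmits the data as
 * a write, move_write_done() runs on write completion, and move_free()
 * releases the reservation, the bio pages and the moving_io itself.
 */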
static void move_free(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct moving_context *ctxt = io->write.ctxt;
	struct bvec_iter_all iter;
	struct bio_vec *bv;

	bch2_disk_reservation_put(io->write.op.c, &io->write.op.res);

	bio_for_each_segment_all(bv, &io->write.op.wbio.bio, iter)
		if (bv->bv_page)
			__free_page(bv->bv_page);

	wake_up(&ctxt->wait);

	kfree(io);
}

static void move_write_done(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);

	atomic_sub(io->write_sectors, &io->write.ctxt->write_sectors);
	closure_return_with_destructor(cl, move_free);
}

static void move_write(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);

	if (unlikely(io->rbio.bio.bi_status || io->rbio.hole)) {
		closure_return_with_destructor(cl, move_free);
		return;
	}

	bch2_migrate_read_done(&io->write, &io->rbio);

	atomic_add(io->write_sectors, &io->write.ctxt->write_sectors);
	closure_call(&io->write.op.cl, bch2_write, NULL, cl);
	continue_at(cl, move_write_done, NULL);
}

static inline struct moving_io *next_pending_write(struct moving_context *ctxt)
{
	struct moving_io *io =
		list_first_entry_or_null(&ctxt->reads, struct moving_io, list);

	return io && io->read_completed ? io : NULL;
}

static void move_read_endio(struct bio *bio)
{
	struct moving_io *io = container_of(bio, struct moving_io, rbio.bio);
	struct moving_context *ctxt = io->write.ctxt;

	atomic_sub(io->read_sectors, &ctxt->read_sectors);
	io->read_completed = true;

	if (next_pending_write(ctxt))
		wake_up(&ctxt->wait);

	closure_put(&ctxt->cl);
}

static void do_pending_writes(struct moving_context *ctxt)
{
	struct moving_io *io;

	while ((io = next_pending_write(ctxt))) {
		list_del(&io->list);
		closure_call(&io->cl, move_write, NULL, &ctxt->cl);
	}
}

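/*
 * Like wait_event(), but kicks off the writes for any reads that have
 * already completed before sleeping, since those pending writes are what
 * make forward progress possible.
 */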
#define move_ctxt_wait_event(_ctxt, _cond)			\
do {								\
	do_pending_writes(_ctxt);				\
								\
	if (_cond)						\
		break;						\
	__wait_event((_ctxt)->wait,				\
		     next_pending_write(_ctxt) || (_cond));	\
} while (1)

static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt)
{
	unsigned sectors_pending = atomic_read(&ctxt->write_sectors);

	move_ctxt_wait_event(ctxt,
		!atomic_read(&ctxt->write_sectors) ||
		atomic_read(&ctxt->write_sectors) != sectors_pending);
}

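/*
 * Move a single extent: throttle against the in-flight sector limits,
 * allocate a moving_io big enough for the (possibly decompressed) data,
 * set up the write, then submit the read. The rest of the pipeline runs
 * from the read completion.
 */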
static int bch2_move_extent(struct bch_fs *c,
			    struct moving_context *ctxt,
			    struct write_point_specifier wp,
			    struct bch_io_opts io_opts,
			    enum btree_id btree_id,
			    struct bkey_s_c k,
			    enum data_cmd data_cmd,
			    struct data_opts data_opts)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct moving_io *io;
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned sectors = k.k->size, pages;
	int ret = -ENOMEM;

	move_ctxt_wait_event(ctxt,
		atomic_read(&ctxt->write_sectors) <
		SECTORS_IN_FLIGHT_PER_DEVICE);

	move_ctxt_wait_event(ctxt,
		atomic_read(&ctxt->read_sectors) <
		SECTORS_IN_FLIGHT_PER_DEVICE);

	/* write path might have to decompress data: */
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		sectors = max_t(unsigned, sectors, p.crc.uncompressed_size);

	pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
	io = kzalloc(sizeof(struct moving_io) +
		     sizeof(struct bio_vec) * pages, GFP_KERNEL);
	if (!io)
		goto err;

	io->write.ctxt		= ctxt;
	io->read_sectors	= k.k->size;
	io->write_sectors	= k.k->size;

	bio_init(&io->write.op.wbio.bio, NULL, io->bi_inline_vecs, pages, 0);
	bio_set_prio(&io->write.op.wbio.bio,
		     IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	if (bch2_bio_alloc_pages(&io->write.op.wbio.bio, sectors << 9,
				 GFP_KERNEL))
		goto err_free;

	io->rbio.c		= c;
	io->rbio.opts		= io_opts;
	bio_init(&io->rbio.bio, NULL, io->bi_inline_vecs, pages, 0);
	io->rbio.bio.bi_vcnt = pages;
	bio_set_prio(&io->rbio.bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
	io->rbio.bio.bi_iter.bi_size = sectors << 9;

	io->rbio.bio.bi_opf		= REQ_OP_READ;
	io->rbio.bio.bi_iter.bi_sector	= bkey_start_offset(k.k);
	io->rbio.bio.bi_end_io		= move_read_endio;

	ret = bch2_migrate_write_init(c, &io->write, wp, io_opts,
				      data_cmd, data_opts, btree_id, k);
	if (ret)
		goto err_free_pages;

	atomic64_inc(&ctxt->stats->keys_moved);
	atomic64_add(k.k->size, &ctxt->stats->sectors_moved);

	trace_move_extent(k.k);

	atomic_add(io->read_sectors, &ctxt->read_sectors);
	list_add_tail(&io->list, &ctxt->reads);

	/*
	 * dropped by move_read_endio() - guards against use after free of
	 * ctxt when doing wakeup
	 */
	closure_get(&ctxt->cl);
	bch2_read_extent(c, &io->rbio, k, 0,
			 BCH_READ_NODECODE|
			 BCH_READ_LAST_FRAGMENT);
	return 0;
err_free_pages:
	bio_free_pages(&io->write.op.wbio.bio);
err_free:
	kfree(io);
err:
	trace_move_alloc_fail(k.k);
	return ret;
}

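/*
 * Walk one btree from @start to @end, applying @pred to each key to decide
 * whether (and how) to move it. Handles rate limiting, freezing and
 * kthread stop requests, and reloads the per-inode io options whenever the
 * iterator crosses into a new inode.
 */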
static int __bch2_move_data(struct bch_fs *c,
			    struct moving_context *ctxt,
			    struct bch_ratelimit *rate,
			    struct write_point_specifier wp,
			    struct bpos start,
			    struct bpos end,
			    move_pred_fn pred, void *arg,
			    struct bch_move_stats *stats,
			    enum btree_id btree_id)
{
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
	struct bkey_on_stack sk;
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	struct data_opts data_opts;
	enum data_cmd data_cmd;
	u64 delay, cur_inum = U64_MAX;
	int ret = 0, ret2;

	bkey_on_stack_init(&sk);
	bch2_trans_init(&trans, c, 0, 0);

	stats->data_type = BCH_DATA_user;
	stats->btree_id	= btree_id;
	stats->pos	= POS_MIN;

	iter = bch2_trans_get_iter(&trans, btree_id, start,
				   BTREE_ITER_PREFETCH);

	if (rate)
		bch2_ratelimit_reset(rate);

	while (1) {
		do {
			delay = rate ? bch2_ratelimit_delay(rate) : 0;

			if (delay) {
				bch2_trans_unlock(&trans);
				set_current_state(TASK_INTERRUPTIBLE);
			}

			if (kthread && (ret = kthread_should_stop())) {
				__set_current_state(TASK_RUNNING);
				goto out;
			}

			if (delay)
				schedule_timeout(delay);

			if (unlikely(freezing(current))) {
				bch2_trans_unlock(&trans);
				move_ctxt_wait_event(ctxt, list_empty(&ctxt->reads));
				try_to_freeze();
			}
		} while (delay);
peek:
		k = bch2_btree_iter_peek(iter);

		stats->pos = iter->pos;

		if (!k.k)
			break;
		ret = bkey_err(k);
		if (ret)
			break;
		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
			break;

		if (!bkey_extent_is_direct_data(k.k))
			goto next_nondata;

		if (btree_id == BTREE_ID_EXTENTS &&
		    cur_inum != k.k->p.inode) {
			struct bch_inode_unpacked inode;

			/* don't hold btree locks while looking up inode: */
			bch2_trans_unlock(&trans);

			io_opts = bch2_opts_to_inode_opts(c->opts);
			if (!bch2_inode_find_by_inum(c, k.k->p.inode, &inode))
				bch2_io_opts_apply(&io_opts, bch2_inode_opts_get(&inode));
			cur_inum = k.k->p.inode;
			goto peek;
		}

		switch ((data_cmd = pred(c, arg, k, &io_opts, &data_opts))) {
		case DATA_SKIP:
			goto next;
		case DATA_SCRUB:
			BUG();
		case DATA_ADD_REPLICAS:
		case DATA_REWRITE:
		case DATA_PROMOTE:
			break;
		default:
			BUG();
		}

		/* unlock before doing IO: */
		bkey_on_stack_reassemble(&sk, c, k);
		k = bkey_i_to_s_c(sk.k);
		bch2_trans_unlock(&trans);

		ret2 = bch2_move_extent(c, ctxt, wp, io_opts, btree_id, k,
					data_cmd, data_opts);
		if (ret2) {
			if (ret2 == -ENOMEM) {
				/* memory allocation failure, wait for some IO to finish */
				bch2_move_ctxt_wait_for_io(ctxt);
				continue;
			}

			/* XXX signal failure */
			goto next;
		}

		if (rate)
			bch2_ratelimit_increment(rate, k.k->size);
next:
		atomic64_add(k.k->size * bch2_bkey_nr_ptrs_allocated(k),
			     &stats->sectors_seen);
next_nondata:
		bch2_btree_iter_next(iter);
		bch2_trans_cond_resched(&trans);
	}
out:
	ret = bch2_trans_exit(&trans) ?: ret;
	bkey_on_stack_exit(&sk, c);

	return ret;
}

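/*
 * Move user data: run the move over the extents btree and then over the
 * reflink btree, waiting for all reads and writes to complete before
 * returning.
 */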
int bch2_move_data(struct bch_fs *c,
		   struct bch_ratelimit *rate,
		   struct write_point_specifier wp,
		   struct bpos start,
		   struct bpos end,
		   move_pred_fn pred, void *arg,
		   struct bch_move_stats *stats)
{
	struct moving_context ctxt = { .stats = stats };
	int ret;

	closure_init_stack(&ctxt.cl);
	INIT_LIST_HEAD(&ctxt.reads);
	init_waitqueue_head(&ctxt.wait);

	stats->data_type = BCH_DATA_user;

	ret =   __bch2_move_data(c, &ctxt, rate, wp, start, end,
				 pred, arg, stats, BTREE_ID_EXTENTS) ?:
		__bch2_move_data(c, &ctxt, rate, wp, start, end,
				 pred, arg, stats, BTREE_ID_REFLINK);

	move_ctxt_wait_event(&ctxt, list_empty(&ctxt.reads));
	closure_sync(&ctxt.cl);

	EBUG_ON(atomic_read(&ctxt.write_sectors));

	trace_move_data(c,
			atomic64_read(&stats->sectors_moved),
			atomic64_read(&stats->keys_moved));

	return ret;
}

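/*
 * Btree node counterpart of bch2_move_data(): walk every node of every
 * btree and rewrite the nodes @pred selects, which moves them to a new
 * location on disk.
 */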
static int bch2_move_btree(struct bch_fs *c,
			   move_pred_fn pred,
			   void *arg,
			   struct bch_move_stats *stats)
{
	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
	struct btree_trans trans;
	struct btree_iter *iter;
	struct btree *b;
	unsigned id;
	struct data_opts data_opts;
	enum data_cmd cmd;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	stats->data_type = BCH_DATA_btree;

	for (id = 0; id < BTREE_ID_NR; id++) {
		stats->btree_id = id;

		for_each_btree_node(&trans, iter, id, POS_MIN,
				    BTREE_ITER_PREFETCH, b) {
			stats->pos = iter->pos;

			switch ((cmd = pred(c, arg,
					    bkey_i_to_s_c(&b->key),
					    &io_opts, &data_opts))) {
			case DATA_SKIP:
				goto next;
			case DATA_SCRUB:
				BUG();
			case DATA_ADD_REPLICAS:
			case DATA_REWRITE:
				break;
			default:
				BUG();
			}

			ret = bch2_btree_node_rewrite(c, iter,
					b->data->keys.seq, 0) ?: ret;
next:
			bch2_trans_cond_resched(&trans);
		}

		ret = bch2_trans_iter_free(&trans, iter) ?: ret;
	}

	bch2_trans_exit(&trans);

	return ret;
}

#if 0
static enum data_cmd scrub_pred(struct bch_fs *c, void *arg,
				struct bkey_s_c k,
				struct bch_io_opts *io_opts,
				struct data_opts *data_opts)
{
	return DATA_SCRUB;
}
#endif

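/*
 * Predicate for rereplicate: select keys whose durability is below the
 * configured number of replicas, so that additional copies get written.
 */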
static enum data_cmd rereplicate_pred(struct bch_fs *c, void *arg,
				      struct bkey_s_c k,
				      struct bch_io_opts *io_opts,
				      struct data_opts *data_opts)
{
	unsigned nr_good = bch2_bkey_durability(c, k);
	unsigned replicas = 0;

	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
		replicas = c->opts.metadata_replicas;
		break;
	case KEY_TYPE_extent:
		replicas = io_opts->data_replicas;
		break;
	}

	if (!nr_good || nr_good >= replicas)
		return DATA_SKIP;

	data_opts->target		= 0;
	data_opts->btree_insert_flags	= 0;
	return DATA_ADD_REPLICAS;
}

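/*
 * Predicate for device evacuation: rewrite any key that has a pointer to
 * the device being migrated off of.
 */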
static enum data_cmd migrate_pred(struct bch_fs *c, void *arg,
				  struct bkey_s_c k,
				  struct bch_io_opts *io_opts,
				  struct data_opts *data_opts)
{
	struct bch_ioctl_data *op = arg;

	if (!bch2_bkey_has_device(k, op->migrate.dev))
		return DATA_SKIP;

	data_opts->target		= 0;
	data_opts->btree_insert_flags	= 0;
	data_opts->rewrite_dev		= op->migrate.dev;
	return DATA_REWRITE;
}

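/*
 * Entry point for data jobs described by struct bch_ioctl_data
 * (rereplicate, migrate): flush journal pins for the affected device(s),
 * then move btree and user data, garbage collecting the replicas table as
 * it goes.
 */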
int bch2_data_job(struct bch_fs *c,
		  struct bch_move_stats *stats,
		  struct bch_ioctl_data op)
{
	int ret = 0;

	switch (op.op) {
	case BCH_DATA_OP_REREPLICATE:
		stats->data_type = BCH_DATA_journal;
		ret = bch2_journal_flush_device_pins(&c->journal, -1);

		ret = bch2_move_btree(c, rereplicate_pred, c, stats) ?: ret;

		closure_wait_event(&c->btree_interior_update_wait,
				   !bch2_btree_interior_updates_nr_pending(c));

		ret = bch2_replicas_gc2(c) ?: ret;

		ret = bch2_move_data(c, NULL,
				     writepoint_hashed((unsigned long) current),
				     op.start,
				     op.end,
				     rereplicate_pred, c, stats) ?: ret;
		ret = bch2_replicas_gc2(c) ?: ret;
		break;
	case BCH_DATA_OP_MIGRATE:
		if (op.migrate.dev >= c->sb.nr_devices)
			return -EINVAL;

		stats->data_type = BCH_DATA_journal;
		ret = bch2_journal_flush_device_pins(&c->journal, op.migrate.dev);

		ret = bch2_move_btree(c, migrate_pred, &op, stats) ?: ret;
		ret = bch2_replicas_gc2(c) ?: ret;

		ret = bch2_move_data(c, NULL,
				     writepoint_hashed((unsigned long) current),
				     op.start,
				     op.end,
				     migrate_pred, &op, stats) ?: ret;
		ret = bch2_replicas_gc2(c) ?: ret;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}