/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "transaction.h"
#include "dev-replace.h"
/*
 * This is the implementation for the generic read ahead framework.
 *
 * To trigger a readahead, btrfs_reada_add must be called. It will start
 * a read ahead for the given range [start, end) on tree root. The returned
 * handle can either be used to wait on the readahead to finish
 * (btrfs_reada_wait), or to send it to the background (btrfs_reada_detach).
 *
 * The read ahead works as follows:
 * On btrfs_reada_add, the root of the tree is inserted into a radix_tree.
 * reada_start_machine will then search for extents to prefetch and trigger
 * some reads. When a read finishes for a node, all contained node/leaf
 * pointers that lie in the given range will also be enqueued. The reads will
 * be triggered in sequential order, thus giving a big win over a naive
 * enumeration. It will also make use of multi-device layouts. Each disk
 * will have its own read pointer and all disks will be utilized in parallel.
 * Also, no two disks will read both sides of a mirror simultaneously, as this
 * would waste seeking capacity. Instead both disks will read different parts
 * of the filesystem.
 * Any number of readaheads can be started in parallel. The read order will be
 * determined globally, i.e. 2 parallel readaheads will normally finish faster
 * than the 2 started one after another.
 */
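/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * start a readahead over a whole tree and wait for it to finish. The
 * all-zeroes/all-ones key range below is an illustrative assumption;
 * real callers (e.g. scrub) pass the key range they are about to walk.
 *
 *	struct btrfs_key start = { .objectid = 0, .type = 0, .offset = 0 };
 *	struct btrfs_key end = { .objectid = (u64)-1, .type = (u8)-1,
 *				 .offset = (u64)-1 };
 *	struct reada_control *rc;
 *
 *	rc = btrfs_reada_add(root, &start, &end);
 *	if (!IS_ERR(rc))
 *		(void)btrfs_reada_wait(rc);
 */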
#define MAX_IN_FLIGHT 6
struct reada_extctl {
	struct list_head	list;
	struct reada_control	*rc;
	u64			generation;
};

struct reada_extent {
	u64			logical;
	struct btrfs_key	top;
	struct list_head	extctl;
	int			refcnt;
	spinlock_t		lock;
	struct reada_zone	*zones[BTRFS_MAX_MIRRORS];
	int			nzones;
	int			scheduled;
};

struct reada_zone {
	u64			start;
	u64			end;
	u64			elems;
	struct list_head	list;
	spinlock_t		lock;
	int			locked;
	struct btrfs_device	*device;
	struct btrfs_device	*devs[BTRFS_MAX_MIRRORS]; /* full list, including
							   * self */
	int			ndevs;
	struct kref		refcnt;
};
struct reada_machine_work {
	struct btrfs_work	work;
	struct btrfs_fs_info	*fs_info;
};
static void reada_extent_put(struct btrfs_fs_info *, struct reada_extent *);
static void reada_control_release(struct kref *kref);
static void reada_zone_release(struct kref *kref);
static void reada_start_machine(struct btrfs_fs_info *fs_info);
static void __reada_start_machine(struct btrfs_fs_info *fs_info);

static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, u64 generation);
/* in case of err, eb might be NULL */
static void __readahead_hook(struct btrfs_fs_info *fs_info,
			     struct reada_extent *re, struct extent_buffer *eb,
			     int err)
{
	int nritems;
	int i;
	u64 bytenr;
	u64 generation;
	struct list_head list;

	spin_lock(&re->lock);
	/*
	 * just take the full list from the extent. afterwards we
	 * don't need the lock anymore
	 */
	list_replace_init(&re->extctl, &list);
	re->scheduled = 0;
	spin_unlock(&re->lock);

	/*
	 * this is the error case, the extent buffer has not been
	 * read correctly. We won't access anything from it and
	 * just cleanup our data structures. Effectively this will
	 * cut the branch below this node from read ahead.
	 */
	if (err)
		goto cleanup;

	/*
	 * FIXME: currently we just set nritems to 0 if this is a leaf,
	 * effectively ignoring the content. In a next step we could
	 * trigger more readahead depending on the content, e.g.
	 * fetch the checksums for the extents in the leaf.
	 */
	if (!btrfs_header_level(eb))
		goto cleanup;

	nritems = btrfs_header_nritems(eb);
	generation = btrfs_header_generation(eb);
	for (i = 0; i < nritems; i++) {
		struct reada_extctl *rec;
		u64 n_gen;
		struct btrfs_key key;
		struct btrfs_key next_key;

		btrfs_node_key_to_cpu(eb, &key, i);
		if (i + 1 < nritems)
			btrfs_node_key_to_cpu(eb, &next_key, i + 1);
		else
			next_key = re->top;
		bytenr = btrfs_node_blockptr(eb, i);
		n_gen = btrfs_node_ptr_generation(eb, i);

		list_for_each_entry(rec, &list, list) {
			struct reada_control *rc = rec->rc;

			/*
			 * if the generation doesn't match, just ignore this
			 * extctl. This will probably cut off a branch from
			 * prefetch. Alternatively one could start a new (sub-)
			 * prefetch for this branch, starting again from root.
			 * FIXME: move the generation check out of this loop
			 */
#ifdef DEBUG
			if (rec->generation != generation) {
				btrfs_debug(fs_info,
					    "generation mismatch for (%llu,%d,%llu) %llu != %llu",
					    key.objectid, key.type, key.offset,
					    rec->generation, generation);
			}
#endif
			if (rec->generation == generation &&
			    btrfs_comp_cpu_keys(&key, &rc->key_end) < 0 &&
			    btrfs_comp_cpu_keys(&next_key, &rc->key_start) > 0)
				reada_add_block(rc, bytenr, &next_key, n_gen);
		}
	}

cleanup:
	/*
	 * free extctl records
	 */
	while (!list_empty(&list)) {
		struct reada_control *rc;
		struct reada_extctl *rec;

		rec = list_first_entry(&list, struct reada_extctl, list);
		list_del(&rec->list);
		rc = rec->rc;
		kfree(rec);

		kref_get(&rc->refcnt);
		if (atomic_dec_and_test(&rc->elems)) {
			kref_put(&rc->refcnt, reada_control_release);
			wake_up(&rc->wait);
		}
		kref_put(&rc->refcnt, reada_control_release);

		reada_extent_put(fs_info, re);	/* one ref for each entry */
	}
}
int btree_readahead_hook(struct extent_buffer *eb, int err)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	int ret = 0;
	struct reada_extent *re;

	/* find extent */
	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree,
			       eb->start >> PAGE_SHIFT);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);
	if (!re) {
		ret = -1;
		goto start_machine;
	}

	__readahead_hook(fs_info, re, eb, err);
	reada_extent_put(fs_info, re);	/* our ref */

start_machine:
	reada_start_machine(fs_info);
	return ret;
}
static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical,
					  struct btrfs_bio *bbio)
{
	struct btrfs_fs_info *fs_info = dev->fs_info;
	int ret;
	struct reada_zone *zone;
	struct btrfs_block_group_cache *cache = NULL;
	u64 start;
	u64 end;
	int i;

	zone = NULL;
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
				     logical >> PAGE_SHIFT, 1);
	if (ret == 1 && logical >= zone->start && logical <= zone->end) {
		kref_get(&zone->refcnt);
		spin_unlock(&fs_info->reada_lock);
		return zone;
	}

	spin_unlock(&fs_info->reada_lock);

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return NULL;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;
	btrfs_put_block_group(cache);

	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	if (!zone)
		return NULL;

	ret = radix_tree_preload(GFP_KERNEL);
	if (ret) {
		kfree(zone);
		return NULL;
	}

	zone->start = start;
	zone->end = end;
	INIT_LIST_HEAD(&zone->list);
	spin_lock_init(&zone->lock);
	zone->locked = 0;
	kref_init(&zone->refcnt);
	zone->elems = 0;
	zone->device = dev; /* our device always sits at index 0 */
	for (i = 0; i < bbio->num_stripes; ++i) {
		/* bounds have already been checked */
		zone->devs[i] = bbio->stripes[i].dev;
	}
	zone->ndevs = bbio->num_stripes;

	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&dev->reada_zones,
				(unsigned long)(zone->end >> PAGE_SHIFT),
				zone);

	if (ret == -EEXIST) {
		kfree(zone);
		ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
					     logical >> PAGE_SHIFT, 1);
		if (ret == 1 && logical >= zone->start && logical <= zone->end)
			kref_get(&zone->refcnt);
		else
			zone = NULL;
	}
	spin_unlock(&fs_info->reada_lock);
	radix_tree_preload_end();

	return zone;
}
static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
					      u64 logical,
					      struct btrfs_key *top)
{
	int ret;
	struct reada_extent *re = NULL;
	struct reada_extent *re_exist = NULL;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_device *dev;
	struct btrfs_device *prev_dev;
	u64 length;
	int real_stripes;
	int nzones = 0;
	unsigned long index = logical >> PAGE_SHIFT;
	int dev_replace_is_ongoing;
	int have_zone = 0;

	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree, index);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);

	if (re)
		return re;

	re = kzalloc(sizeof(*re), GFP_KERNEL);
	if (!re)
		return NULL;

	re->logical = logical;
	re->top = *top;
	INIT_LIST_HEAD(&re->extctl);
	spin_lock_init(&re->lock);
	re->refcnt = 1;

	/*
	 * map block
	 */
	length = fs_info->nodesize;
	ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
			      &length, &bbio, 0);
	if (ret || !bbio || length < fs_info->nodesize)
		goto error;

	if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
		btrfs_err(fs_info,
			  "readahead: more than %d copies not supported",
			  BTRFS_MAX_MIRRORS);
		goto error;
	}

	real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	for (nzones = 0; nzones < real_stripes; ++nzones) {
		struct reada_zone *zone;

		dev = bbio->stripes[nzones].dev;

		/* cannot read ahead on missing device. */
		if (!dev->bdev)
			continue;

		zone = reada_find_zone(dev, logical, bbio);
		if (!zone)
			continue;

		re->zones[re->nzones++] = zone;
		spin_lock(&zone->lock);
		if (!zone->elems)
			kref_get(&zone->refcnt);
		++zone->elems;
		spin_unlock(&zone->lock);
		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	if (re->nzones == 0) {
		/* not a single zone found, error and out */
		goto error;
	}

	ret = radix_tree_preload(GFP_KERNEL);
	if (ret)
		goto error;

	/* insert extent in reada_tree + all per-device trees, all or nothing */
	btrfs_dev_replace_read_lock(&fs_info->dev_replace);
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&fs_info->reada_tree, index, re);
	if (ret == -EEXIST) {
		re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
		re_exist->refcnt++;
		spin_unlock(&fs_info->reada_lock);
		btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
		radix_tree_preload_end();
		goto error;
	}
	if (ret) {
		spin_unlock(&fs_info->reada_lock);
		btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
		radix_tree_preload_end();
		goto error;
	}
	radix_tree_preload_end();
	prev_dev = NULL;
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(
			&fs_info->dev_replace);
	for (nzones = 0; nzones < re->nzones; ++nzones) {
		dev = re->zones[nzones]->device;

		if (dev == prev_dev) {
			/*
			 * in case of DUP, just add the first zone. As both
			 * are on the same device, there's nothing to gain
			 * from adding both.
			 * Also, it wouldn't work, as the tree is per device
			 * and adding would fail with EEXIST
			 */
			continue;
		}
		if (!dev->bdev)
			continue;

		if (dev_replace_is_ongoing &&
		    dev == fs_info->dev_replace.tgtdev) {
			/*
			 * as this device is selected for reading only as
			 * a last resort, skip it for read ahead.
			 */
			continue;
		}
		prev_dev = dev;
		ret = radix_tree_insert(&dev->reada_extents, index, re);
		if (ret) {
			while (--nzones >= 0) {
				dev = re->zones[nzones]->device;
				BUG_ON(dev == NULL);
				/* ignore whether the entry was inserted */
				radix_tree_delete(&dev->reada_extents, index);
			}
			radix_tree_delete(&fs_info->reada_tree, index);
			spin_unlock(&fs_info->reada_lock);
			btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
			goto error;
		}
		have_zone = 1;
	}
	spin_unlock(&fs_info->reada_lock);
	btrfs_dev_replace_read_unlock(&fs_info->dev_replace);

	if (!have_zone)
		goto error;

	btrfs_put_bbio(bbio);
	return re;

error:
	for (nzones = 0; nzones < re->nzones; ++nzones) {
		struct reada_zone *zone;

		zone = re->zones[nzones];
		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/*
			 * no fs_info->reada_lock needed, as this can't be
			 * the last ref
			 */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	btrfs_put_bbio(bbio);
	kfree(re);
	return re_exist;
}
static void reada_extent_put(struct btrfs_fs_info *fs_info,
			     struct reada_extent *re)
{
	int i;
	unsigned long index = re->logical >> PAGE_SHIFT;

	spin_lock(&fs_info->reada_lock);
	if (--re->refcnt) {
		spin_unlock(&fs_info->reada_lock);
		return;
	}

	radix_tree_delete(&fs_info->reada_tree, index);
	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		radix_tree_delete(&zone->device->reada_extents, index);
	}

	spin_unlock(&fs_info->reada_lock);

	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/* no fs_info->reada_lock needed, as this can't be
			 * the last ref */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}

	kfree(re);
}
static void reada_zone_release(struct kref *kref)
{
	struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);

	radix_tree_delete(&zone->device->reada_zones,
			  zone->end >> PAGE_SHIFT);

	kfree(zone);
}
static void reada_control_release(struct kref *kref)
{
	struct reada_control *rc = container_of(kref, struct reada_control,
						refcnt);

	kfree(rc);
}
static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, u64 generation)
{
	struct btrfs_fs_info *fs_info = rc->fs_info;
	struct reada_extent *re;
	struct reada_extctl *rec;

	/* takes one ref */
	re = reada_find_extent(fs_info, logical, top);
	if (!re)
		return -1;

	rec = kzalloc(sizeof(*rec), GFP_KERNEL);
	if (!rec) {
		reada_extent_put(fs_info, re);
		return -ENOMEM;
	}

	rec->rc = rc;
	rec->generation = generation;
	atomic_inc(&rc->elems);

	spin_lock(&re->lock);
	list_add_tail(&rec->list, &re->extctl);
	spin_unlock(&re->lock);

	/* leave the ref on the extent */

	return 0;
}
/*
 * called with fs_info->reada_lock held
 */
static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
{
	int i;
	unsigned long index = zone->end >> PAGE_SHIFT;

	for (i = 0; i < zone->ndevs; ++i) {
		struct reada_zone *peer;
		peer = radix_tree_lookup(&zone->devs[i]->reada_zones, index);
		if (peer && peer->device != zone->device)
			peer->locked = lock;
	}
}
/*
 * called with fs_info->reada_lock held
 */
static int reada_pick_zone(struct btrfs_device *dev)
{
	struct reada_zone *top_zone = NULL;
	struct reada_zone *top_locked_zone = NULL;
	u64 top_elems = 0;
	u64 top_locked_elems = 0;
	unsigned long index = 0;
	int ret;

	if (dev->reada_curr_zone) {
		reada_peer_zones_set_lock(dev->reada_curr_zone, 0);
		kref_put(&dev->reada_curr_zone->refcnt, reada_zone_release);
		dev->reada_curr_zone = NULL;
	}
	/* pick the zone with the most elements */
	while (1) {
		struct reada_zone *zone;

		ret = radix_tree_gang_lookup(&dev->reada_zones,
					     (void **)&zone, index, 1);
		if (ret == 0)
			break;
		index = (zone->end >> PAGE_SHIFT) + 1;
		if (zone->locked) {
			if (zone->elems > top_locked_elems) {
				top_locked_elems = zone->elems;
				top_locked_zone = zone;
			}
		} else {
			if (zone->elems > top_elems) {
				top_elems = zone->elems;
				top_zone = zone;
			}
		}
	}
	if (top_zone)
		dev->reada_curr_zone = top_zone;
	else if (top_locked_zone)
		dev->reada_curr_zone = top_locked_zone;
	else
		return 0;

	dev->reada_next = dev->reada_curr_zone->start;
	kref_get(&dev->reada_curr_zone->refcnt);
	reada_peer_zones_set_lock(dev->reada_curr_zone, 1);

	return 1;
}
static int reada_start_machine_dev(struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = dev->fs_info;
	struct reada_extent *re = NULL;
	int mirror_num = 0;
	struct extent_buffer *eb = NULL;
	u64 logical;
	int ret;
	int i;

	spin_lock(&fs_info->reada_lock);
	if (dev->reada_curr_zone == NULL) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
	}
	/*
	 * FIXME currently we issue the reads one extent at a time. If we have
	 * a contiguous block of extents, we could also coagulate them or use
	 * plugging to speed things up
	 */
	ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
				     dev->reada_next >> PAGE_SHIFT, 1);
	if (ret == 0 || re->logical > dev->reada_curr_zone->end) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
		re = NULL;
		ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
					     dev->reada_next >> PAGE_SHIFT, 1);
	}
	if (ret == 0) {
		spin_unlock(&fs_info->reada_lock);
		return 0;
	}
	dev->reada_next = re->logical + fs_info->nodesize;
	re->refcnt++;

	spin_unlock(&fs_info->reada_lock);

	spin_lock(&re->lock);
	if (re->scheduled || list_empty(&re->extctl)) {
		spin_unlock(&re->lock);
		reada_extent_put(fs_info, re);
		return 0;
	}
	re->scheduled = 1;
	spin_unlock(&re->lock);

	/*
	 * find mirror num
	 */
	for (i = 0; i < re->nzones; ++i) {
		if (re->zones[i]->device == dev) {
			mirror_num = i + 1;
			break;
		}
	}
	logical = re->logical;

	atomic_inc(&dev->reada_in_flight);
	ret = reada_tree_block_flagged(fs_info, logical, mirror_num, &eb);
	if (ret)
		__readahead_hook(fs_info, re, NULL, ret);
	else if (eb)
		__readahead_hook(fs_info, re, eb, ret);

	if (eb)
		free_extent_buffer(eb);

	atomic_dec(&dev->reada_in_flight);
	reada_extent_put(fs_info, re);

	return 1;
}
static void reada_start_machine_worker(struct btrfs_work *work)
{
	struct reada_machine_work *rmw;
	struct btrfs_fs_info *fs_info;
	int old_ioprio;

	rmw = container_of(work, struct reada_machine_work, work);
	fs_info = rmw->fs_info;

	kfree(rmw);

	old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
				       task_nice_ioprio(current));
	set_task_ioprio(current, BTRFS_IOPRIO_READA);
	__reada_start_machine(fs_info);
	set_task_ioprio(current, old_ioprio);

	atomic_dec(&fs_info->reada_works_cnt);
}
static void __reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 enqueued;
	u64 total = 0;
	int i;

	do {
		enqueued = 0;
		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (atomic_read(&device->reada_in_flight) <
			    MAX_IN_FLIGHT)
				enqueued += reada_start_machine_dev(device);
		}
		mutex_unlock(&fs_devices->device_list_mutex);
		total += enqueued;
	} while (enqueued && total < 10000);

	if (enqueued == 0)
		return;

	/*
	 * If everything is already in the cache, this is effectively single
	 * threaded. To a) not hold the caller for too long and b) to utilize
	 * more cores, we broke the loop above after 10000 iterations and now
	 * enqueue to workers to finish it. This will distribute the load to
	 * the cores.
	 */
	for (i = 0; i < 2; ++i) {
		reada_start_machine(fs_info);
		if (atomic_read(&fs_info->reada_works_cnt) >
		    BTRFS_MAX_MIRRORS * 2)
			break;
	}
}
static void reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct reada_machine_work *rmw;

	rmw = kzalloc(sizeof(*rmw), GFP_KERNEL);
	if (!rmw) {
		/* FIXME we cannot handle this properly right now */
		BUG();
	}
	btrfs_init_work(&rmw->work, btrfs_readahead_helper,
			reada_start_machine_worker, NULL, NULL);
	rmw->fs_info = fs_info;

	btrfs_queue_work(fs_info->readahead_workers, &rmw->work);
	atomic_inc(&fs_info->reada_works_cnt);
}
#ifdef DEBUG
static void dump_devs(struct btrfs_fs_info *fs_info, int all)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	unsigned long index;
	int ret;
	int i;
	int j;
	int cnt;

	spin_lock(&fs_info->reada_lock);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		btrfs_debug(fs_info, "dev %lld has %d in flight", device->devid,
			    atomic_read(&device->reada_in_flight));
		index = 0;
		while (1) {
			struct reada_zone *zone;
			ret = radix_tree_gang_lookup(&device->reada_zones,
						     (void **)&zone, index, 1);
			if (ret == 0)
				break;
			pr_debug("  zone %llu-%llu elems %llu locked %d devs",
				 zone->start, zone->end, zone->elems,
				 zone->locked);
			for (j = 0; j < zone->ndevs; ++j) {
				pr_cont(" %lld",
					zone->devs[j]->devid);
			}
			if (device->reada_curr_zone == zone)
				pr_cont(" curr off %llu",
					device->reada_next - zone->start);
			pr_cont("\n");
			index = (zone->end >> PAGE_SHIFT) + 1;
		}
		cnt = 0;
		index = 0;
		while (all) {
			struct reada_extent *re = NULL;

			ret = radix_tree_gang_lookup(&device->reada_extents,
						     (void **)&re, index, 1);
			if (ret == 0)
				break;
			pr_debug("  re: logical %llu size %u empty %d scheduled %d",
				 re->logical, fs_info->nodesize,
				 list_empty(&re->extctl), re->scheduled);

			for (i = 0; i < re->nzones; ++i) {
				pr_cont(" zone %llu-%llu devs",
					re->zones[i]->start,
					re->zones[i]->end);
				for (j = 0; j < re->zones[i]->ndevs; ++j) {
					pr_cont(" %lld",
						re->zones[i]->devs[j]->devid);
				}
			}
			pr_cont("\n");
			index = (re->logical >> PAGE_SHIFT) + 1;
			if (++cnt > 15)
				break;
		}
	}

	index = 0;
	cnt = 0;
	while (all) {
		struct reada_extent *re = NULL;

		ret = radix_tree_gang_lookup(&fs_info->reada_tree, (void **)&re,
					     index, 1);
		if (ret == 0)
			break;
		if (!re->scheduled) {
			index = (re->logical >> PAGE_SHIFT) + 1;
			continue;
		}
		pr_debug("re: logical %llu size %u list empty %d scheduled %d",
			 re->logical, fs_info->nodesize,
			 list_empty(&re->extctl), re->scheduled);
		for (i = 0; i < re->nzones; ++i) {
			pr_cont(" zone %llu-%llu devs",
				re->zones[i]->start,
				re->zones[i]->end);
			for (j = 0; j < re->zones[i]->ndevs; ++j) {
				pr_cont(" %lld",
					re->zones[i]->devs[j]->devid);
			}
		}
		pr_cont("\n");
		index = (re->logical >> PAGE_SHIFT) + 1;
	}
	spin_unlock(&fs_info->reada_lock);
}
#endif
/*
 * interface
 */
struct reada_control *btrfs_reada_add(struct btrfs_root *root,
			struct btrfs_key *key_start, struct btrfs_key *key_end)
{
	struct reada_control *rc;
	u64 start;
	u64 generation;
	int ret;
	struct extent_buffer *node;
	static struct btrfs_key max_key = {
		.objectid = (u64)-1,
		.type = (u8)-1,
		.offset = (u64)-1
	};

	rc = kzalloc(sizeof(*rc), GFP_KERNEL);
	if (!rc)
		return ERR_PTR(-ENOMEM);

	rc->fs_info = root->fs_info;
	rc->key_start = *key_start;
	rc->key_end = *key_end;
	atomic_set(&rc->elems, 0);
	init_waitqueue_head(&rc->wait);
	kref_init(&rc->refcnt);
	kref_get(&rc->refcnt); /* one ref for having elements */

	node = btrfs_root_node(root);
	start = node->start;
	generation = btrfs_header_generation(node);
	free_extent_buffer(node);

	ret = reada_add_block(rc, start, &max_key, generation);
	if (ret) {
		kfree(rc);
		return ERR_PTR(ret);
	}

	reada_start_machine(root->fs_info);

	return rc;
}
#ifdef DEBUG
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;
	struct btrfs_fs_info *fs_info = rc->fs_info;

	while (atomic_read(&rc->elems)) {
		if (!atomic_read(&fs_info->reada_works_cnt))
			reada_start_machine(fs_info);
		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
				   5 * HZ);
		dump_devs(fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
	}

	dump_devs(fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#else
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;
	struct btrfs_fs_info *fs_info = rc->fs_info;

	while (atomic_read(&rc->elems)) {
		if (!atomic_read(&fs_info->reada_works_cnt))
			reada_start_machine(fs_info);
		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
				   (HZ + 9) / 10);
	}

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#endif
void btrfs_reada_detach(void *handle)
{
	struct reada_control *rc = handle;

	kref_put(&rc->refcnt, reada_control_release);
}
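
/*
 * A matching fire-and-forget sketch (hypothetical caller, same assumed
 * "start"/"end" keys as the sketch near the top of this file): the handle
 * is dropped right away and the readahead keeps running from the worker
 * threads until all enqueued extents have been processed.
 *
 *	rc = btrfs_reada_add(root, &start, &end);
 *	if (!IS_ERR(rc))
 *		btrfs_reada_detach(rc);
 */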