// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"
15 #include <linux/blkdev.h>
16 #include <linux/sort.h>
17 #include <linux/sched/clock.h>
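
/*
 * The string tables below are exported through sysfs; each array is
 * NULL-terminated so it can be printed by bch_snprint_string_list() and
 * matched with __sysfs_match_string(). The index of the selected entry
 * corresponds to the stored field value (e.g. BDEV_CACHE_MODE).
 */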
/* Default is 0 ("writethrough") */
static const char * const bch_cache_modes[] = {
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

/* Default is 0 ("auto") */
static const char * const bch_stop_on_failure_modes[] = {
	"auto",
	"always",
	NULL
};

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};
48 write_attribute(attach);
49 write_attribute(detach);
50 write_attribute(unregister);
51 write_attribute(stop);
52 write_attribute(clear_stats);
53 write_attribute(trigger_gc);
54 write_attribute(prune_cache);
55 write_attribute(flash_vol_create);
57 read_attribute(bucket_size);
58 read_attribute(block_size);
59 read_attribute(nbuckets);
60 read_attribute(tree_depth);
61 read_attribute(root_usage_percent);
62 read_attribute(priority_stats);
63 read_attribute(btree_cache_size);
64 read_attribute(btree_cache_max_chain);
65 read_attribute(cache_available_percent);
66 read_attribute(written);
67 read_attribute(btree_written);
68 read_attribute(metadata_written);
69 read_attribute(active_journal_entries);
70 read_attribute(backing_dev_name);
71 read_attribute(backing_dev_uuid);
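
/*
 * Each sysfs_time_stats_attribute() below expands to a family of
 * read-only time-stats attributes in the units given; the matching
 * sysfs_time_stats_attribute_list() entries appear in
 * bch_cache_set_internal_files[] further down.
 */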
73 sysfs_time_stats_attribute(btree_gc, sec, ms);
74 sysfs_time_stats_attribute(btree_split, sec, us);
75 sysfs_time_stats_attribute(btree_sort, ms, us);
76 sysfs_time_stats_attribute(btree_read, ms, us);
78 read_attribute(btree_nodes);
79 read_attribute(btree_used_percent);
80 read_attribute(average_key_size);
81 read_attribute(dirty_data);
82 read_attribute(bset_tree_stats);
84 read_attribute(state);
85 read_attribute(cache_read_races);
86 read_attribute(reclaim);
87 read_attribute(flush_write);
88 read_attribute(retry_flush_write);
89 read_attribute(writeback_keys_done);
90 read_attribute(writeback_keys_failed);
91 read_attribute(io_errors);
92 read_attribute(congested);
93 read_attribute(cutoff_writeback);
94 read_attribute(cutoff_writeback_sync);
95 rw_attribute(congested_read_threshold_us);
96 rw_attribute(congested_write_threshold_us);
98 rw_attribute(sequential_cutoff);
99 rw_attribute(data_csum);
100 rw_attribute(cache_mode);
101 rw_attribute(stop_when_cache_set_failed);
102 rw_attribute(writeback_metadata);
103 rw_attribute(writeback_running);
104 rw_attribute(writeback_percent);
105 rw_attribute(writeback_delay);
106 rw_attribute(writeback_rate);
108 rw_attribute(writeback_rate_update_seconds);
109 rw_attribute(writeback_rate_i_term_inverse);
110 rw_attribute(writeback_rate_p_term_inverse);
111 rw_attribute(writeback_rate_minimum);
112 read_attribute(writeback_rate_debug);
114 read_attribute(stripe_size);
115 read_attribute(partial_stripes_expensive);
117 rw_attribute(synchronous);
118 rw_attribute(journal_delay_ms);
119 rw_attribute(io_disable);
120 rw_attribute(discard);
121 rw_attribute(running);
123 rw_attribute(readahead);
124 rw_attribute(errors);
125 rw_attribute(io_error_limit);
126 rw_attribute(io_error_halflife);
127 rw_attribute(verify);
128 rw_attribute(bypass_torture_test);
129 rw_attribute(key_merging_disabled);
130 rw_attribute(gc_always_rewrite);
131 rw_attribute(expensive_debug_checks);
132 rw_attribute(cache_replacement_policy);
133 rw_attribute(btree_shrinker_disabled);
134 rw_attribute(copy_gc_enabled);
135 rw_attribute(gc_after_writeback);
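
/*
 * Print a NULL-terminated string list into buf, marking the selected
 * entry with brackets, e.g. "writethrough [writeback] writearound none".
 */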
static ssize_t bch_snprint_string_list(char *buf,
				       size_t size,
				       const char * const list[],
				       size_t selected)
{
	char *out = buf;
	size_t i;

	for (i = 0; list[i]; i++)
		out += snprintf(out, buf + size - out,
				i == selected ? "[%s] " : "%s ", list[i]);

	out[-1] = '\n';
	return out - buf;
}
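
/*
 * SHOW()/STORE() (see sysfs.h) expand to the show/store methods for a
 * kobject; the sysfs_print()/sysfs_printf()/sysfs_strtoul() helpers
 * inside them return early once attr matches the named attribute.
 */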
SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	char const *states[] = { "no cache", "clean", "dirty", "inconsistent" };
	int wb = dc->writeback_running;

#define var(stat)		(dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes,
					       BDEV_CACHE_MODE(&dc->sb));

	if (attr == &sysfs_stop_when_cache_set_failed)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_stop_on_failure_modes,
					       dc->stop_when_cache_set_failed);
174 sysfs_printf(data_csum, "%i", dc->disk.data_csum);
175 var_printf(verify, "%i");
176 var_printf(bypass_torture_test, "%i");
177 var_printf(writeback_metadata, "%i");
178 var_printf(writeback_running, "%i");
179 var_print(writeback_delay);
180 var_print(writeback_percent);
181 sysfs_hprint(writeback_rate,
182 wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
183 sysfs_hprint(io_errors, atomic_read(&dc->io_errors));
184 sysfs_printf(io_error_limit, "%i", dc->error_limit);
185 sysfs_printf(io_disable, "%i", dc->io_disable);
186 var_print(writeback_rate_update_seconds);
187 var_print(writeback_rate_i_term_inverse);
188 var_print(writeback_rate_p_term_inverse);
189 var_print(writeback_rate_minimum);
	if (attr == &sysfs_writeback_rate_debug) {
		char rate[20];
		char dirty[20];
		char target[20];
		char proportional[20];
		char integral[20];
		char change[20];
		s64 next_io;

		/*
		 * Except for dirty and target, other values should
		 * be 0 if writeback is not running.
		 */
		bch_hprint(rate,
			   wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
			      : 0);
		bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(target, dc->writeback_rate_target << 9);
		bch_hprint(proportional,
			   wb ? dc->writeback_rate_proportional << 9 : 0);
		bch_hprint(integral,
			   wb ? dc->writeback_rate_integral_scaled << 9 : 0);
		bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
		next_io = wb ? div64_s64(dc->writeback_rate.next - local_clock(),
					 NSEC_PER_MSEC) : 0;

		return sprintf(buf,
			       "rate:\t\t%s/sec\n"
			       "dirty:\t\t%s\n"
			       "target:\t\t%s\n"
			       "proportional:\t%s\n"
			       "integral:\t%s\n"
			       "change:\t\t%s/sec\n"
			       "next io:\t%llims\n",
			       rate, dirty, target, proportional,
			       integral, change, next_io);
	}
229 sysfs_hprint(dirty_data,
230 bcache_dev_sectors_dirty(&dc->disk) << 9);
232 sysfs_hprint(stripe_size, ((uint64_t)dc->disk.stripe_size) << 9);
233 var_printf(partial_stripes_expensive, "%u");
235 var_hprint(sequential_cutoff);
236 var_hprint(readahead);
238 sysfs_print(running, atomic_read(&dc->running));
239 sysfs_print(state, states[BDEV_STATE(&dc->sb)]);
	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		/* terminate at index SB_LABEL_SIZE, not one past it */
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	if (attr == &sysfs_backing_dev_name) {
		snprintf(buf, BDEVNAME_SIZE + 1, "%s", dc->backing_dev_name);
		strcat(buf, "\n");
		return strlen(buf);
	}

	if (attr == &sysfs_backing_dev_uuid) {
		/* convert binary uuid into 36-byte string plus '\0' */
		snprintf(buf, 36+1, "%pU", dc->sb.uuid);
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}
#undef var
264 SHOW_LOCKED(bch_cached_dev)
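
/*
 * __cached_dev_store() is always entered with bch_register_lock held;
 * see the bch_cached_dev_store() wrapper below.
 */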
STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	ssize_t v;
	struct cache_set *c;
	struct kobj_uevent_env *env;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)
278 sysfs_strtoul(data_csum, dc->disk.data_csum);
280 sysfs_strtoul_bool(bypass_torture_test, dc->bypass_torture_test);
281 sysfs_strtoul_bool(writeback_metadata, dc->writeback_metadata);
282 sysfs_strtoul_bool(writeback_running, dc->writeback_running);
283 sysfs_strtoul_clamp(writeback_delay, dc->writeback_delay, 0, UINT_MAX);
285 sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent,
286 0, bch_cutoff_writeback);
	if (attr == &sysfs_writeback_rate) {
		ssize_t ret;
		long int v = atomic_long_read(&dc->writeback_rate.rate);

		ret = strtoul_safe_clamp(buf, v, 1, INT_MAX);

		if (!ret) {
			atomic_long_set(&dc->writeback_rate.rate, v);
			ret = size;
		}

		return ret;
	}
302 sysfs_strtoul_clamp(writeback_rate_update_seconds,
303 dc->writeback_rate_update_seconds,
304 1, WRITEBACK_RATE_UPDATE_SECS_MAX);
	sysfs_strtoul_clamp(writeback_rate_i_term_inverse,
			    dc->writeback_rate_i_term_inverse,
			    1, UINT_MAX);
308 d_strtoul_nonzero(writeback_rate_p_term_inverse);
309 d_strtoul_nonzero(writeback_rate_minimum);
311 sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);
	if (attr == &sysfs_io_disable) {
		int v = strtoul_or_return(buf);

		dc->io_disable = v ? 1 : 0;
	}
	sysfs_strtoul_clamp(sequential_cutoff,
			    dc->sequential_cutoff,
			    0, UINT_MAX);
322 d_strtoi_h(readahead);
324 if (attr == &sysfs_clear_stats)
325 bch_cache_accounting_clear(&dc->accounting);
327 if (attr == &sysfs_running &&
328 strtoul_or_return(buf))
329 bch_cached_dev_run(dc);
	if (attr == &sysfs_cache_mode) {
		v = __sysfs_match_string(bch_cache_modes, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}
	if (attr == &sysfs_stop_when_cache_set_failed) {
		v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
		if (v < 0)
			return v;

		dc->stop_when_cache_set_failed = v;
	}
	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
				   KOBJ_CHANGE,
				   env->envp);
		kfree(env);
	}
	if (attr == &sysfs_attach) {
		uint8_t		set_uuid[16];

		if (bch_parse_uuid(buf, set_uuid) < 16)
			return -EINVAL;

		v = -ENOENT;
		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c, set_uuid);
			if (!v)
				return size;
		}
		if (v == -ENOENT)
			pr_err("Can't attach %s: cache set not found", buf);
		return v;
	}
393 if (attr == &sysfs_detach && dc->disk.c)
394 bch_cached_dev_detach(dc);
	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}
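
/*
 * The locked wrapper below takes bch_register_lock around the real
 * store, then handles the side effects that must happen after the
 * attribute value has changed.
 */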
STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);
	if (attr == &sysfs_writeback_running) {
		/* dc->writeback_running changed in __cached_dev_store() */
		if (IS_ERR_OR_NULL(dc->writeback_thread)) {
			/*
			 * reject setting it to 1 via sysfs if writeback
			 * kthread is not created yet.
			 */
			if (dc->writeback_running) {
				dc->writeback_running = false;
				pr_err("%s: failed to run non-existent writeback thread",
				       dc->disk.disk->disk_name);
			}
		} else
			/*
			 * writeback kthread will check if dc->writeback_running
			 * is true or false.
			 */
			bch_writeback_queue(dc);
	}
430 if (attr == &sysfs_writeback_percent)
431 if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
432 schedule_delayed_work(&dc->writeback_rate_update,
433 dc->writeback_rate_update_seconds * HZ);
	mutex_unlock(&bch_register_lock);
	return size;
}
439 static struct attribute *bch_cached_dev_files[] = {
447 &sysfs_stop_when_cache_set_failed,
448 &sysfs_writeback_metadata,
449 &sysfs_writeback_running,
450 &sysfs_writeback_delay,
451 &sysfs_writeback_percent,
452 &sysfs_writeback_rate,
453 &sysfs_writeback_rate_update_seconds,
454 &sysfs_writeback_rate_i_term_inverse,
455 &sysfs_writeback_rate_p_term_inverse,
456 &sysfs_writeback_rate_minimum,
457 &sysfs_writeback_rate_debug,
459 &sysfs_io_error_limit,
463 &sysfs_partial_stripes_expensive,
464 &sysfs_sequential_cutoff,
470 #ifdef CONFIG_BCACHE_DEBUG
	&sysfs_bypass_torture_test,
#endif
	&sysfs_backing_dev_name,
	&sysfs_backing_dev_uuid,
	NULL
};
KTYPE(bch_cached_dev);
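
/*
 * Flash-only volumes are bcache devices carved directly out of a cache
 * set (see bch_flash_dev_create()); they have no backing device, so
 * only size, label and unregister are exposed.
 */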
SHOW(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum, "%i", d->data_csum);
	sysfs_hprint(size, u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		/* terminate at index SB_LABEL_SIZE, not one past it */
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}
SHOW_LOCKED(bch_flash_dev)
STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_strtoul(data_csum, d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;

		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
529 STORE_LOCKED(bch_flash_dev)
static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);
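
/*
 * bset_stats_op carries a btree_op plus the accumulated statistics;
 * bch_btree_map_nodes() below invokes bch_btree_bset_stats() once per
 * btree node and keeps walking as long as MAP_CONTINUE is returned.
 */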
struct bset_stats_op {
	struct btree_op op;
	size_t nodes;
	struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

	op->nodes++;
	bch_btree_keys_stats(&b->keys, &op->stats);

	return MAP_CONTINUE;
}
static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct bset_stats_op op;
	int ret;

	memset(&op, 0, sizeof(op));
	bch_btree_op_init(&op.op, -1);

	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes:		%zu\n"
			"written sets:		%zu\n"
			"unwritten sets:		%zu\n"
			"written key bytes:	%zu\n"
			"unwritten key bytes:	%zu\n"
			"floats:			%zu\n"
			"failed:			%zu\n",
			op.nodes,
			op.stats.sets_written, op.stats.sets_unwritten,
			op.stats.bytes_written, op.stats.bytes_unwritten,
			op.stats.floats, op.stats.failed);
}
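
/*
 * bch_root_usage() needs a read lock on the root node, but c->root may
 * change while we sleep on the lock, hence the retry loop below.
 */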
static unsigned int bch_root_usage(struct cache_set *c)
{
	unsigned int bytes = 0;
	struct bkey *k;
	struct btree *b;
	struct btree_iter iter;

	goto lock_root;

	do {
		rw_unlock(false, b);
lock_root:
		b = c->root;
		rw_lock(false, b, b->level);
	} while (b != c->root);

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		bytes += bkey_bytes(k);

	rw_unlock(false, b);

	return (bytes * 100) / btree_bytes(c);
}
static size_t bch_cache_size(struct cache_set *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	list_for_each_entry(b, &c->btree_cache, list)
		ret += 1 << (b->keys.page_order + PAGE_SHIFT);

	mutex_unlock(&c->bucket_lock);
	return ret;
}
static unsigned int bch_cache_max_chain(struct cache_set *c)
{
	unsigned int ret = 0;
	struct hlist_head *h;

	mutex_lock(&c->bucket_lock);

	for (h = c->bucket_hash;
	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
	     h++) {
		unsigned int i = 0;
		struct hlist_node *p;

		hlist_for_each(p, h)
			i++;

		ret = max(ret, i);
	}

	mutex_unlock(&c->bucket_lock);
	return ret;
}
static unsigned int bch_btree_used(struct cache_set *c)
{
	return div64_u64(c->gc_stats.key_bytes * 100,
			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned int bch_average_key_size(struct cache_set *c)
{
	return c->gc_stats.nkeys
		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
		: 0;
}
SHOW(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
661 sysfs_print(synchronous, CACHE_SYNC(&c->sb));
662 sysfs_print(journal_delay_ms, c->journal_delay_ms);
663 sysfs_hprint(bucket_size, bucket_bytes(c));
664 sysfs_hprint(block_size, block_bytes(c));
665 sysfs_print(tree_depth, c->root->level);
666 sysfs_print(root_usage_percent, bch_root_usage(c));
668 sysfs_hprint(btree_cache_size, bch_cache_size(c));
669 sysfs_print(btree_cache_max_chain, bch_cache_max_chain(c));
670 sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use);
672 sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms);
673 sysfs_print_time_stats(&c->btree_split_time, btree_split, sec, us);
674 sysfs_print_time_stats(&c->sort.time, btree_sort, ms, us);
675 sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us);
677 sysfs_print(btree_used_percent, bch_btree_used(c));
678 sysfs_print(btree_nodes, c->gc_stats.nodes);
679 sysfs_hprint(average_key_size, bch_average_key_size(c));
681 sysfs_print(cache_read_races,
682 atomic_long_read(&c->cache_read_races));
	sysfs_print(reclaim,
		    atomic_long_read(&c->reclaim));
687 sysfs_print(flush_write,
688 atomic_long_read(&c->flush_write));
690 sysfs_print(retry_flush_write,
691 atomic_long_read(&c->retry_flush_write));
693 sysfs_print(writeback_keys_done,
694 atomic_long_read(&c->writeback_keys_done));
695 sysfs_print(writeback_keys_failed,
696 atomic_long_read(&c->writeback_keys_failed));
	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);
702 /* See count_io_errors for why 88 */
703 sysfs_print(io_error_halflife, c->error_decay * 88);
704 sysfs_print(io_error_limit, c->error_limit);
706 sysfs_hprint(congested,
707 ((uint64_t) bch_get_congested(c)) << 9);
708 sysfs_print(congested_read_threshold_us,
709 c->congested_read_threshold_us);
710 sysfs_print(congested_write_threshold_us,
711 c->congested_write_threshold_us);
713 sysfs_print(cutoff_writeback, bch_cutoff_writeback);
714 sysfs_print(cutoff_writeback_sync, bch_cutoff_writeback_sync);
716 sysfs_print(active_journal_entries, fifo_used(&c->journal.pin));
717 sysfs_printf(verify, "%i", c->verify);
718 sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled);
719 sysfs_printf(expensive_debug_checks,
720 "%i", c->expensive_debug_checks);
721 sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite);
722 sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled);
723 sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
724 sysfs_printf(gc_after_writeback, "%i", c->gc_after_writeback);
725 sysfs_printf(io_disable, "%i",
726 test_bit(CACHE_SET_IO_DISABLE, &c->flags));
728 if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
733 SHOW_LOCKED(bch_cache_set)
STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	ssize_t v;
740 if (attr == &sysfs_unregister)
741 bch_cache_set_unregister(c);
743 if (attr == &sysfs_stop)
744 bch_cache_set_stop(c);
	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}
	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;

		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}
766 if (attr == &sysfs_clear_stats) {
767 atomic_long_set(&c->writeback_keys_done, 0);
768 atomic_long_set(&c->writeback_keys_failed, 0);
770 memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}
	if (attr == &sysfs_trigger_gc)
		force_wake_up_gc(c);
777 if (attr == &sysfs_prune_cache) {
778 struct shrink_control sc;
780 sc.gfp_mask = GFP_KERNEL;
781 sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}
	sysfs_strtoul_clamp(congested_read_threshold_us,
			    c->congested_read_threshold_us,
			    0, UINT_MAX);
	sysfs_strtoul_clamp(congested_write_threshold_us,
			    c->congested_write_threshold_us,
			    0, UINT_MAX);
	if (attr == &sysfs_errors) {
		v = __sysfs_match_string(error_actions, -1, buf);
		if (v < 0)
			return v;

		c->on_error = v;
	}
800 if (attr == &sysfs_io_error_limit)
801 c->error_limit = strtoul_or_return(buf);
803 /* See count_io_errors() for why 88 */
804 if (attr == &sysfs_io_error_halflife)
805 c->error_decay = strtoul_or_return(buf) / 88;
	if (attr == &sysfs_io_disable) {
		v = strtoul_or_return(buf);
		if (v) {
			if (test_and_set_bit(CACHE_SET_IO_DISABLE,
					     &c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already set");
		} else {
			if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
						&c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already cleared");
		}
	}
820 sysfs_strtoul(journal_delay_ms, c->journal_delay_ms);
821 sysfs_strtoul_bool(verify, c->verify);
822 sysfs_strtoul_bool(key_merging_disabled, c->key_merging_disabled);
823 sysfs_strtoul(expensive_debug_checks, c->expensive_debug_checks);
824 sysfs_strtoul_bool(gc_always_rewrite, c->gc_always_rewrite);
825 sysfs_strtoul_bool(btree_shrinker_disabled, c->shrinker_disabled);
826 sysfs_strtoul_bool(copy_gc_enabled, c->copy_gc_enabled);
	/*
	 * write gc_after_writeback here may overwrite an already set
	 * BCH_DO_AUTO_GC, it doesn't matter because this flag will be
	 * set in next chance.
	 */
	sysfs_strtoul_clamp(gc_after_writeback, c->gc_after_writeback, 0, 1);

	return size;
}
836 STORE_LOCKED(bch_cache_set)
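
/*
 * The "internal" kobject is a subdirectory of the cache set's sysfs
 * directory; its show/store methods simply forward to the cache set's
 * own handlers, so the same attribute code serves both directories.
 */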
SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_store(&c->kobj, attr, buf, size);
}
static void bch_cache_set_internal_release(struct kobject *k)
{
}
856 static struct attribute *bch_cache_set_files[] = {
860 &sysfs_journal_delay_ms,
861 &sysfs_flash_vol_create,
866 &sysfs_root_usage_percent,
867 &sysfs_btree_cache_size,
868 &sysfs_cache_available_percent,
870 &sysfs_average_key_size,
873 &sysfs_io_error_limit,
874 &sysfs_io_error_halflife,
876 &sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	NULL
};
KTYPE(bch_cache_set);
883 static struct attribute *bch_cache_set_internal_files[] = {
884 &sysfs_active_journal_entries,
886 sysfs_time_stats_attribute_list(btree_gc, sec, ms)
887 sysfs_time_stats_attribute_list(btree_split, sec, us)
888 sysfs_time_stats_attribute_list(btree_sort, ms, us)
889 sysfs_time_stats_attribute_list(btree_read, ms, us)
892 &sysfs_btree_used_percent,
893 &sysfs_btree_cache_max_chain,
895 &sysfs_bset_tree_stats,
896 &sysfs_cache_read_races,
899 &sysfs_retry_flush_write,
900 &sysfs_writeback_keys_done,
901 &sysfs_writeback_keys_failed,
905 #ifdef CONFIG_BCACHE_DEBUG
907 &sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
911 &sysfs_btree_shrinker_disabled,
912 &sysfs_copy_gc_enabled,
913 &sysfs_gc_after_writeback,
915 &sysfs_cutoff_writeback,
	&sysfs_cutoff_writeback_sync,
	NULL
};
KTYPE(bch_cache_set_internal);
/* Sort bucket priorities in descending order (used by priority_stats) */
static int __bch_cache_cmp(const void *l, const void *r)
{
	return *((uint16_t *)r) - *((uint16_t *)l);
}
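
/*
 * Per cache device (i.e. per backing SSD) attributes, as opposed to
 * the aggregate cache set statistics above.
 */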
SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
930 sysfs_hprint(bucket_size, bucket_bytes(ca));
931 sysfs_hprint(block_size, block_bytes(ca));
932 sysfs_print(nbuckets, ca->sb.nbuckets);
933 sysfs_print(discard, ca->discard);
934 sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
935 sysfs_hprint(btree_written,
936 atomic_long_read(&ca->btree_sectors_written) << 9);
937 sysfs_hprint(metadata_written,
938 (atomic_long_read(&ca->meta_sectors_written) +
939 atomic_long_read(&ca->btree_sectors_written)) << 9);
941 sysfs_print(io_errors,
942 atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);
944 if (attr == &sysfs_cache_replacement_policy)
945 return bch_snprint_string_list(buf, PAGE_SIZE,
946 cache_replacement_policies,
947 CACHE_REPLACEMENT(&ca->sb));
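
	/*
	 * priority_stats reports what fraction of the buckets is unused,
	 * clean, dirty or metadata, plus 31 quantiles of the bucket
	 * priorities (INITIAL_PRIO - prio approximates how long a bucket
	 * has gone unread).
	 */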
	if (attr == &sysfs_priority_stats) {
		struct bucket *b;
		size_t n = ca->sb.nbuckets, i;
		size_t unused = 0, available = 0, dirty = 0, meta = 0;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(array_size(sizeof(uint16_t),
						ca->sb.nbuckets));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				unused++;
			if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
				available++;
			if (GC_MARK(b) == GC_MARK_DIRTY)
				dirty++;
			if (GC_MARK(b) == GC_MARK_METADATA)
				meta++;
		}

		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		unused = ca->sb.nbuckets - n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++, n--;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused:		%zu%%\n"
				"Clean:		%zu%%\n"
				"Dirty:		%zu%%\n"
				"Metadata:	%zu%%\n"
				"Average:	%llu\n"
				"Sectors per Q:	%zu\n"
				"Quantiles:	[",
				unused * 100 / (size_t) ca->sb.nbuckets,
				available * 100 / (size_t) ca->sb.nbuckets,
				dirty * 100 / (size_t) ca->sb.nbuckets,
				meta * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
1029 SHOW_LOCKED(bch_cache)
STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	ssize_t v;
	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}
	if (attr == &sysfs_cache_replacement_policy) {
		v = __sysfs_match_string(cache_replacement_policies, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}
1062 if (attr == &sysfs_clear_stats) {
1063 atomic_long_set(&ca->sectors_written, 0);
1064 atomic_long_set(&ca->btree_sectors_written, 0);
1065 atomic_long_set(&ca->meta_sectors_written, 0);
1066 atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
1072 STORE_LOCKED(bch_cache)
1074 static struct attribute *bch_cache_files[] = {
1078 &sysfs_priority_stats,
1081 &sysfs_btree_written,
1082 &sysfs_metadata_written,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);