bcache: fix for data collapse after re-attaching an attached device
drivers/md/bcache/sysfs.c (linux-2.6-block.git)
// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

static const char * const cache_replacement_policies[] = {
        "lru",
        "fifo",
        "random",
        NULL
};

static const char * const error_actions[] = {
        "unregister",
        "panic",
        NULL
};
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);

sysfs_time_stats_attribute(btree_gc,    sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort,  ms,  us);
sysfs_time_stats_attribute(btree_read,  ms,  us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
read_attribute(flush_write);
read_attribute(retry_flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);

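/*
 * SHOW()/STORE() (also from sysfs.h) expand to the sysfs show/store
 * callbacks; the *_LOCKED variants wrap them with bch_register_lock.
 */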
SHOW(__bch_cached_dev)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);
        const char *states[] = { "no cache", "clean", "dirty", "inconsistent" };

#define var(stat)               (dc->stat)

        if (attr == &sysfs_cache_mode)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                               bch_cache_modes + 1,
                                               BDEV_CACHE_MODE(&dc->sb));

        sysfs_printf(data_csum,         "%i", dc->disk.data_csum);
        var_printf(verify,              "%i");
        var_printf(bypass_torture_test, "%i");
        var_printf(writeback_metadata,  "%i");
        var_printf(writeback_running,   "%i");
        var_print(writeback_delay);
        var_print(writeback_percent);
        sysfs_hprint(writeback_rate,    dc->writeback_rate.rate << 9);

        var_print(writeback_rate_update_seconds);
        var_print(writeback_rate_i_term_inverse);
        var_print(writeback_rate_p_term_inverse);
        var_print(writeback_rate_minimum);

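        /*
         * Rates and counts below are tracked in 512 byte sectors
         * internally; "<< 9" converts them to bytes before printing.
         */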
        if (attr == &sysfs_writeback_rate_debug) {
                char rate[20];
                char dirty[20];
                char target[20];
                char proportional[20];
                char integral[20];
                char change[20];
                s64 next_io;

                bch_hprint(rate,        dc->writeback_rate.rate << 9);
                bch_hprint(dirty,       bcache_dev_sectors_dirty(&dc->disk) << 9);
                bch_hprint(target,      dc->writeback_rate_target << 9);
                bch_hprint(proportional, dc->writeback_rate_proportional << 9);
                bch_hprint(integral,    dc->writeback_rate_integral_scaled << 9);
                bch_hprint(change,      dc->writeback_rate_change << 9);

                next_io = div64_s64(dc->writeback_rate.next - local_clock(),
                                    NSEC_PER_MSEC);

                return sprintf(buf,
                               "rate:\t\t%s/sec\n"
                               "dirty:\t\t%s\n"
                               "target:\t\t%s\n"
                               "proportional:\t%s\n"
                               "integral:\t%s\n"
                               "change:\t\t%s/sec\n"
                               "next io:\t%llims\n",
                               rate, dirty, target, proportional,
                               integral, change, next_io);
        }

        sysfs_hprint(dirty_data,
                     bcache_dev_sectors_dirty(&dc->disk) << 9);

        sysfs_hprint(stripe_size,       dc->disk.stripe_size << 9);
        var_printf(partial_stripes_expensive,   "%u");

        var_hprint(sequential_cutoff);
        var_hprint(readahead);

        sysfs_print(running,            atomic_read(&dc->running));
        sysfs_print(state,              states[BDEV_STATE(&dc->sb)]);
        if (attr == &sysfs_label) {
                memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
                buf[SB_LABEL_SIZE] = '\0';
                strcat(buf, "\n");
                return strlen(buf);
        }

#undef var
        return 0;
}
SHOW_LOCKED(bch_cached_dev)

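/*
 * Store handler for the backing device's sysfs files; called via
 * STORE(bch_cached_dev) below with bch_register_lock held.
 */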
STORE(__cached_dev)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);
        ssize_t v;
        struct cache_set *c;
        struct kobj_uevent_env *env;

#define d_strtoul(var)          sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)  sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)         sysfs_hatoi(var, dc->var)

        sysfs_strtoul(data_csum,        dc->disk.data_csum);
        d_strtoul(verify);
        d_strtoul(bypass_torture_test);
        d_strtoul(writeback_metadata);
        d_strtoul(writeback_running);
        d_strtoul(writeback_delay);

        sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);

        sysfs_strtoul_clamp(writeback_rate,
                            dc->writeback_rate.rate, 1, INT_MAX);

        sysfs_strtoul_clamp(writeback_rate_update_seconds,
                            dc->writeback_rate_update_seconds,
                            1, WRITEBACK_RATE_UPDATE_SECS_MAX);
        d_strtoul(writeback_rate_i_term_inverse);
        d_strtoul_nonzero(writeback_rate_p_term_inverse);

        d_strtoi_h(sequential_cutoff);
        d_strtoi_h(readahead);

        if (attr == &sysfs_clear_stats)
                bch_cache_accounting_clear(&dc->accounting);

        if (attr == &sysfs_running &&
            strtoul_or_return(buf))
                bch_cached_dev_run(dc);

        if (attr == &sysfs_cache_mode) {
                v = bch_read_string_list(buf, bch_cache_modes + 1);

                if (v < 0)
                        return v;

                if ((unsigned) v != BDEV_CACHE_MODE(&dc->sb)) {
                        SET_BDEV_CACHE_MODE(&dc->sb, v);
                        bch_write_bdev_super(dc, NULL);
                }
        }

        if (attr == &sysfs_label) {
                if (size > SB_LABEL_SIZE)
                        return -EINVAL;
                memcpy(dc->sb.label, buf, size);
                if (size < SB_LABEL_SIZE)
                        dc->sb.label[size] = '\0';
                if (size && dc->sb.label[size - 1] == '\n')
                        dc->sb.label[size - 1] = '\0';
                bch_write_bdev_super(dc, NULL);
                if (dc->disk.c) {
                        memcpy(dc->disk.c->uuids[dc->disk.id].label,
                               buf, SB_LABEL_SIZE);
                        bch_uuid_write(dc->disk.c);
                }
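                /* Send a change uevent so userspace sees the new label */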
                env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
                if (!env)
                        return -ENOMEM;
                add_uevent_var(env, "DRIVER=bcache");
                add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
                add_uevent_var(env, "CACHED_LABEL=%s", buf);
                kobject_uevent_env(
                        &disk_to_dev(dc->disk.disk)->kobj, KOBJ_CHANGE, env->envp);
                kfree(env);
        }

        if (attr == &sysfs_attach) {
                uint8_t         set_uuid[16];

                if (bch_parse_uuid(buf, set_uuid) < 16)
                        return -EINVAL;

                v = -ENOENT;
                list_for_each_entry(c, &bch_cache_sets, list) {
                        v = bch_cached_dev_attach(dc, c, set_uuid);
                        if (!v)
                                return size;
                }

                pr_err("Can't attach %s: cache set not found", buf);
                return v;
        }

        if (attr == &sysfs_detach && dc->disk.c)
                bch_cached_dev_detach(dc);

        if (attr == &sysfs_stop)
                bcache_device_stop(&dc->disk);

        return size;
}

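/*
 * Locked wrapper for the store path: also requeues writeback work when
 * writeback_running or writeback_percent change.
 */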
STORE(bch_cached_dev)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);

        mutex_lock(&bch_register_lock);
        size = __cached_dev_store(kobj, attr, buf, size);

        if (attr == &sysfs_writeback_running)
                bch_writeback_queue(dc);

        if (attr == &sysfs_writeback_percent)
                schedule_delayed_work(&dc->writeback_rate_update,
                                      dc->writeback_rate_update_seconds * HZ);

        mutex_unlock(&bch_register_lock);
        return size;
}

static struct attribute *bch_cached_dev_files[] = {
        &sysfs_attach,
        &sysfs_detach,
        &sysfs_stop,
#if 0
        &sysfs_data_csum,
#endif
        &sysfs_cache_mode,
        &sysfs_writeback_metadata,
        &sysfs_writeback_running,
        &sysfs_writeback_delay,
        &sysfs_writeback_percent,
        &sysfs_writeback_rate,
        &sysfs_writeback_rate_update_seconds,
        &sysfs_writeback_rate_i_term_inverse,
        &sysfs_writeback_rate_p_term_inverse,
        &sysfs_writeback_rate_debug,
        &sysfs_dirty_data,
        &sysfs_stripe_size,
        &sysfs_partial_stripes_expensive,
        &sysfs_sequential_cutoff,
        &sysfs_clear_stats,
        &sysfs_running,
        &sysfs_state,
        &sysfs_label,
        &sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
        &sysfs_verify,
        &sysfs_bypass_torture_test,
#endif
        NULL
};
KTYPE(bch_cached_dev);

SHOW(bch_flash_dev)
{
        struct bcache_device *d = container_of(kobj, struct bcache_device,
                                               kobj);
        struct uuid_entry *u = &d->c->uuids[d->id];

        sysfs_printf(data_csum, "%i", d->data_csum);
        sysfs_hprint(size,      u->sectors << 9);

        if (attr == &sysfs_label) {
                memcpy(buf, u->label, SB_LABEL_SIZE);
                buf[SB_LABEL_SIZE] = '\0';
                strcat(buf, "\n");
                return strlen(buf);
        }

        return 0;
}

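/*
 * Writing to a flash volume's "size" file resizes it: the new sector
 * count is recorded in the uuid entry and the block device's capacity
 * is updated to match.
 */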
STORE(__bch_flash_dev)
{
        struct bcache_device *d = container_of(kobj, struct bcache_device,
                                               kobj);
        struct uuid_entry *u = &d->c->uuids[d->id];

        sysfs_strtoul(data_csum,        d->data_csum);

        if (attr == &sysfs_size) {
                uint64_t v;
                strtoi_h_or_return(buf, v);

                u->sectors = v >> 9;
                bch_uuid_write(d->c);
                set_capacity(d->disk, u->sectors);
        }

        if (attr == &sysfs_label) {
                memcpy(u->label, buf, SB_LABEL_SIZE);
                bch_uuid_write(d->c);
        }

        if (attr == &sysfs_unregister) {
                set_bit(BCACHE_DEV_DETACHING, &d->flags);
                bcache_device_stop(d);
        }

        return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
        &sysfs_unregister,
#if 0
        &sysfs_data_csum,
#endif
        &sysfs_label,
        &sysfs_size,
        NULL
};
KTYPE(bch_flash_dev);

struct bset_stats_op {
        struct btree_op op;
        size_t nodes;
        struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
        struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

        op->nodes++;
        bch_btree_keys_stats(&b->keys, &op->stats);

        return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
        struct bset_stats_op op;
        int ret;

        memset(&op, 0, sizeof(op));
        bch_btree_op_init(&op.op, -1);

        ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
        if (ret < 0)
                return ret;

        return snprintf(buf, PAGE_SIZE,
                        "btree nodes:           %zu\n"
                        "written sets:          %zu\n"
                        "unwritten sets:                %zu\n"
                        "written key bytes:     %zu\n"
                        "unwritten key bytes:   %zu\n"
                        "floats:                        %zu\n"
                        "failed:                        %zu\n",
                        op.nodes,
                        op.stats.sets_written, op.stats.sets_unwritten,
                        op.stats.bytes_written, op.stats.bytes_unwritten,
                        op.stats.floats, op.stats.failed);
}

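/*
 * Read the root node's key usage. The root can be split or rewritten
 * while we sleep on its lock, so retry until the node we locked is
 * still c->root.
 */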
static unsigned bch_root_usage(struct cache_set *c)
{
        unsigned bytes = 0;
        struct bkey *k;
        struct btree *b;
        struct btree_iter iter;

        goto lock_root;

        do {
                rw_unlock(false, b);
lock_root:
                b = c->root;
                rw_lock(false, b, b->level);
        } while (b != c->root);

        for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
                bytes += bkey_bytes(k);

        rw_unlock(false, b);

        return (bytes * 100) / btree_bytes(c);
}

static size_t bch_cache_size(struct cache_set *c)
{
        size_t ret = 0;
        struct btree *b;

        mutex_lock(&c->bucket_lock);
        list_for_each_entry(b, &c->btree_cache, list)
                ret += 1 << (b->keys.page_order + PAGE_SHIFT);

        mutex_unlock(&c->bucket_lock);
        return ret;
}

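/* Length of the longest collision chain in the bucket hash table */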
static unsigned bch_cache_max_chain(struct cache_set *c)
{
        unsigned ret = 0;
        struct hlist_head *h;

        mutex_lock(&c->bucket_lock);

        for (h = c->bucket_hash;
             h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
             h++) {
                unsigned i = 0;
                struct hlist_node *p;

                hlist_for_each(p, h)
                        i++;

                ret = max(ret, i);
        }

        mutex_unlock(&c->bucket_lock);
        return ret;
}

static unsigned bch_btree_used(struct cache_set *c)
{
        return div64_u64(c->gc_stats.key_bytes * 100,
                         (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned bch_average_key_size(struct cache_set *c)
{
        return c->gc_stats.nkeys
                ? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
                : 0;
}

SHOW(__bch_cache_set)
{
        struct cache_set *c = container_of(kobj, struct cache_set, kobj);

        sysfs_print(synchronous,                CACHE_SYNC(&c->sb));
        sysfs_print(journal_delay_ms,           c->journal_delay_ms);
        sysfs_hprint(bucket_size,               bucket_bytes(c));
        sysfs_hprint(block_size,                block_bytes(c));
        sysfs_print(tree_depth,                 c->root->level);
        sysfs_print(root_usage_percent,         bch_root_usage(c));

        sysfs_hprint(btree_cache_size,          bch_cache_size(c));
        sysfs_print(btree_cache_max_chain,      bch_cache_max_chain(c));
        sysfs_print(cache_available_percent,    100 - c->gc_stats.in_use);

        sysfs_print_time_stats(&c->btree_gc_time,       btree_gc, sec, ms);
        sysfs_print_time_stats(&c->btree_split_time,    btree_split, sec, us);
        sysfs_print_time_stats(&c->sort.time,           btree_sort, ms, us);
        sysfs_print_time_stats(&c->btree_read_time,     btree_read, ms, us);

        sysfs_print(btree_used_percent, bch_btree_used(c));
        sysfs_print(btree_nodes,        c->gc_stats.nodes);
        sysfs_hprint(average_key_size,  bch_average_key_size(c));

        sysfs_print(cache_read_races,
                    atomic_long_read(&c->cache_read_races));

        sysfs_print(reclaim,
                    atomic_long_read(&c->reclaim));

        sysfs_print(flush_write,
                    atomic_long_read(&c->flush_write));

        sysfs_print(retry_flush_write,
                    atomic_long_read(&c->retry_flush_write));

        sysfs_print(writeback_keys_done,
                    atomic_long_read(&c->writeback_keys_done));
        sysfs_print(writeback_keys_failed,
                    atomic_long_read(&c->writeback_keys_failed));

        if (attr == &sysfs_errors)
                return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
                                               c->on_error);

        /* See count_io_errors() for why 88 */
        sysfs_print(io_error_halflife,  c->error_decay * 88);
        sysfs_print(io_error_limit,     c->error_limit);

        sysfs_hprint(congested,
                     ((uint64_t) bch_get_congested(c)) << 9);
        sysfs_print(congested_read_threshold_us,
                    c->congested_read_threshold_us);
        sysfs_print(congested_write_threshold_us,
                    c->congested_write_threshold_us);

        sysfs_print(active_journal_entries,     fifo_used(&c->journal.pin));
        sysfs_printf(verify,                    "%i", c->verify);
        sysfs_printf(key_merging_disabled,      "%i", c->key_merging_disabled);
        sysfs_printf(expensive_debug_checks,
                     "%i", c->expensive_debug_checks);
        sysfs_printf(gc_always_rewrite,         "%i", c->gc_always_rewrite);
        sysfs_printf(btree_shrinker_disabled,   "%i", c->shrinker_disabled);
        sysfs_printf(copy_gc_enabled,           "%i", c->copy_gc_enabled);

        if (attr == &sysfs_bset_tree_stats)
                return bch_bset_print_stats(c, buf);

        return 0;
}
SHOW_LOCKED(bch_cache_set)

STORE(__bch_cache_set)
{
        struct cache_set *c = container_of(kobj, struct cache_set, kobj);

        if (attr == &sysfs_unregister)
                bch_cache_set_unregister(c);

        if (attr == &sysfs_stop)
                bch_cache_set_stop(c);

        if (attr == &sysfs_synchronous) {
                bool sync = strtoul_or_return(buf);

                if (sync != CACHE_SYNC(&c->sb)) {
                        SET_CACHE_SYNC(&c->sb, sync);
                        bcache_write_super(c);
                }
        }

        if (attr == &sysfs_flash_vol_create) {
                int r;
                uint64_t v;
                strtoi_h_or_return(buf, v);

                r = bch_flash_dev_create(c, v);
                if (r)
                        return r;
        }

        if (attr == &sysfs_clear_stats) {
                atomic_long_set(&c->writeback_keys_done,        0);
                atomic_long_set(&c->writeback_keys_failed,      0);

                memset(&c->gc_stats, 0, sizeof(struct gc_stat));
                bch_cache_accounting_clear(&c->accounting);
        }

        if (attr == &sysfs_trigger_gc) {
                /*
                 * The garbage collection thread only runs when
                 * sectors_to_gc < 0; when users write to the sysfs entry
                 * trigger_gc, most of the time they want to forcibly
                 * trigger garbage collection. Setting c->sectors_to_gc to
                 * -1 here gives gc_should_run() a chance to permit the gc
                 * thread to run. Only "a chance", because before
                 * gc_should_run() is reached, c->sectors_to_gc may be set
                 * to some other positive value again; writing trigger_gc
                 * therefore does not guarantee that the gc thread runs.
                 */
                atomic_set(&c->sectors_to_gc, -1);
                wake_up_gc(c);
        }

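        /* Writing N to prune_cache asks the btree cache shrinker to scan N objects */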
        if (attr == &sysfs_prune_cache) {
                struct shrink_control sc;
                sc.gfp_mask = GFP_KERNEL;
                sc.nr_to_scan = strtoul_or_return(buf);
                c->shrink.scan_objects(&c->shrink, &sc);
        }

        sysfs_strtoul(congested_read_threshold_us,
                      c->congested_read_threshold_us);
        sysfs_strtoul(congested_write_threshold_us,
                      c->congested_write_threshold_us);

        if (attr == &sysfs_errors) {
                ssize_t v = bch_read_string_list(buf, error_actions);

                if (v < 0)
                        return v;

                c->on_error = v;
        }

        if (attr == &sysfs_io_error_limit)
                c->error_limit = strtoul_or_return(buf);

        /* See count_io_errors() for why 88 */
        if (attr == &sysfs_io_error_halflife)
                c->error_decay = strtoul_or_return(buf) / 88;

        sysfs_strtoul(journal_delay_ms,         c->journal_delay_ms);
        sysfs_strtoul(verify,                   c->verify);
        sysfs_strtoul(key_merging_disabled,     c->key_merging_disabled);
        sysfs_strtoul(expensive_debug_checks,   c->expensive_debug_checks);
        sysfs_strtoul(gc_always_rewrite,        c->gc_always_rewrite);
        sysfs_strtoul(btree_shrinker_disabled,  c->shrinker_disabled);
        sysfs_strtoul(copy_gc_enabled,          c->copy_gc_enabled);

        return size;
}
STORE_LOCKED(bch_cache_set)

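/*
 * The cache set's "internal" directory reuses the main kobject's
 * show/store handlers; it just groups the debugging attributes.
 */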
SHOW(bch_cache_set_internal)
{
        struct cache_set *c = container_of(kobj, struct cache_set, internal);
        return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
        struct cache_set *c = container_of(kobj, struct cache_set, internal);
        return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
        &sysfs_unregister,
        &sysfs_stop,
        &sysfs_synchronous,
        &sysfs_journal_delay_ms,
        &sysfs_flash_vol_create,

        &sysfs_bucket_size,
        &sysfs_block_size,
        &sysfs_tree_depth,
        &sysfs_root_usage_percent,
        &sysfs_btree_cache_size,
        &sysfs_cache_available_percent,

        &sysfs_average_key_size,

        &sysfs_errors,
        &sysfs_io_error_limit,
        &sysfs_io_error_halflife,
        &sysfs_congested,
        &sysfs_congested_read_threshold_us,
        &sysfs_congested_write_threshold_us,
        &sysfs_clear_stats,
        NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
        &sysfs_active_journal_entries,

        sysfs_time_stats_attribute_list(btree_gc, sec, ms)
        sysfs_time_stats_attribute_list(btree_split, sec, us)
        sysfs_time_stats_attribute_list(btree_sort, ms, us)
        sysfs_time_stats_attribute_list(btree_read, ms, us)

        &sysfs_btree_nodes,
        &sysfs_btree_used_percent,
        &sysfs_btree_cache_max_chain,

        &sysfs_bset_tree_stats,
        &sysfs_cache_read_races,
        &sysfs_reclaim,
        &sysfs_flush_write,
        &sysfs_retry_flush_write,
        &sysfs_writeback_keys_done,
        &sysfs_writeback_keys_failed,

        &sysfs_trigger_gc,
        &sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
        &sysfs_verify,
        &sysfs_key_merging_disabled,
        &sysfs_expensive_debug_checks,
#endif
        &sysfs_gc_always_rewrite,
        &sysfs_btree_shrinker_disabled,
        &sysfs_copy_gc_enabled,
        NULL
};
KTYPE(bch_cache_set_internal);

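/* Sort comparator for bucket prios: descending order (r before l) */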
static int __bch_cache_cmp(const void *l, const void *r)
{
        return *((uint16_t *)r) - *((uint16_t *)l);
}

SHOW(__bch_cache)
{
        struct cache *ca = container_of(kobj, struct cache, kobj);

        sysfs_hprint(bucket_size,       bucket_bytes(ca));
        sysfs_hprint(block_size,        block_bytes(ca));
        sysfs_print(nbuckets,           ca->sb.nbuckets);
        sysfs_print(discard,            ca->discard);
        sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
        sysfs_hprint(btree_written,
                     atomic_long_read(&ca->btree_sectors_written) << 9);
        sysfs_hprint(metadata_written,
                     (atomic_long_read(&ca->meta_sectors_written) +
                      atomic_long_read(&ca->btree_sectors_written)) << 9);

        sysfs_print(io_errors,
                    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

        if (attr == &sysfs_cache_replacement_policy)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                               cache_replacement_policies,
                                               CACHE_REPLACEMENT(&ca->sb));

        if (attr == &sysfs_priority_stats) {
                struct bucket *b;
                size_t n = ca->sb.nbuckets, i;
                size_t unused = 0, available = 0, dirty = 0, meta = 0;
                uint64_t sum = 0;
                /* Compute 31 quantiles */
                uint16_t q[31], *p, *cached;
                ssize_t ret;

                cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t));
                if (!p)
                        return -ENOMEM;

                mutex_lock(&ca->set->bucket_lock);
                for_each_bucket(b, ca) {
                        if (!GC_SECTORS_USED(b))
                                unused++;
                        if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
                                available++;
                        if (GC_MARK(b) == GC_MARK_DIRTY)
                                dirty++;
                        if (GC_MARK(b) == GC_MARK_METADATA)
                                meta++;
                }

                for (i = ca->sb.first_bucket; i < n; i++)
                        p[i] = ca->buckets[i].prio;
                mutex_unlock(&ca->set->bucket_lock);

                sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

                while (n &&
                       !cached[n - 1])
                        --n;

                unused = ca->sb.nbuckets - n;

                while (cached < p + n &&
                       *cached == BTREE_PRIO)
                        cached++, n--;

                for (i = 0; i < n; i++)
                        sum += INITIAL_PRIO - cached[i];

                if (n)
                        do_div(sum, n);

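                /*
                 * Sample the sorted prio array at 31 evenly spaced points
                 * to produce the quantiles reported below.
                 */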
                for (i = 0; i < ARRAY_SIZE(q); i++)
                        q[i] = INITIAL_PRIO - cached[n * (i + 1) /
                                (ARRAY_SIZE(q) + 1)];

                vfree(p);

                ret = scnprintf(buf, PAGE_SIZE,
                                "Unused:                %zu%%\n"
                                "Clean:         %zu%%\n"
                                "Dirty:         %zu%%\n"
                                "Metadata:      %zu%%\n"
                                "Average:       %llu\n"
                                "Sectors per Q: %zu\n"
                                "Quantiles:     [",
                                unused * 100 / (size_t) ca->sb.nbuckets,
                                available * 100 / (size_t) ca->sb.nbuckets,
                                dirty * 100 / (size_t) ca->sb.nbuckets,
                                meta * 100 / (size_t) ca->sb.nbuckets, sum,
                                n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

                for (i = 0; i < ARRAY_SIZE(q); i++)
                        ret += scnprintf(buf + ret, PAGE_SIZE - ret,
                                         "%u ", q[i]);
                ret--;

                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

                return ret;
        }

        return 0;
}
SHOW_LOCKED(bch_cache)

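/*
 * ca->discard is only toggled when the underlying queue supports
 * discard; the superblock flag is kept in sync with the written value
 * regardless.
 */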
STORE(__bch_cache)
{
        struct cache *ca = container_of(kobj, struct cache, kobj);

        if (attr == &sysfs_discard) {
                bool v = strtoul_or_return(buf);

                if (blk_queue_discard(bdev_get_queue(ca->bdev)))
                        ca->discard = v;

                if (v != CACHE_DISCARD(&ca->sb)) {
                        SET_CACHE_DISCARD(&ca->sb, v);
                        bcache_write_super(ca->set);
                }
        }

        if (attr == &sysfs_cache_replacement_policy) {
                ssize_t v = bch_read_string_list(buf, cache_replacement_policies);

                if (v < 0)
                        return v;

                if ((unsigned) v != CACHE_REPLACEMENT(&ca->sb)) {
                        mutex_lock(&ca->set->bucket_lock);
                        SET_CACHE_REPLACEMENT(&ca->sb, v);
                        mutex_unlock(&ca->set->bucket_lock);

                        bcache_write_super(ca->set);
                }
        }

        if (attr == &sysfs_clear_stats) {
                atomic_long_set(&ca->sectors_written, 0);
                atomic_long_set(&ca->btree_sectors_written, 0);
                atomic_long_set(&ca->meta_sectors_written, 0);
                atomic_set(&ca->io_count, 0);
                atomic_set(&ca->io_errors, 0);
        }

        return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
        &sysfs_bucket_size,
        &sysfs_block_size,
        &sysfs_nbuckets,
        &sysfs_priority_stats,
        &sysfs_discard,
        &sysfs_written,
        &sysfs_btree_written,
        &sysfs_metadata_written,
        &sysfs_io_errors,
        &sysfs_clear_stats,
        &sysfs_cache_replacement_policy,
        NULL
};
KTYPE(bch_cache);