// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sysfs.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_mount.h"
17 struct xfs_sysfs_attr {
18 struct attribute attr;
19 ssize_t (*show)(struct kobject *kobject, char *buf);
20 ssize_t (*store)(struct kobject *kobject, const char *buf,
24 static inline struct xfs_sysfs_attr *
25 to_attr(struct attribute *attr)
27 return container_of(attr, struct xfs_sysfs_attr, attr);
/* Declare a read-write / read-only / write-only xfs sysfs attribute. */
#define XFS_SYSFS_ATTR_RW(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RW(name)
#define XFS_SYSFS_ATTR_RO(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RO(name)
#define XFS_SYSFS_ATTR_WO(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_WO(name)

/* Address of the embedded struct attribute, for attribute lists. */
#define ATTR_LIST(name) &xfs_sysfs_attr_##name.attr
40 xfs_sysfs_object_show(
41 struct kobject *kobject,
42 struct attribute *attr,
45 struct xfs_sysfs_attr *xfs_attr = to_attr(attr);
47 return xfs_attr->show ? xfs_attr->show(kobject, buf) : 0;
51 xfs_sysfs_object_store(
52 struct kobject *kobject,
53 struct attribute *attr,
57 struct xfs_sysfs_attr *xfs_attr = to_attr(attr);
59 return xfs_attr->store ? xfs_attr->store(kobject, buf, count) : 0;
62 static const struct sysfs_ops xfs_sysfs_ops = {
63 .show = xfs_sysfs_object_show,
64 .store = xfs_sysfs_object_store,
67 static struct attribute *xfs_mp_attrs[] = {
71 struct kobj_type xfs_mp_ktype = {
72 .release = xfs_sysfs_release,
73 .sysfs_ops = &xfs_sysfs_ops,
74 .default_attrs = xfs_mp_attrs,
82 struct kobject *kobject,
89 ret = kstrtoint(buf, 0, &val);
94 xfs_globals.bug_on_assert = true;
96 xfs_globals.bug_on_assert = false;
105 struct kobject *kobject,
108 return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.bug_on_assert ? 1 : 0);
110 XFS_SYSFS_ATTR_RW(bug_on_assert);
113 log_recovery_delay_store(
114 struct kobject *kobject,
121 ret = kstrtoint(buf, 0, &val);
125 if (val < 0 || val > 60)
128 xfs_globals.log_recovery_delay = val;
134 log_recovery_delay_show(
135 struct kobject *kobject,
138 return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.log_recovery_delay);
140 XFS_SYSFS_ATTR_RW(log_recovery_delay);
144 struct kobject *kobject,
151 ret = kstrtoint(buf, 0, &val);
155 if (val < 0 || val > 60)
158 xfs_globals.mount_delay = val;
165 struct kobject *kobject,
168 return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.mount_delay);
170 XFS_SYSFS_ATTR_RW(mount_delay);
174 struct kobject *kobject,
180 ret = kstrtobool(buf, &xfs_globals.always_cow);
188 struct kobject *kobject,
191 return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.always_cow);
193 XFS_SYSFS_ATTR_RW(always_cow);
197 * Override how many threads the parallel work queue is allowed to create.
198 * This has to be a debug-only global (instead of an errortag) because one of
199 * the main users of parallel workqueues is mount time quotacheck.
203 struct kobject *kobject,
210 ret = kstrtoint(buf, 0, &val);
214 if (val < -1 || val > num_possible_cpus())
217 xfs_globals.pwork_threads = val;
224 struct kobject *kobject,
227 return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.pwork_threads);
229 XFS_SYSFS_ATTR_RW(pwork_threads);
232 static struct attribute *xfs_dbg_attrs[] = {
233 ATTR_LIST(bug_on_assert),
234 ATTR_LIST(log_recovery_delay),
235 ATTR_LIST(mount_delay),
236 ATTR_LIST(always_cow),
238 ATTR_LIST(pwork_threads),
243 struct kobj_type xfs_dbg_ktype = {
244 .release = xfs_sysfs_release,
245 .sysfs_ops = &xfs_sysfs_ops,
246 .default_attrs = xfs_dbg_attrs,
253 static inline struct xstats *
254 to_xstats(struct kobject *kobject)
256 struct xfs_kobj *kobj = to_kobj(kobject);
258 return container_of(kobj, struct xstats, xs_kobj);
263 struct kobject *kobject,
266 struct xstats *stats = to_xstats(kobject);
268 return xfs_stats_format(stats->xs_stats, buf);
270 XFS_SYSFS_ATTR_RO(stats);
274 struct kobject *kobject,
280 struct xstats *stats = to_xstats(kobject);
282 ret = kstrtoint(buf, 0, &val);
289 xfs_stats_clearall(stats->xs_stats);
292 XFS_SYSFS_ATTR_WO(stats_clear);
294 static struct attribute *xfs_stats_attrs[] = {
296 ATTR_LIST(stats_clear),
300 struct kobj_type xfs_stats_ktype = {
301 .release = xfs_sysfs_release,
302 .sysfs_ops = &xfs_sysfs_ops,
303 .default_attrs = xfs_stats_attrs,
308 static inline struct xlog *
309 to_xlog(struct kobject *kobject)
311 struct xfs_kobj *kobj = to_kobj(kobject);
313 return container_of(kobj, struct xlog, l_kobj);
318 struct kobject *kobject,
323 struct xlog *log = to_xlog(kobject);
325 spin_lock(&log->l_icloglock);
326 cycle = log->l_curr_cycle;
327 block = log->l_curr_block;
328 spin_unlock(&log->l_icloglock);
330 return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, block);
332 XFS_SYSFS_ATTR_RO(log_head_lsn);
336 struct kobject *kobject,
341 struct xlog *log = to_xlog(kobject);
343 xlog_crack_atomic_lsn(&log->l_tail_lsn, &cycle, &block);
344 return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, block);
346 XFS_SYSFS_ATTR_RO(log_tail_lsn);
349 reserve_grant_head_show(
350 struct kobject *kobject,
356 struct xlog *log = to_xlog(kobject);
358 xlog_crack_grant_head(&log->l_reserve_head.grant, &cycle, &bytes);
359 return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, bytes);
361 XFS_SYSFS_ATTR_RO(reserve_grant_head);
364 write_grant_head_show(
365 struct kobject *kobject,
370 struct xlog *log = to_xlog(kobject);
372 xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &bytes);
373 return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, bytes);
375 XFS_SYSFS_ATTR_RO(write_grant_head);
377 static struct attribute *xfs_log_attrs[] = {
378 ATTR_LIST(log_head_lsn),
379 ATTR_LIST(log_tail_lsn),
380 ATTR_LIST(reserve_grant_head),
381 ATTR_LIST(write_grant_head),
385 struct kobj_type xfs_log_ktype = {
386 .release = xfs_sysfs_release,
387 .sysfs_ops = &xfs_sysfs_ops,
388 .default_attrs = xfs_log_attrs,
/*
 * Metadata IO error configuration
 *
 * The sysfs structure here is:
 *	...xfs/<dev>/error/<class>/<errno>/<error_attrs>
 *
 * where <class> allows us to discriminate between data IO and metadata IO,
 * and any other future type of IO (e.g. special inode or directory error
 * handling) we care to support.
 */
401 static inline struct xfs_error_cfg *
402 to_error_cfg(struct kobject *kobject)
404 struct xfs_kobj *kobj = to_kobj(kobject);
405 return container_of(kobj, struct xfs_error_cfg, kobj);
408 static inline struct xfs_mount *
409 err_to_mp(struct kobject *kobject)
411 struct xfs_kobj *kobj = to_kobj(kobject);
412 return container_of(kobj, struct xfs_mount, m_error_kobj);
417 struct kobject *kobject,
421 struct xfs_error_cfg *cfg = to_error_cfg(kobject);
423 if (cfg->max_retries == XFS_ERR_RETRY_FOREVER)
426 retries = cfg->max_retries;
428 return snprintf(buf, PAGE_SIZE, "%d\n", retries);
433 struct kobject *kobject,
437 struct xfs_error_cfg *cfg = to_error_cfg(kobject);
441 ret = kstrtoint(buf, 0, &val);
449 cfg->max_retries = XFS_ERR_RETRY_FOREVER;
451 cfg->max_retries = val;
454 XFS_SYSFS_ATTR_RW(max_retries);
457 retry_timeout_seconds_show(
458 struct kobject *kobject,
462 struct xfs_error_cfg *cfg = to_error_cfg(kobject);
464 if (cfg->retry_timeout == XFS_ERR_RETRY_FOREVER)
467 timeout = jiffies_to_msecs(cfg->retry_timeout) / MSEC_PER_SEC;
469 return snprintf(buf, PAGE_SIZE, "%d\n", timeout);
473 retry_timeout_seconds_store(
474 struct kobject *kobject,
478 struct xfs_error_cfg *cfg = to_error_cfg(kobject);
482 ret = kstrtoint(buf, 0, &val);
486 /* 1 day timeout maximum, -1 means infinite */
487 if (val < -1 || val > 86400)
491 cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
493 cfg->retry_timeout = msecs_to_jiffies(val * MSEC_PER_SEC);
494 ASSERT(msecs_to_jiffies(val * MSEC_PER_SEC) < LONG_MAX);
498 XFS_SYSFS_ATTR_RW(retry_timeout_seconds);
501 fail_at_unmount_show(
502 struct kobject *kobject,
505 struct xfs_mount *mp = err_to_mp(kobject);
507 return snprintf(buf, PAGE_SIZE, "%d\n", mp->m_fail_unmount);
511 fail_at_unmount_store(
512 struct kobject *kobject,
516 struct xfs_mount *mp = err_to_mp(kobject);
520 ret = kstrtoint(buf, 0, &val);
524 if (val < 0 || val > 1)
527 mp->m_fail_unmount = val;
530 XFS_SYSFS_ATTR_RW(fail_at_unmount);
532 static struct attribute *xfs_error_attrs[] = {
533 ATTR_LIST(max_retries),
534 ATTR_LIST(retry_timeout_seconds),
539 static struct kobj_type xfs_error_cfg_ktype = {
540 .release = xfs_sysfs_release,
541 .sysfs_ops = &xfs_sysfs_ops,
542 .default_attrs = xfs_error_attrs,
545 static struct kobj_type xfs_error_ktype = {
546 .release = xfs_sysfs_release,
547 .sysfs_ops = &xfs_sysfs_ops,
/*
 * Error initialization tables. These need to be ordered in the same
 * order as the enums used to index the array. All class init tables need to
 * define a "default" behaviour as the first entry, all other entries can be
 * empty.
 */
/* One row of the per-errno default retry policy tables below. */
struct xfs_error_init {
	char		*name;		/* sysfs directory name for this errno */
	int		max_retries;
	int		retry_timeout;	/* in seconds */
};
562 static const struct xfs_error_init xfs_error_meta_init[XFS_ERR_ERRNO_MAX] = {
564 .max_retries = XFS_ERR_RETRY_FOREVER,
565 .retry_timeout = XFS_ERR_RETRY_FOREVER,
568 .max_retries = XFS_ERR_RETRY_FOREVER,
569 .retry_timeout = XFS_ERR_RETRY_FOREVER,
572 .max_retries = XFS_ERR_RETRY_FOREVER,
573 .retry_timeout = XFS_ERR_RETRY_FOREVER,
576 .max_retries = 0, /* We can't recover from devices disappearing */
582 xfs_error_sysfs_init_class(
583 struct xfs_mount *mp,
585 const char *parent_name,
586 struct xfs_kobj *parent_kobj,
587 const struct xfs_error_init init[])
589 struct xfs_error_cfg *cfg;
593 ASSERT(class < XFS_ERR_CLASS_MAX);
595 error = xfs_sysfs_init(parent_kobj, &xfs_error_ktype,
596 &mp->m_error_kobj, parent_name);
600 for (i = 0; i < XFS_ERR_ERRNO_MAX; i++) {
601 cfg = &mp->m_error_cfg[class][i];
602 error = xfs_sysfs_init(&cfg->kobj, &xfs_error_cfg_ktype,
603 parent_kobj, init[i].name);
607 cfg->max_retries = init[i].max_retries;
608 if (init[i].retry_timeout == XFS_ERR_RETRY_FOREVER)
609 cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
611 cfg->retry_timeout = msecs_to_jiffies(
612 init[i].retry_timeout * MSEC_PER_SEC);
617 /* unwind the entries that succeeded */
618 for (i--; i >= 0; i--) {
619 cfg = &mp->m_error_cfg[class][i];
620 xfs_sysfs_del(&cfg->kobj);
622 xfs_sysfs_del(parent_kobj);
627 xfs_error_sysfs_init(
628 struct xfs_mount *mp)
632 /* .../xfs/<dev>/error/ */
633 error = xfs_sysfs_init(&mp->m_error_kobj, &xfs_error_ktype,
634 &mp->m_kobj, "error");
638 error = sysfs_create_file(&mp->m_error_kobj.kobject,
639 ATTR_LIST(fail_at_unmount));
644 /* .../xfs/<dev>/error/metadata/ */
645 error = xfs_error_sysfs_init_class(mp, XFS_ERR_METADATA,
646 "metadata", &mp->m_error_meta_kobj,
647 xfs_error_meta_init);
654 xfs_sysfs_del(&mp->m_error_kobj);
660 struct xfs_mount *mp)
662 struct xfs_error_cfg *cfg;
665 for (i = 0; i < XFS_ERR_CLASS_MAX; i++) {
666 for (j = 0; j < XFS_ERR_ERRNO_MAX; j++) {
667 cfg = &mp->m_error_cfg[i][j];
669 xfs_sysfs_del(&cfg->kobj);
672 xfs_sysfs_del(&mp->m_error_meta_kobj);
673 xfs_sysfs_del(&mp->m_error_kobj);
676 struct xfs_error_cfg *
678 struct xfs_mount *mp,
682 struct xfs_error_cfg *cfg;
689 cfg = &mp->m_error_cfg[error_class][XFS_ERR_EIO];
692 cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENOSPC];
695 cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENODEV];
698 cfg = &mp->m_error_cfg[error_class][XFS_ERR_DEFAULT];