Merge branch 'address-masking'
[linux-2.6-block.git] / include / linux / shrinker.h
CommitLineData
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
b0d40c92
DC
2#ifndef _LINUX_SHRINKER_H
3#define _LINUX_SHRINKER_H
4
b7217a0b
M
5#include <linux/atomic.h>
6#include <linux/types.h>
ca1d36b8
QZ
7#include <linux/refcount.h>
8#include <linux/completion.h>
b7217a0b 9
307becec
QZ
/* One tracking unit covers BITS_PER_LONG shrinker ids. */
#define SHRINKER_UNIT_BITS	BITS_PER_LONG

/*
 * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
 * shrinkers, which have elements charged to the memcg.
 */
struct shrinker_info_unit {
	/* Per-id deferred scan counts; slot i pairs with bit i of @map. */
	atomic_long_t nr_deferred[SHRINKER_UNIT_BITS];
	/* Bit i set => the shrinker with that id may have charged objects. */
	DECLARE_BITMAP(map, SHRINKER_UNIT_BITS);
};

struct shrinker_info {
	/* Deferred freeing of the old info when the array is resized. */
	struct rcu_head rcu;
	/* Number of shrinker ids this info can track (capacity bound). */
	int map_nr_max;
	/* Flexible array of units, each covering SHRINKER_UNIT_BITS ids. */
	struct shrinker_info_unit *unit[];
};
26
b0d40c92
DC
/*
 * This struct is used to pass information from page reclaim to the shrinkers.
 * We consolidate the values for easier extension later.
 *
 * The 'gfpmask' refers to the allocation we are currently trying to
 * fulfil.
 */
struct shrink_control {
	/* Allocation context being satisfied; constrains what callees may do. */
	gfp_t gfp_mask;

	/* current node being shrunk (for NUMA aware shrinkers) */
	int nid;

	/*
	 * How many objects scan_objects should scan and try to reclaim.
	 * This is reset before every call, so it is safe for callees
	 * to modify.
	 */
	unsigned long nr_to_scan;

	/*
	 * How many objects did scan_objects process?
	 * This defaults to nr_to_scan before every call, but the callee
	 * should track its actual progress.
	 */
	unsigned long nr_scanned;

	/* current memcg being shrunk (for memcg aware shrinkers) */
	struct mem_cgroup *memcg;
};
57
/* Sentinel returns for count_objects/scan_objects (see comment below). */
#define SHRINK_STOP (~0UL)
#define SHRINK_EMPTY (~0UL - 1)
/*
 * A callback you can register to apply pressure to ageable caches.
 *
 * @count_objects should return the number of freeable items in the cache. If
 * there are no objects to free, it should return SHRINK_EMPTY, while 0 is
 * returned when the number of freeable items cannot be determined, or when
 * the shrinker should skip this cache for this time (e.g., their number
 * is below the shrinkable limit). No deadlock checks should be done during the
 * count callback - the shrinker relies on aggregating scan counts that couldn't
 * be executed due to potential deadlocks to be run at a later call when the
 * deadlock condition is no longer pending.
 *
 * @scan_objects will only be called if @count_objects returned a non-zero
 * value for the number of freeable objects. The callout should scan the cache
 * and attempt to free items from the cache. It should then return the number
 * of objects freed during the scan, or SHRINK_STOP if progress cannot be made
 * due to potential deadlocks. If SHRINK_STOP is returned, then no further
 * attempts to call the @scan_objects will be made from the current reclaim
 * context.
 *
 * @flags determine the shrinker abilities, like numa awareness
 */
struct shrinker {
	unsigned long (*count_objects)(struct shrinker *,
				       struct shrink_control *sc);
	unsigned long (*scan_objects)(struct shrinker *,
				      struct shrink_control *sc);

	long batch;	/* reclaim batch size, 0 = default */
	int seeks;	/* seeks to recreate an obj */
	unsigned flags;	/* SHRINKER_* bits, see definitions below */

	/*
	 * The reference count of this shrinker. A registered shrinker has an
	 * initial refcount of 1; lookup operations are then allowed to use it
	 * via shrinker_try_get(). Later, in the unregistration step, the
	 * initial refcount will be discarded, and the shrinker will be freed
	 * asynchronously via RCU after its refcount reaches 0.
	 */
	refcount_t refcount;
	struct completion done;	/* use to wait for refcount to reach 0 */
	struct rcu_head rcu;

	/* Opaque cookie for the shrinker's owner; not touched by the core. */
	void *private_data;

	/* These are for internal use */
	struct list_head list;
#ifdef CONFIG_MEMCG
	/* ID in shrinker_idr */
	int id;
#endif
#ifdef CONFIG_SHRINKER_DEBUG
	int debugfs_id;
	const char *name;
	struct dentry *debugfs_entry;
#endif
	/* objs pending delete, per node */
	atomic_long_t *nr_deferred;
};
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
1d3d4437 120
c42d50ae
QZ
/* Internal flags — managed by the shrinker core, not by users. */
#define SHRINKER_REGISTERED	BIT(0)
#define SHRINKER_ALLOCATED	BIT(1)

/* Flags for users to use */
#define SHRINKER_NUMA_AWARE	BIT(2)
#define SHRINKER_MEMCG_AWARE	BIT(3)
/*
 * It just makes sense when the shrinker is also MEMCG_AWARE for now;
 * a non-MEMCG_AWARE shrinker should not have this flag set.
 */
#define SHRINKER_NONSLAB	BIT(4)

/*
 * Allocate a shrinker; @fmt names it (used by debugfs when enabled).
 * Pair with shrinker_register() to activate and shrinker_free() to
 * tear down.
 */
__printf(2, 3)
struct shrinker *shrinker_alloc(unsigned int flags, const char *fmt, ...);
void shrinker_register(struct shrinker *shrinker);
void shrinker_free(struct shrinker *shrinker);
1d3d4437 138
ca1d36b8
QZ
139static inline bool shrinker_try_get(struct shrinker *shrinker)
140{
141 return refcount_inc_not_zero(&shrinker->refcount);
142}
143
144static inline void shrinker_put(struct shrinker *shrinker)
145{
146 if (refcount_dec_and_test(&shrinker->refcount))
147 complete(&shrinker->done);
148}
149
#ifdef CONFIG_SHRINKER_DEBUG
/* Rename the shrinker's debugfs entry; @fmt is a printf-style new name. */
extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker,
						  const char *fmt, ...);
#else /* CONFIG_SHRINKER_DEBUG */
/* No debugfs entries exist without CONFIG_SHRINKER_DEBUG: success no-op. */
static inline __printf(2, 3)
int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
{
	return 0;
}
#endif /* CONFIG_SHRINKER_DEBUG */
160#endif /* _LINUX_SHRINKER_H */