/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/writeback.h
 */
#ifndef WRITEBACK_H
#define WRITEBACK_H

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/flex_proportions.h>
#include <linux/backing-dev-defs.h>
#include <linux/blk_types.h>
#include <linux/pagevec.h>

struct bio;

DECLARE_PER_CPU(int, dirty_throttle_leaks);

/*
 * The global dirty threshold is normally equal to the global dirty limit,
 * except when the system suddenly allocates a lot of anonymous memory and
 * knocks down the global dirty threshold quickly, in which case the global
 * dirty limit will follow down slowly to prevent livelocking all dirtier tasks.
 */
#define DIRTY_SCOPE		8

struct backing_dev_info;

/*
 * fs/fs-writeback.c
 */
enum writeback_sync_modes {
	WB_SYNC_NONE,	/* Don't wait on anything */
	WB_SYNC_ALL,	/* Wait on every mapping */
};

/*
 * A control structure which tells the writeback code what to do. These are
 * always on the stack, and hence need no locking. They are always initialised
 * in a manner such that unspecified fields are set to zero.
 */
struct writeback_control {
	/* public fields that can be set and/or consumed by the caller: */
	long nr_to_write;		/* Write this many pages, and decrement
					   this for each page written */
	long pages_skipped;		/* Pages which were not written */

	/*
	 * For a_ops->writepages(): if start or end are non-zero then this is
	 * a hint that the filesystem need only write out the pages inside that
	 * byterange. The byte at `end' is included in the writeout request.
	 */
	loff_t range_start;
	loff_t range_end;

	enum writeback_sync_modes sync_mode;

	unsigned for_kupdate:1;		/* A kupdate writeback */
	unsigned for_background:1;	/* A background writeback */
	unsigned tagged_writepages:1;	/* tag-and-write to avoid livelock */
	unsigned for_reclaim:1;		/* Invoked from the page allocator */
	unsigned range_cyclic:1;	/* range_start is cyclic */
	unsigned for_sync:1;		/* sync(2) WB_SYNC_ALL writeback */
	unsigned unpinned_netfs_wb:1;	/* Cleared I_PINNING_NETFS_WB */

	/*
	 * When writeback IOs are bounced through async layers, only the
	 * initial synchronous phase should be accounted towards inode
	 * cgroup ownership arbitration to avoid confusion. Later stages
	 * can set the following flag to disable the accounting.
	 */
	unsigned no_cgroup_owner:1;

	/*
	 * To enable batching of swap writes to non-block-device backends,
	 * "swap_plug" can be set to point to a 'struct swap_iocb *'. When
	 * all swap writes have been submitted, if the pointed-to swap_iocb
	 * is not NULL, swap_write_unplug() should be called on it.
	 */
	struct swap_iocb **swap_plug;

	/* Target list for splitting a large folio */
	struct list_head *list;

	/* internal fields used by the ->writepages implementation: */
	struct folio_batch fbatch;
	pgoff_t index;
	int saved_err;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct bdi_writeback *wb;	/* wb this writeback is issued under */
	struct inode *inode;		/* inode being written out */

	/* foreign inode detection, see wbc_detach_inode() */
	int wb_id;			/* current wb id */
	int wb_lcand_id;		/* last foreign candidate wb id */
	int wb_tcand_id;		/* this foreign candidate wb id */
	size_t wb_bytes;		/* bytes written by current wb */
	size_t wb_lcand_bytes;		/* bytes written by last candidate */
	size_t wb_tcand_bytes;		/* bytes written by this candidate */
#endif
};

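/*
 * Illustrative sketch (not part of the original header): callers build a
 * writeback_control on the stack with designated initializers, so any field
 * not named is zeroed. A full-range synchronous writeout, for instance,
 * might be set up as:
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_ALL,
 *		.nr_to_write	= LONG_MAX,
 *		.range_start	= 0,
 *		.range_end	= LLONG_MAX,
 *	};
 */
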
static inline blk_opf_t wbc_to_write_flags(struct writeback_control *wbc)
{
	blk_opf_t flags = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		flags |= REQ_SYNC;
	else if (wbc->for_kupdate || wbc->for_background)
		flags |= REQ_BACKGROUND;

	return flags;
}

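/*
 * Illustrative sketch: a filesystem typically folds these flags into the
 * operation bits of a writeback bio, e.g.:
 *
 *	bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
 */
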
#ifdef CONFIG_CGROUP_WRITEBACK
#define wbc_blkcg_css(wbc) \
	((wbc)->wb ? (wbc)->wb->blkcg_css : blkcg_root_css)
#else
#define wbc_blkcg_css(wbc) (blkcg_root_css)
#endif /* CONFIG_CGROUP_WRITEBACK */

/*
 * A wb_domain represents a domain that wb's (bdi_writeback's) belong to
 * and are measured against each other in. There always is one global
 * domain, global_wb_domain, that every wb in the system is a member of.
 * This allows measuring the relative bandwidth of each wb to distribute
 * dirtyable memory accordingly.
 */
struct wb_domain {
	spinlock_t lock;

	/*
	 * Scale the writeback cache size proportional to the relative
	 * writeout speed.
	 *
	 * We do this by keeping a floating proportion between BDIs, based
	 * on page writeback completions [end_page_writeback()]. Those
	 * devices that write out pages fastest will get the larger share,
	 * while the slower will get a smaller share.
	 *
	 * We use page writeout completions because we are interested in
	 * getting rid of dirty pages. Having them written out is the
	 * primary goal.
	 *
	 * We introduce a concept of time, a period over which we measure
	 * these events, because demand can/will vary over time. The length
	 * of this period itself is measured in page writeback completions.
	 */
	struct fprop_global completions;
	struct timer_list period_timer;	/* timer for aging of completions */
	unsigned long period_time;

	/*
	 * The dirtyable memory and dirty threshold could be suddenly
	 * knocked down by a large amount (eg. on the startup of KVM in a
	 * swapless system). This may throw the system into deep dirty
	 * exceeded state and throttle heavy/light dirtiers alike. To
	 * retain good responsiveness, maintain global_dirty_limit for
	 * tracking slowly down to the knocked down dirty threshold.
	 *
	 * Both fields are protected by ->lock.
	 */
	unsigned long dirty_limit_tstamp;
	unsigned long dirty_limit;
};

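/*
 * Illustrative example (not from the original header): with the floating
 * proportions above, a wb that accounted for, say, 150 of the 200 page
 * writeback completions in the current measurement period is credited with
 * roughly 3/4 of the domain's dirtyable share, and that share decays as
 * later periods shift the proportions.
 */
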
/**
 * wb_domain_size_changed - memory available to a wb_domain has changed
 * @dom: wb_domain of interest
 *
 * This function should be called when the amount of memory available to
 * @dom has changed. It resets @dom's dirty limit parameters so that stale
 * values which no longer match the current configuration don't skew dirty
 * throttling. Without this, when the memory size of a wb_domain is greatly
 * reduced, the dirty throttling logic may allow too many pages to be
 * dirtied, leading to consecutive unnecessary OOMs, and may get stuck in
 * that state.
 */
static inline void wb_domain_size_changed(struct wb_domain *dom)
{
	spin_lock(&dom->lock);
	dom->dirty_limit_tstamp = jiffies;
	dom->dirty_limit = 0;
	spin_unlock(&dom->lock);
}

/*
 * fs/fs-writeback.c
 */
struct bdi_writeback;
void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
			    enum wb_reason reason);
void try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason);
void sync_inodes_sb(struct super_block *);
void wakeup_flusher_threads(enum wb_reason reason);
void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
				enum wb_reason reason);
void inode_wait_for_writeback(struct inode *inode);
void inode_io_list_del(struct inode *inode);

/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
{
	wait_var_event(inode_state_wait_address(inode, __I_NEW),
		       !(READ_ONCE(inode->i_state) & I_NEW));
}

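/*
 * Illustrative, paraphrased sketch: the inode hash lookup paths in
 * fs/inode.c (e.g. iget_locked()) use this to wait out a racing creation.
 * Having found a hashed inode that another task may still be initialising
 * (I_NEW set), they do roughly:
 *
 *	inode = <look up inode in the hash>;
 *	if (inode) {
 *		wait_on_inode(inode);	// returns once I_NEW is clear
 *		return inode;
 *	}
 */
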
#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/cgroup.h>
#include <linux/bio.h>

void __inode_attach_wb(struct inode *inode, struct folio *folio);
void wbc_detach_inode(struct writeback_control *wbc);
void wbc_account_cgroup_owner(struct writeback_control *wbc, struct folio *folio,
			      size_t bytes);
int cgroup_writeback_by_id(u64 bdi_id, int memcg_id,
			   enum wb_reason reason, struct wb_completion *done);
void cgroup_writeback_umount(struct super_block *sb);
bool cleanup_offline_cgwb(struct bdi_writeback *wb);

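/*
 * Illustrative sketch: filesystems call wbc_account_cgroup_owner() for each
 * folio as it is added to a writeback bio, so inode cgroup ownership can be
 * arbitrated from the bytes each cgroup writes, e.g.:
 *
 *	wbc_account_cgroup_owner(wbc, folio, folio_size(folio));
 */
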
/**
 * inode_attach_wb - associate an inode with its wb
 * @inode: inode of interest
 * @folio: folio being dirtied (may be NULL)
 *
 * If @inode doesn't have its wb, associate it with the wb matching the
 * memcg of @folio or, if @folio is NULL, %current. May be called w/ or w/o
 * @inode->i_lock.
 */
static inline void inode_attach_wb(struct inode *inode, struct folio *folio)
{
	if (!inode->i_wb)
		__inode_attach_wb(inode, folio);
}

/**
 * inode_detach_wb - disassociate an inode from its wb
 * @inode: inode of interest
 *
 * @inode is being freed. Detach from its wb.
 */
static inline void inode_detach_wb(struct inode *inode)
{
	if (inode->i_wb) {
		WARN_ON_ONCE(!(inode->i_state & I_CLEAR));
		wb_put(inode->i_wb);
		inode->i_wb = NULL;
	}
}

void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
				 struct inode *inode);

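/*
 * Illustrative sketch: wbc_attach_fdatawrite_inode() pairs with
 * wbc_detach_inode() around a writeback pass, as in the
 * filemap_fdatawrite()-style paths:
 *
 *	wbc_attach_fdatawrite_inode(&wbc, mapping->host);
 *	ret = do_writepages(mapping, &wbc);
 *	wbc_detach_inode(&wbc);
 */
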
/**
 * wbc_init_bio - writeback specific initialization of bio
 * @wbc: writeback_control for the writeback in progress
 * @bio: bio to be initialized
 *
 * @bio is a part of the writeback in progress controlled by @wbc. Perform
 * writeback specific initialization. This is used to apply the cgroup
 * writeback context. Must be called after the bio has been associated with
 * a device.
 */
static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
{
	/*
	 * pageout() path doesn't attach @wbc to the inode being written
	 * out. This is intentional as we don't want the function to block
	 * behind a slow cgroup. Ultimately, we want pageout() to kick off
	 * regular writeback instead of writing things out itself.
	 */
	if (wbc->wb)
		bio_associate_blkg_from_css(bio, wbc->wb->blkcg_css);
}

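/*
 * Illustrative sketch (GFP flags and vector count are assumptions): because
 * wbc_init_bio() must run after the bio has a device, the usual ordering in
 * a filesystem's writeback path is:
 *
 *	bio = bio_alloc(bdev, nr_vecs,
 *			REQ_OP_WRITE | wbc_to_write_flags(wbc), GFP_NOFS);
 *	wbc_init_bio(wbc, bio);
 */
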
#else /* CONFIG_CGROUP_WRITEBACK */

static inline void inode_attach_wb(struct inode *inode, struct folio *folio)
{
}

static inline void inode_detach_wb(struct inode *inode)
{
}

static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
					       struct inode *inode)
{
}

static inline void wbc_detach_inode(struct writeback_control *wbc)
{
}

static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
{
}

static inline void wbc_account_cgroup_owner(struct writeback_control *wbc,
					    struct folio *folio, size_t bytes)
{
}

static inline void cgroup_writeback_umount(struct super_block *sb)
{
}

#endif /* CONFIG_CGROUP_WRITEBACK */

/*
 * mm/page-writeback.c
 */
/* consolidated parameters for balance_dirty_pages() and its subroutines */
struct dirty_throttle_control {
#ifdef CONFIG_CGROUP_WRITEBACK
	struct wb_domain	*dom;
	struct dirty_throttle_control *gdtc;	/* only set in memcg dtc's */
#endif
	struct bdi_writeback	*wb;
	struct fprop_local_percpu *wb_completions;

	unsigned long		avail;		/* dirtyable */
	unsigned long		dirty;		/* file_dirty + write + nfs */
	unsigned long		thresh;		/* dirty threshold */
	unsigned long		bg_thresh;	/* dirty background threshold */
	unsigned long		limit;		/* hard dirty limit */

	unsigned long		wb_dirty;	/* per-wb counterparts */
	unsigned long		wb_thresh;
	unsigned long		wb_bg_thresh;

	unsigned long		pos_ratio;
	bool			freerun;
	bool			dirty_exceeded;
};

void laptop_io_completion(struct backing_dev_info *info);
void laptop_sync_completion(void);
void laptop_mode_timer_fn(struct timer_list *t);
bool node_dirty_ok(struct pglist_data *pgdat);
int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
#ifdef CONFIG_CGROUP_WRITEBACK
void wb_domain_exit(struct wb_domain *dom);
#endif

extern struct wb_domain global_wb_domain;

/* These are exported to sysctl. */
extern unsigned int dirty_writeback_interval;
extern unsigned int dirty_expire_interval;
extern int laptop_mode;

void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);
unsigned long cgwb_calc_thresh(struct bdi_writeback *wb);

void wb_update_bandwidth(struct bdi_writeback *wb);

/* Invoke balance dirty pages in async mode. */
#define BDP_ASYNC		0x0001

void balance_dirty_pages_ratelimited(struct address_space *mapping);
int balance_dirty_pages_ratelimited_flags(struct address_space *mapping,
					  unsigned int flags);

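/*
 * Illustrative sketch: buffered-write paths call the ratelimited helper
 * after dirtying pages, so heavy dirtiers are throttled before the dirty
 * thresholds are blown through:
 *
 *	while (bytes_left) {
 *		... copy data into the page cache, mark folios dirty ...
 *		balance_dirty_pages_ratelimited(mapping);
 *	}
 */
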
bool wb_over_bg_thresh(struct bdi_writeback *wb);

struct folio *writeback_iter(struct address_space *mapping,
		struct writeback_control *wbc, struct folio *folio, int *error);

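/*
 * Illustrative sketch: a ->writepages implementation drives writeback_iter()
 * in a loop, starting from a NULL folio and feeding each folio's status back
 * through @error:
 *
 *	struct folio *folio = NULL;
 *	int error = 0;
 *
 *	while ((folio = writeback_iter(mapping, wbc, folio, &error)))
 *		error = <write out the folio>;
 */
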
typedef int (*writepage_t)(struct folio *folio, struct writeback_control *wbc,
			   void *data);

int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data);
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
void writeback_set_ratelimit(void);
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end);

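/*
 * Illustrative sketch: tag_pages_for_writeback() underpins the
 * tagged_writepages livelock avoidance. An iterator first tags the dirty
 * pages in the range, then writes only pages carrying the tag, so pages
 * dirtied mid-sweep don't extend the sweep:
 *
 *	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
 *		tag_pages_for_writeback(mapping, index, end);
 */
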
bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio);
bool folio_redirty_for_writepage(struct writeback_control *, struct folio *);
bool redirty_page_for_writepage(struct writeback_control *, struct page *);

void sb_mark_inode_writeback(struct inode *inode);
void sb_clear_inode_writeback(struct inode *inode);

#endif /* WRITEBACK_H */