/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/writeback.h
 */
#ifndef WRITEBACK_H
#define WRITEBACK_H

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/flex_proportions.h>
#include <linux/backing-dev-defs.h>
#include <linux/blk_types.h>

struct bio;

DECLARE_PER_CPU(int, dirty_throttle_leaks);

/*
 * The 1/4 region under the global dirty thresh is for smooth dirty throttling:
 *
 *	(thresh - thresh/DIRTY_FULL_SCOPE, thresh)
 *
 * Further beyond, all dirtier tasks will enter a loop, waiting (possibly for
 * a long time) for the dirty pages to drop, unless they have written enough
 * pages.
 *
 * The global dirty threshold is normally equal to the global dirty limit,
 * except when the system suddenly allocates a lot of anonymous memory and
 * knocks down the global dirty threshold quickly, in which case the global
 * dirty limit will follow down slowly to prevent livelocking all dirtier
 * tasks.
 */
#define DIRTY_SCOPE		8
#define DIRTY_FULL_SCOPE	(DIRTY_SCOPE / 2)
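
/*
 * Worked example (illustrative helper, not part of the kernel API): with
 * DIRTY_FULL_SCOPE == 4 and a global thresh of 1000 pages, the smooth
 * throttling region is (1000 - 1000/4, 1000) == (750, 1000) pages.  The
 * sketch below just evaluates that lower bound.
 */
static inline unsigned long dirty_freerun_floor_example(unsigned long thresh)
{
	return thresh - thresh / DIRTY_FULL_SCOPE;	/* 1000 -> 750 */
}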

struct backing_dev_info;

/*
 * fs/fs-writeback.c
 */
enum writeback_sync_modes {
	WB_SYNC_NONE,	/* Don't wait on anything */
	WB_SYNC_ALL,	/* Wait on every mapping */
};

/*
 * A control structure which tells the writeback code what to do.  These are
 * always on the stack, and hence need no locking.  They are always initialised
 * in a manner such that unspecified fields are set to zero.
 */
struct writeback_control {
	long nr_to_write;		/* Write this many pages, and decrement
					   this for each page written */
	long pages_skipped;		/* Pages which were not written */

	/*
	 * For a_ops->writepages(): if start or end are non-zero then this is
	 * a hint that the filesystem need only write out the pages inside that
	 * byterange.  The byte at `end' is included in the writeout request.
	 */
	loff_t range_start;
	loff_t range_end;

	enum writeback_sync_modes sync_mode;

	unsigned for_kupdate:1;		/* A kupdate writeback */
	unsigned for_background:1;	/* A background writeback */
	unsigned tagged_writepages:1;	/* tag-and-write to avoid livelock */
	unsigned for_reclaim:1;		/* Invoked from the page allocator */
	unsigned range_cyclic:1;	/* range_start is cyclic */
	unsigned for_sync:1;		/* sync(2) WB_SYNC_ALL writeback */
	unsigned unpinned_fscache_wb:1;	/* Cleared I_PINNING_FSCACHE_WB */

	/*
	 * When writeback IOs are bounced through async layers, only the
	 * initial synchronous phase should be accounted towards inode
	 * cgroup ownership arbitration to avoid confusion.  Later stages
	 * can set the following flag to disable the accounting.
	 */
	unsigned no_cgroup_owner:1;

	unsigned punt_to_cgroup:1;	/* cgrp punting, see __REQ_CGROUP_PUNT */

	/*
	 * To enable batching of swap writes to non-block-device backends,
	 * "swap_plug" can be set to point to a 'struct swap_iocb *'.  When
	 * all swap writes have been submitted, if the swap_iocb is not NULL,
	 * swap_write_unplug() should be called.
	 */
	struct swap_iocb **swap_plug;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct bdi_writeback *wb;	/* wb this writeback is issued under */
	struct inode *inode;		/* inode being written out */

	/* foreign inode detection, see wbc_detach_inode() */
	int wb_id;			/* current wb id */
	int wb_lcand_id;		/* last foreign candidate wb id */
	int wb_tcand_id;		/* this foreign candidate wb id */
	size_t wb_bytes;		/* bytes written by current wb */
	size_t wb_lcand_bytes;		/* bytes written by last candidate */
	size_t wb_tcand_bytes;		/* bytes written by this candidate */
#endif
};
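
/*
 * Illustrative sketch (not from the kernel tree): a typical stack
 * initialisation for data-integrity writeback of a whole file.
 * Designated initializers zero every field left unspecified, which is
 * exactly what the comment above relies on.
 */
static inline void wbc_example_init(struct writeback_control *wbc)
{
	*wbc = (struct writeback_control){
		.sync_mode	= WB_SYNC_ALL,	/* wait on every page */
		.nr_to_write	= LONG_MAX,	/* no page-count limit */
		.range_start	= 0,
		.range_end	= LLONG_MAX,	/* to end of file */
	};
}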

static inline int wbc_to_write_flags(struct writeback_control *wbc)
{
	int flags = 0;

	if (wbc->punt_to_cgroup)
		flags = REQ_CGROUP_PUNT;

	if (wbc->sync_mode == WB_SYNC_ALL)
		flags |= REQ_SYNC;
	else if (wbc->for_kupdate || wbc->for_background)
		flags |= REQ_BACKGROUND;

	return flags;
}
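
/*
 * Example (illustrative, not part of the kernel API): WB_SYNC_ALL maps to
 * REQ_SYNC so the block layer treats the IO as synchronous, while
 * kupdate/background writeback maps to REQ_BACKGROUND and may be
 * deprioritized.
 */
static inline int wbc_example_sync_flags(void)
{
	struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL };

	return wbc_to_write_flags(&wbc);	/* == REQ_SYNC */
}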

#ifdef CONFIG_CGROUP_WRITEBACK
#define wbc_blkcg_css(wbc) \
	((wbc)->wb ? (wbc)->wb->blkcg_css : blkcg_root_css)
#else
#define wbc_blkcg_css(wbc)	(blkcg_root_css)
#endif /* CONFIG_CGROUP_WRITEBACK */

/*
 * A wb_domain represents a domain that wb's (bdi_writeback's) belong to
 * and are measured against each other in.  There always is one global
 * domain, global_wb_domain, that every wb in the system is a member of.
 * This allows measuring the relative bandwidth of each wb to distribute
 * dirtyable memory accordingly.
 */
struct wb_domain {
	spinlock_t lock;

	/*
	 * Scale the writeback cache size proportional to the relative
	 * writeout speed.
	 *
	 * We do this by keeping a floating proportion between BDIs, based
	 * on page writeback completions [end_page_writeback()].  Those
	 * devices that write out pages fastest will get the larger share,
	 * while the slower will get a smaller share.
	 *
	 * We use page writeout completions because we are interested in
	 * getting rid of dirty pages.  Having them written out is the
	 * primary goal.
	 *
	 * We introduce a concept of time, a period over which we measure
	 * these events, because demand can/will vary over time.  The length
	 * of this period itself is measured in page writeback completions.
	 */
	struct fprop_global completions;
	struct timer_list period_timer;	/* timer for aging of completions */
	unsigned long period_time;

	/*
	 * The dirtyable memory and dirty threshold could be suddenly
	 * knocked down by a large amount (eg. on the startup of KVM in a
	 * swapless system).  This may throw the system into deep dirty
	 * exceeded state and throttle heavy/light dirtiers alike.  To
	 * retain good responsiveness, maintain global_dirty_limit for
	 * tracking slowly down to the knocked down dirty threshold.
	 *
	 * Both fields are protected by ->lock.
	 */
	unsigned long dirty_limit_tstamp;
	unsigned long dirty_limit;
};
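
/*
 * Worked example (illustrative helper, not part of the kernel API): if a
 * wb completed 300 of its domain's 1000 page writebacks over the current
 * period, its floating proportion is ~30%, and it is allotted roughly that
 * share of the domain's dirtyable memory.
 */
static inline unsigned long wb_domain_share_pct_example(unsigned long wb_completions,
							unsigned long dom_completions)
{
	if (!dom_completions)
		return 0;
	return 100UL * wb_completions / dom_completions;	/* 300/1000 -> 30 */
}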

/**
 * wb_domain_size_changed - memory available to a wb_domain has changed
 * @dom: wb_domain of interest
 *
 * This function should be called when the amount of memory available to
 * @dom has changed.  It resets @dom's dirty limit parameters to prevent
 * past values which no longer match the current configuration from skewing
 * dirty throttling.  Without this, when the memory size of a wb_domain is
 * greatly reduced, the dirty throttling logic may allow too many pages to
 * be dirtied, leading to consecutive unnecessary OOMs, and may get stuck
 * in that situation.
 */
static inline void wb_domain_size_changed(struct wb_domain *dom)
{
	spin_lock(&dom->lock);
	dom->dirty_limit_tstamp = jiffies;
	dom->dirty_limit = 0;
	spin_unlock(&dom->lock);
}

/*
 * fs/fs-writeback.c
 */
struct bdi_writeback;
void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
			    enum wb_reason reason);
void try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason);
void sync_inodes_sb(struct super_block *);
void wakeup_flusher_threads(enum wb_reason reason);
void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
				enum wb_reason reason);
void inode_wait_for_writeback(struct inode *inode);
void inode_io_list_del(struct inode *inode);

/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
{
	might_sleep();
	wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/cgroup.h>
#include <linux/bio.h>

void __inode_attach_wb(struct inode *inode, struct page *page);
void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
				 struct inode *inode)
	__releases(&inode->i_lock);
void wbc_detach_inode(struct writeback_control *wbc);
void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
			      size_t bytes);
int cgroup_writeback_by_id(u64 bdi_id, int memcg_id,
			   enum wb_reason reason, struct wb_completion *done);
void cgroup_writeback_umount(void);
bool cleanup_offline_cgwb(struct bdi_writeback *wb);

/**
 * inode_attach_wb - associate an inode with its wb
 * @inode: inode of interest
 * @page: page being dirtied (may be NULL)
 *
 * If @inode doesn't have its wb, associate it with the wb matching the
 * memcg of @page or, if @page is NULL, %current.  May be called with or
 * without @inode->i_lock held.
 */
static inline void inode_attach_wb(struct inode *inode, struct page *page)
{
	if (!inode->i_wb)
		__inode_attach_wb(inode, page);
}

/**
 * inode_detach_wb - disassociate an inode from its wb
 * @inode: inode of interest
 *
 * @inode is being freed.  Detach from its wb.
 */
static inline void inode_detach_wb(struct inode *inode)
{
	if (inode->i_wb) {
		WARN_ON_ONCE(!(inode->i_state & I_CLEAR));
		wb_put(inode->i_wb);
		inode->i_wb = NULL;
	}
}

/**
 * wbc_attach_fdatawrite_inode - associate wbc and inode for fdatawrite
 * @wbc: writeback_control of interest
 * @inode: target inode
 *
 * This function is to be used by __filemap_fdatawrite_range(), which is an
 * alternative entry point into writeback code, and first ensures @inode is
 * associated with a bdi_writeback and attaches it to @wbc.
 */
static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
					       struct inode *inode)
{
	spin_lock(&inode->i_lock);
	inode_attach_wb(inode, NULL);
	wbc_attach_and_unlock_inode(wbc, inode);
}
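
/*
 * Illustrative sketch (not from the kernel tree): the cgroup writeback
 * lifecycle around a writeback pass, as performed by helpers such as
 * __filemap_fdatawrite_range().  The wbc and inode are assumed to be set
 * up by the caller.
 */
static inline void wbc_example_lifecycle(struct writeback_control *wbc,
					 struct inode *inode)
{
	wbc_attach_fdatawrite_inode(wbc, inode);	/* locks, attaches, unlocks */
	/* ... issue writeback, e.g. via do_writepages() ... */
	wbc_detach_inode(wbc);		/* settles foreign-inode detection */
}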

/**
 * wbc_init_bio - writeback-specific initialization of bio
 * @wbc: writeback_control for the writeback in progress
 * @bio: bio to be initialized
 *
 * @bio is a part of the writeback in progress controlled by @wbc.  Perform
 * writeback-specific initialization.  This is used to apply the cgroup
 * writeback context.  Must be called after the bio has been associated with
 * a device.
 */
static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
{
	/*
	 * pageout() path doesn't attach @wbc to the inode being written
	 * out.  This is intentional as we don't want the function to block
	 * behind a slow cgroup.  Ultimately, we want pageout() to kick off
	 * regular writeback instead of writing things out itself.
	 */
	if (wbc->wb)
		bio_associate_blkg_from_css(bio, wbc->wb->blkcg_css);
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline void inode_attach_wb(struct inode *inode, struct page *page)
{
}

static inline void inode_detach_wb(struct inode *inode)
{
}

static inline void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
					       struct inode *inode)
	__releases(&inode->i_lock)
{
	spin_unlock(&inode->i_lock);
}

static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
					       struct inode *inode)
{
}

static inline void wbc_detach_inode(struct writeback_control *wbc)
{
}

static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
{
}

static inline void wbc_account_cgroup_owner(struct writeback_control *wbc,
					    struct page *page, size_t bytes)
{
}

static inline void cgroup_writeback_umount(void)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

/*
 * mm/page-writeback.c
 */
void laptop_io_completion(struct backing_dev_info *info);
void laptop_sync_completion(void);
void laptop_mode_timer_fn(struct timer_list *t);
bool node_dirty_ok(struct pglist_data *pgdat);
int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
#ifdef CONFIG_CGROUP_WRITEBACK
void wb_domain_exit(struct wb_domain *dom);
#endif

extern struct wb_domain global_wb_domain;

/* These are exported to sysctl. */
extern unsigned int dirty_writeback_interval;
extern unsigned int dirty_expire_interval;
extern unsigned int dirtytime_expire_interval;
extern int laptop_mode;

int dirtytime_interval_handler(struct ctl_table *table, int write,
			       void *buffer, size_t *lenp, loff_t *ppos);

void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);

void wb_update_bandwidth(struct bdi_writeback *wb);
void balance_dirty_pages_ratelimited(struct address_space *mapping);
bool wb_over_bg_thresh(struct bdi_writeback *wb);
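
/*
 * Illustrative sketch (not part of the kernel API): buffered write paths
 * call balance_dirty_pages_ratelimited() once per dirtied page; it is
 * cheap while in the freerun region and starts sleeping the dirtier as
 * the thresholds described above are approached.
 */
static inline void example_throttle_dirtier(struct address_space *mapping,
					    unsigned long nr_dirtied)
{
	while (nr_dirtied--)
		balance_dirty_pages_ratelimited(mapping);	/* may sleep */
}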

typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
			   void *data);

int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc);
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end);
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data);
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
void writeback_set_ratelimit(void);
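
/*
 * Illustrative sketch (not from the kernel tree): a filesystem's
 * ->writepages() is commonly built on write_cache_pages() with a
 * writepage_t callback; myfs_writepage() is a hypothetical per-page
 * helper assumed to be defined elsewhere.
 */
int myfs_writepage(struct page *page, struct writeback_control *wbc, void *data);

static inline int myfs_example_writepages(struct address_space *mapping,
					  struct writeback_control *wbc)
{
	/* honours wbc->nr_to_write, wbc->range_start/end and sync_mode */
	return write_cache_pages(mapping, wbc, myfs_writepage, NULL);
}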

bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio);
void folio_account_redirty(struct folio *folio);
static inline void account_page_redirty(struct page *page)
{
	folio_account_redirty(page_folio(page));
}
bool folio_redirty_for_writepage(struct writeback_control *, struct folio *);
bool redirty_page_for_writepage(struct writeback_control *, struct page *);

void sb_mark_inode_writeback(struct inode *inode);
void sb_clear_inode_writeback(struct inode *inode);

#endif	/* WRITEBACK_H */