// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 *		Initial version.
 */

/**
 * DOC: Readahead Overview
 *
 * Readahead is used to read content into the page cache before it is
 * explicitly requested by the application. Readahead only ever
 * attempts to read folios that are not yet in the page cache. If a
 * folio is present but not up-to-date, readahead will not try to read
 * it. In that case a simple ->read_folio() will be requested.
 *
 * Readahead is triggered when an application read request (whether a
 * system call or a page fault) finds that the requested folio is not in
 * the page cache, or that it is in the page cache and has the
 * readahead flag set. This flag indicates that the folio was read
 * as part of a previous readahead request and now that it has been
 * accessed, it is time for the next readahead.
 *
 * Each readahead request is partly synchronous read, and partly async
 * readahead. This is reflected in the struct file_ra_state which
 * contains ->size being the total number of pages, and ->async_size
 * which is the number of pages in the async section. The readahead
 * flag will be set on the first folio in this async section to trigger
 * a subsequent readahead. Once a series of sequential reads has been
 * established, there should be no need for a synchronous component and
 * all readahead requests will be fully asynchronous.
 *
 * When either of the triggers causes a readahead, three numbers need
 * to be determined: the start of the region to read, the size of the
 * region, and the size of the async tail.
 *
 * The start of the region is simply the first page address at or after
 * the accessed address, which is not currently populated in the page
 * cache. This is found with a simple search in the page cache.
 *
 * The size of the async tail is determined by subtracting the size that
 * was explicitly requested from the determined request size, unless
 * this would be less than zero - then zero is used. NOTE THIS
 * CALCULATION IS WRONG WHEN THE START OF THE REGION IS NOT THE ACCESSED
 * PAGE. ALSO THIS CALCULATION IS NOT USED CONSISTENTLY.
 *
 * The size of the region is normally determined from the size of the
 * previous readahead which loaded the preceding pages. This may be
 * discovered from the struct file_ra_state for simple sequential reads,
 * or from examining the state of the page cache when multiple
 * sequential reads are interleaved. Specifically: where the readahead
 * was triggered by the readahead flag, the size of the previous
 * readahead is assumed to be the number of pages from the triggering
 * page to the start of the new readahead. In these cases, the size of
 * the previous readahead is scaled, often doubled, for the new
 * readahead, though see get_next_ra_size() for details.
 *
 * If the size of the previous read cannot be determined, the number of
 * preceding pages in the page cache is used to estimate the size of
 * a previous read. This estimate could easily be misled by random
 * reads being coincidentally adjacent, so it is ignored unless it is
 * larger than the current request, and it is not scaled up, unless it
 * is at the start of the file.
 *
 * In general readahead is accelerated at the start of the file, as
 * reads from there are often sequential. There are other minor
 * adjustments to the readahead size in various special cases and these
 * are best discovered by reading the code.
 *
 * The above calculation, based on the previous readahead size,
 * determines the size of the readahead, to which any requested read
 * size may be added.
 *
 * Readahead requests are sent to the filesystem using the ->readahead()
 * address space operation, for which mpage_readahead() is a canonical
 * implementation. ->readahead() should normally initiate reads on all
 * folios, but may fail to read any or all folios without causing an I/O
 * error. The page cache reading code will issue a ->read_folio() request
 * for any folio which ->readahead() did not read, and only an error
 * from this will be final.
 *
 * ->readahead() will generally call readahead_folio() repeatedly to get
 * each folio from those prepared for readahead. It may fail to read a
 * folio by:
 *
 * * not calling readahead_folio() sufficiently many times, effectively
 *   ignoring some folios, as might be appropriate if the path to
 *   storage is congested.
 *
 * * failing to actually submit a read request for a given folio,
 *   possibly due to insufficient resources, or
 *
 * * getting an error during subsequent processing of a request.
 *
 * In the last two cases, the folio should be unlocked by the filesystem
 * to indicate that the read attempt has failed. In the first case the
 * folio will be unlocked by the VFS.
 *
 * Those folios not in the final ``async_size`` of the request should be
 * considered to be important and ->readahead() should not fail them due
 * to congestion or temporary resource unavailability, but should wait
 * for necessary resources (e.g. memory or indexing information) to
 * become available. Folios in the final ``async_size`` may be
 * considered less urgent and failure to read them is more acceptable.
 * In this case it is best to use filemap_remove_folio() to remove the
 * folios from the page cache as is automatically done for folios that
 * were not fetched with readahead_folio(). This will allow a
 * subsequent synchronous readahead request to try them again. If they
 * are left in the page cache, then they will be read individually using
 * ->read_folio() which may be less efficient.
 */

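/*
 * A hedged sketch (not part of this file) of the shape a typical
 * ->readahead() implementation takes; my_submit_read_async() stands in
 * for whatever asynchronous submission path a real filesystem uses:
 *
 *	static void my_readahead(struct readahead_control *rac)
 *	{
 *		struct folio *folio;
 *
 *		while ((folio = readahead_folio(rac)) != NULL) {
 *			if (my_submit_read_async(rac->file, folio) < 0)
 *				folio_unlock(folio);
 *		}
 *	}
 *
 * readahead_folio() hands back each prepared folio in turn; a folio whose
 * submission fails is unlocked by the filesystem, as described above, so
 * the VFS can fall back to ->read_folio() for it.
 */
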
#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagemap.h>
#include <linux/psi.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/mm_inline.h>
#include <linux/blk-cgroup.h>
#include <linux/fadvise.h>
#include <linux/sched/mm.h>

#include "internal.h"

/*
 * Initialise a struct file's readahead state. Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

static void read_pages(struct readahead_control *rac)
{
	const struct address_space_operations *aops = rac->mapping->a_ops;
	struct folio *folio;
	struct blk_plug plug;

	if (!readahead_count(rac))
		return;

	if (unlikely(rac->_workingset))
		psi_memstall_enter(&rac->_pflags);
	blk_start_plug(&plug);

	if (aops->readahead) {
		aops->readahead(rac);
		/*
		 * Clean up the remaining folios. The sizes in ->ra
		 * may be used to size the next readahead, so make sure
		 * they accurately reflect what happened.
		 */
		while ((folio = readahead_folio(rac)) != NULL) {
			unsigned long nr = folio_nr_pages(folio);

			folio_get(folio);
			rac->ra->size -= nr;
			if (rac->ra->async_size >= nr) {
				rac->ra->async_size -= nr;
				filemap_remove_folio(folio);
			}
			folio_unlock(folio);
			folio_put(folio);
		}
	} else {
		while ((folio = readahead_folio(rac)) != NULL)
			aops->read_folio(rac->file, folio);
	}

	blk_finish_plug(&plug);
	if (unlikely(rac->_workingset))
		psi_memstall_leave(&rac->_pflags);
	rac->_workingset = false;

	BUG_ON(readahead_count(rac));
}

/**
 * page_cache_ra_unbounded - Start unchecked readahead.
 * @ractl: Readahead control.
 * @nr_to_read: The number of pages to read.
 * @lookahead_size: Where to start the next readahead.
 *
 * This function is for filesystems to call when they want to start
 * readahead beyond a file's stated i_size. This is almost certainly
 * not the function you want to call. Use page_cache_async_readahead()
 * or page_cache_sync_readahead() instead.
 *
 * Context: File is referenced by caller. Mutexes may be held by caller.
 * May sleep, but will not reenter filesystem to reclaim memory.
 */
void page_cache_ra_unbounded(struct readahead_control *ractl,
		unsigned long nr_to_read, unsigned long lookahead_size)
{
	struct address_space *mapping = ractl->mapping;
	unsigned long index = readahead_index(ractl);
	gfp_t gfp_mask = readahead_gfp_mask(mapping);
	unsigned long i;

	/*
	 * Partway through the readahead operation, we will have added
	 * locked pages to the page cache, but will not yet have submitted
	 * them for I/O. Adding another page may need to allocate memory,
	 * which can trigger memory reclaim. Telling the VM we're in
	 * the middle of a filesystem operation will cause it to not
	 * touch file-backed pages, preventing a deadlock. Most (all?)
	 * filesystems already specify __GFP_NOFS in their mapping's
	 * gfp_mask, but let's be explicit here.
	 */
	unsigned int nofs = memalloc_nofs_save();

	filemap_invalidate_lock_shared(mapping);
	/*
	 * Preallocate as many pages as we will need.
	 */
	for (i = 0; i < nr_to_read; i++) {
		struct folio *folio = xa_load(&mapping->i_pages, index + i);
		int ret;

		if (folio && !xa_is_value(folio)) {
			/*
			 * Page already present? Kick off the current batch
			 * of contiguous pages before continuing with the
			 * next batch. This page may be the one we would
			 * have intended to mark as Readahead, but we don't
			 * have a stable reference to this page, and it's
			 * not worth getting one just for that.
			 */
			read_pages(ractl);
			ractl->_index++;
			i = ractl->_index + ractl->_nr_pages - index - 1;
			continue;
		}

		folio = filemap_alloc_folio(gfp_mask, 0);
		if (!folio)
			break;

		ret = filemap_add_folio(mapping, folio, index + i, gfp_mask);
		if (ret < 0) {
			folio_put(folio);
			if (ret == -ENOMEM)
				break;
			read_pages(ractl);
			ractl->_index++;
			i = ractl->_index + ractl->_nr_pages - index - 1;
			continue;
		}
		if (i == nr_to_read - lookahead_size)
			folio_set_readahead(folio);
		ractl->_workingset |= folio_test_workingset(folio);
		ractl->_nr_pages++;
	}

	/*
	 * Now start the IO. We ignore I/O errors - if the folio is not
	 * uptodate then the caller will launch read_folio again, and
	 * will then handle the error.
	 */
	read_pages(ractl);
	filemap_invalidate_unlock_shared(mapping);
	memalloc_nofs_restore(nofs);
}
EXPORT_SYMBOL_GPL(page_cache_ra_unbounded);

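/*
 * A hedged usage sketch for the function above: a filesystem that keeps
 * metadata beyond i_size might prefetch it like this (the mapping, index
 * and page count are illustrative, not taken from any real caller):
 *
 *	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
 *
 *	page_cache_ra_unbounded(&ractl, 16, 0);
 *
 * This allocates and submits reads for 16 pages from @index with no EOF
 * check, which is why ordinary readers should prefer the checked entry
 * points instead.
 */
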
/*
 * do_page_cache_ra() actually reads a chunk of disk. It allocates
 * the pages first, then submits them for I/O. This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 */
static void do_page_cache_ra(struct readahead_control *ractl,
		unsigned long nr_to_read, unsigned long lookahead_size)
{
	struct inode *inode = ractl->mapping->host;
	unsigned long index = readahead_index(ractl);
	loff_t isize = i_size_read(inode);
	pgoff_t end_index;	/* The last page we want to read */

	if (isize == 0)
		return;

	end_index = (isize - 1) >> PAGE_SHIFT;
	if (index > end_index)
		return;
	/* Don't read past the page containing the last byte of the file */
	if (nr_to_read > end_index - index)
		nr_to_read = end_index - index + 1;

	page_cache_ra_unbounded(ractl, nr_to_read, lookahead_size);
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
void force_page_cache_ra(struct readahead_control *ractl,
		unsigned long nr_to_read)
{
	struct address_space *mapping = ractl->mapping;
	struct file_ra_state *ra = ractl->ra;
	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
	unsigned long max_pages, index;

	if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead))
		return;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	index = readahead_index(ractl);
	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
	nr_to_read = min_t(unsigned long, nr_to_read, max_pages);
	while (nr_to_read) {
		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		ractl->_index = index;
		do_page_cache_ra(ractl, this_chunk, 0);

		index += this_chunk;
		nr_to_read -= this_chunk;
	}
}

/*
 * Set the initial window size, round to next power of 2 and square
 * for small size, x 4 for medium, and x 2 for large
 * for 128k (32 page) max ra
 * 1-2 page = 16k, 3-4 page = 32k, 5-8 page = 64k, > 8 page = 128k initial
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}

/*
 * Get the previous window size, ramp it up, and
 * return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
				      unsigned long max)
{
	unsigned long cur = ra->size;

	if (cur < max / 16)
		return 4 * cur;
	if (cur <= max / 2)
		return 2 * cur;
	return max;
}

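/*
 * Worked example for the two helpers above, assuming the common 128k
 * (max = 32 page) window: an initial 4-page read rounds up to 4, and
 * since 4 <= max / 4 the first window becomes 8 pages. On subsequent
 * sequential hits get_next_ra_size() sees cur = 8, which is not below
 * max / 16 but is <= max / 2, so the window doubles to 16, then 32,
 * where it stays pinned at max.
 */
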
377 | /* | |
378 | * On-demand readahead design. | |
379 | * | |
380 | * The fields in struct file_ra_state represent the most-recently-executed | |
381 | * readahead attempt: | |
382 | * | |
f9acc8c7 FW |
383 | * |<----- async_size ---------| |
384 | * |------------------- size -------------------->| | |
385 | * |==================#===========================| | |
386 | * ^start ^page marked with PG_readahead | |
122a21d1 FW |
387 | * |
388 | * To overlap application thinking time and disk I/O time, we do | |
389 | * `readahead pipelining': Do not wait until the application consumed all | |
390 | * readahead pages and stalled on the missing page at readahead_index; | |
f9acc8c7 FW |
391 | * Instead, submit an asynchronous readahead I/O as soon as there are |
392 | * only async_size pages left in the readahead window. Normally async_size | |
393 | * will be equal to size, for maximum pipelining. | |
122a21d1 FW |
394 | * |
395 | * In interleaved sequential reads, concurrent streams on the same fd can | |
396 | * be invalidating each other's readahead state. So we flag the new readahead | |
f9acc8c7 | 397 | * page at (start+size-async_size) with PG_readahead, and use it as readahead |
122a21d1 FW |
398 | * indicator. The flag won't be set on already cached pages, to avoid the |
399 | * readahead-for-nothing fuss, saving pointless page cache lookups. | |
400 | * | |
f4e6b498 | 401 | * prev_pos tracks the last visited byte in the _previous_ read request. |
122a21d1 FW |
402 | * It should be maintained by the caller, and will be used for detecting |
403 | * small random reads. Note that the readahead algorithm checks loosely | |
404 | * for sequential patterns. Hence interleaved reads might be served as | |
405 | * sequential ones. | |
406 | * | |
 * There is a special case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial size
 * based on I/O request size and the max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */

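/*
 * A worked trace of the pipelining described above, assuming ra_pages = 32
 * and a fresh file_ra_state: a synchronous read of 16 pages at index 0
 * sets start = 0, size = 32 (get_init_ra_size() rounds the 16-page request
 * up to max) and async_size = 32 - 16 = 16, so PG_readahead lands on the
 * folio at index 16. When the application reaches index 16, the async
 * trigger fires and the window advances to start = 32, size = 32,
 * async_size = 32, keeping I/O in flight ahead of the reader.
 */
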
/*
 * Count contiguously cached pages from @index-1 to @index-@max,
 * this count is a conservative estimation of
 *	- length of the sequential read sequence, or
 *	- thrashing threshold in memory tight systems
 */
static pgoff_t count_history_pages(struct address_space *mapping,
				   pgoff_t index, unsigned long max)
{
	pgoff_t head;

	rcu_read_lock();
	head = page_cache_prev_miss(mapping, index - 1, max);
	rcu_read_unlock();

	return index - 1 - head;
}

/*
 * page cache context based readahead
 */
static int try_context_readahead(struct address_space *mapping,
				 struct file_ra_state *ra,
				 pgoff_t index,
				 unsigned long req_size,
				 unsigned long max)
{
	pgoff_t size;

	size = count_history_pages(mapping, index, max);

	/*
	 * not enough history pages:
	 * it could be a random read
	 */
	if (size <= req_size)
		return 0;

	/*
	 * starts from beginning of file:
	 * it is a strong indication of long-run stream (or whole-file-read)
	 */
	if (size >= index)
		size *= 2;

	ra->start = index;
	ra->size = min(size + req_size, max);
	ra->async_size = 1;

	return 1;
}

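/*
 * Worked example for the context heuristic above: suppose a stream whose
 * file_ra_state was clobbered by an interleaved reader issues an 8 page
 * read at index 1024 while the pages just below it are still cached from
 * its earlier reads. With max = 32, count_history_pages() scans back up
 * to 32 pages, finds no hole, and reports 32. That exceeds the 8 page
 * request, so the window is rebuilt as start = 1024, size = min(32 + 8,
 * 32) = 32, async_size = 1. Had only 4 adjacent pages been cached, the
 * estimate (4 <= 8) would be dismissed as a probable coincidence of
 * random reads.
 */
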
static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index,
		pgoff_t mark, unsigned int order, gfp_t gfp)
{
	int err;
	struct folio *folio = filemap_alloc_folio(gfp, order);

	if (!folio)
		return -ENOMEM;
	mark = round_down(mark, 1UL << order);
	if (index == mark)
		folio_set_readahead(folio);
	err = filemap_add_folio(ractl->mapping, folio, index, gfp);
	if (err) {
		folio_put(folio);
		return err;
	}

	ractl->_nr_pages += 1UL << order;
	ractl->_workingset |= folio_test_workingset(folio);
	return 0;
}

void page_cache_ra_order(struct readahead_control *ractl,
		struct file_ra_state *ra, unsigned int new_order)
{
	struct address_space *mapping = ractl->mapping;
	pgoff_t index = readahead_index(ractl);
	pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
	pgoff_t mark = index + ra->size - ra->async_size;
	unsigned int nofs;
	int err = 0;
	gfp_t gfp = readahead_gfp_mask(mapping);

	if (!mapping_large_folio_support(mapping) || ra->size < 4)
		goto fallback;

	limit = min(limit, index + ra->size - 1);

	if (new_order < MAX_PAGECACHE_ORDER) {
		new_order += 2;
		new_order = min_t(unsigned int, MAX_PAGECACHE_ORDER, new_order);
		new_order = min_t(unsigned int, new_order, ilog2(ra->size));
	}

	/* See comment in page_cache_ra_unbounded() */
	nofs = memalloc_nofs_save();
	filemap_invalidate_lock_shared(mapping);
	while (index <= limit) {
		unsigned int order = new_order;

		/* Align with smaller pages if needed */
		if (index & ((1UL << order) - 1))
			order = __ffs(index);
		/* Don't allocate pages past EOF */
		while (index + (1UL << order) - 1 > limit)
			order--;
		err = ra_alloc_folio(ractl, index, mark, order, gfp);
		if (err)
			break;
		index += 1UL << order;
	}

	if (index > limit) {
		ra->size += index - limit - 1;
		ra->async_size += index - limit - 1;
	}

	read_pages(ractl);
	filemap_invalidate_unlock_shared(mapping);
	memalloc_nofs_restore(nofs);

	/*
	 * If there were already pages in the page cache, then we may have
	 * left some gaps. Let the regular readahead code take care of this
	 * situation.
	 */
	if (!err)
		return;
fallback:
	do_page_cache_ra(ractl, ra->size, ra->async_size);
}

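/*
 * Worked example for the alignment logic in the loop above: with an
 * effective order of 2, a window starting at index 98 is not 4-aligned
 * (98 & 3 != 0), so the first folio drops to order __ffs(98) = 1,
 * covering pages 98-99; index then reaches 100, which is 4-aligned.
 * Near EOF, with limit = 101 and index = 100, an order-2 folio would
 * end at page 103, so the order is stepped down until the folio fits,
 * here to order 1 covering pages 100-101.
 */
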
/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static void ondemand_readahead(struct readahead_control *ractl,
		struct folio *folio, unsigned long req_size)
{
	struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host);
	struct file_ra_state *ra = ractl->ra;
	unsigned long max_pages = ra->ra_pages;
	unsigned long add_pages;
	pgoff_t index = readahead_index(ractl);
	pgoff_t expected, prev_index;
	unsigned int order = folio ? folio_order(folio) : 0;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	if (req_size > max_pages && bdi->io_pages > max_pages)
		max_pages = min(req_size, bdi->io_pages);

	/*
	 * start of file
	 */
	if (!index)
		goto initial_readahead;

	/*
	 * It's the expected callback index, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	expected = round_down(ra->start + ra->size - ra->async_size,
			1UL << order);
	if (index == expected || index == (ra->start + ra->size)) {
		ra->start += ra->size;
		ra->size = get_next_ra_size(ra, max_pages);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * Hit a marked folio without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals the
	 * readahead size. Ramp it up and use it as the new readahead size.
	 */
	if (folio) {
		pgoff_t start;

		rcu_read_lock();
		start = page_cache_next_miss(ractl->mapping, index + 1,
				max_pages);
		rcu_read_unlock();

		if (!start || start - index > max_pages)
			return;

		ra->start = start;
		ra->size = start - index;	/* old async_size */
		ra->size += req_size;
		ra->size = get_next_ra_size(ra, max_pages);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * oversize read
	 */
	if (req_size > max_pages)
		goto initial_readahead;

	/*
	 * sequential cache miss
	 * trivial case: (index - prev_index) == 1
	 * unaligned reads: (index - prev_index) == 0
	 */
	prev_index = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
	if (index - prev_index <= 1UL)
		goto initial_readahead;

	/*
	 * Query the page cache and look for the traces (cached history pages)
	 * that a sequential stream would leave behind.
	 */
	if (try_context_readahead(ractl->mapping, ra, index, req_size,
			max_pages))
		goto readit;

	/*
	 * standalone, small random read
	 * Read as is, and do not pollute the readahead state.
	 */
	do_page_cache_ra(ractl, req_size, 0);
	return;

initial_readahead:
	ra->start = index;
	ra->size = get_init_ra_size(req_size, max_pages);
	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
	/*
	 * Will this read hit the readahead marker made by itself?
	 * If so, trigger the readahead marker hit now, and merge
	 * the resulting next readahead window into the current one.
	 * Take care of maximum IO pages as above.
	 */
	if (index == ra->start && ra->size == ra->async_size) {
		add_pages = get_next_ra_size(ra, max_pages);
		if (ra->size + add_pages <= max_pages) {
			ra->async_size = add_pages;
			ra->size += add_pages;
		} else {
			ra->size = max_pages;
			ra->async_size = max_pages >> 1;
		}
	}

	ractl->_index = ra->start;
	page_cache_ra_order(ractl, ra, order);
}

void page_cache_sync_ra(struct readahead_control *ractl,
		unsigned long req_count)
{
	bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM);

	/*
	 * Even if readahead is disabled, issue this request as readahead
	 * as we'll need it to satisfy the requested range. The forced
	 * readahead will do the right thing and limit the read to just the
	 * requested range, which we'll set to 1 page for this case.
	 */
	if (!ractl->ra->ra_pages || blk_cgroup_congested()) {
		if (!ractl->file)
			return;
		req_count = 1;
		do_forced_ra = true;
	}

	/* be dumb */
	if (do_forced_ra) {
		force_page_cache_ra(ractl, req_count);
		return;
	}

	ondemand_readahead(ractl, NULL, req_count);
}
EXPORT_SYMBOL_GPL(page_cache_sync_ra);

void page_cache_async_ra(struct readahead_control *ractl,
		struct folio *folio, unsigned long req_count)
{
	/* no readahead */
	if (!ractl->ra->ra_pages)
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
	if (folio_test_writeback(folio))
		return;

	folio_clear_readahead(folio);

	if (blk_cgroup_congested())
		return;

	ondemand_readahead(ractl, folio, req_count);
}
EXPORT_SYMBOL_GPL(page_cache_async_ra);

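/*
 * A hedged sketch of how a read path drives the two entry points above;
 * "file", "mapping", "index", "folio" and "req_count" are illustrative,
 * and real callers normally use the wrappers in linux/pagemap.h:
 *
 *	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
 *
 *	if (!folio)
 *		page_cache_sync_ra(&ractl, req_count);
 *	else if (folio_test_readahead(folio))
 *		page_cache_async_ra(&ractl, folio, req_count);
 *
 * The sync form is used when the wanted folio is absent, the async form
 * when a folio carrying the readahead flag is found - the two triggers
 * described in the DOC comment at the top of this file.
 */
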
ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
{
	ssize_t ret;
	struct fd f;

	ret = -EBADF;
	f = fdget(fd);
	if (!f.file || !(f.file->f_mode & FMODE_READ))
		goto out;

	/*
	 * The readahead() syscall is intended to run only on files
	 * that can execute readahead. If readahead is not possible
	 * on this file, then we must return -EINVAL.
	 */
	ret = -EINVAL;
	if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
	    (!S_ISREG(file_inode(f.file)->i_mode) &&
	    !S_ISBLK(file_inode(f.file)->i_mode)))
		goto out;

	ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
out:
	fdput(f);
	return ret;
}

SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
	return ksys_readahead(fd, offset, count);
}

#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_READAHEAD)
COMPAT_SYSCALL_DEFINE4(readahead, int, fd, compat_arg_u64_dual(offset), size_t, count)
{
	return ksys_readahead(fd, compat_arg_u64_glue(offset), count);
}
#endif

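/*
 * From userspace the path above is reached via the readahead(2) syscall;
 * a hedged example (the path and length are illustrative):
 *
 *	int fd = open("/var/data/file", O_RDONLY);
 *
 *	if (readahead(fd, 0, 2 * 1024 * 1024) != 0)
 *		perror("readahead");
 *
 * The call is advisory: it asks the kernel to start reading the first
 * 2MB of the file into the page cache (subject to the limits applied in
 * force_page_cache_ra()), and fails with EBADF or EINVAL for descriptors
 * that cannot do readahead.
 */
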
/**
 * readahead_expand - Expand a readahead request
 * @ractl: The request to be expanded
 * @new_start: The revised start
 * @new_len: The revised size of the request
 *
 * Attempt to expand a readahead request outwards from the current size to the
 * specified size by inserting locked pages before and after the current window
 * to increase the size to the new window. This may involve the insertion of
 * THPs, in which case the window may get expanded even beyond what was
 * requested.
 *
 * The algorithm will stop if it encounters a conflicting page already in the
 * pagecache and leave a smaller expansion than requested.
 *
 * The caller must check for this by examining the revised @ractl object for a
 * different expansion than was requested.
 */
void readahead_expand(struct readahead_control *ractl,
		      loff_t new_start, size_t new_len)
{
	struct address_space *mapping = ractl->mapping;
	struct file_ra_state *ra = ractl->ra;
	pgoff_t new_index, new_nr_pages;
	gfp_t gfp_mask = readahead_gfp_mask(mapping);

	new_index = new_start / PAGE_SIZE;

	/* Expand the leading edge downwards */
	while (ractl->_index > new_index) {
		unsigned long index = ractl->_index - 1;
		struct folio *folio = xa_load(&mapping->i_pages, index);

		if (folio && !xa_is_value(folio))
			return; /* Folio apparently present */

		folio = filemap_alloc_folio(gfp_mask, 0);
		if (!folio)
			return;
		if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
			folio_put(folio);
			return;
		}
		if (unlikely(folio_test_workingset(folio)) &&
				!ractl->_workingset) {
			ractl->_workingset = true;
			psi_memstall_enter(&ractl->_pflags);
		}
		ractl->_nr_pages++;
		ractl->_index = folio->index;
	}

	new_len += new_start - readahead_pos(ractl);
	new_nr_pages = DIV_ROUND_UP(new_len, PAGE_SIZE);

	/* Expand the trailing edge upwards */
	while (ractl->_nr_pages < new_nr_pages) {
		unsigned long index = ractl->_index + ractl->_nr_pages;
		struct folio *folio = xa_load(&mapping->i_pages, index);

		if (folio && !xa_is_value(folio))
			return; /* Folio apparently present */

		folio = filemap_alloc_folio(gfp_mask, 0);
		if (!folio)
			return;
		if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
			folio_put(folio);
			return;
		}
		if (unlikely(folio_test_workingset(folio)) &&
				!ractl->_workingset) {
			ractl->_workingset = true;
			psi_memstall_enter(&ractl->_pflags);
		}
		ractl->_nr_pages++;
		if (ra) {
			ra->size++;
			ra->async_size++;
		}
	}
}
EXPORT_SYMBOL(readahead_expand);
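
/*
 * A hedged sketch of a readahead_expand() caller: a filesystem or
 * netfs-like library that must read in units larger than the current
 * window can widen the request to, say, a 256k block boundary before
 * submitting I/O (the block size and rounding are illustrative):
 *
 *	loff_t start = round_down(readahead_pos(ractl), 256 * 1024);
 *	size_t len = round_up(readahead_pos(ractl) + readahead_length(ractl),
 *			      256 * 1024) - start;
 *
 *	readahead_expand(ractl, start, len);
 *
 * As documented above, the expansion is best-effort, so the caller must
 * re-examine the ractl afterwards to learn the window it actually got.
 */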