// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 *		Initial version.
 */

/**
 * DOC: Readahead Overview
 *
 * Readahead is used to read content into the page cache before it is
 * explicitly requested by the application.  Readahead only ever
 * attempts to read pages that are not yet in the page cache.  If a
 * page is present but not up-to-date, readahead will not try to read
 * it.  In that case a simple ->readpage() will be requested.
 *
 * Readahead is triggered when an application read request (whether a
 * system call or a page fault) finds that the requested page is not in
 * the page cache, or that it is in the page cache and has the
 * %PG_readahead flag set.  This flag indicates that the page was loaded
 * as part of a previous readahead request and now that it has been
 * accessed, it is time for the next readahead.
 *
 * Each readahead request is partly a synchronous read and partly an
 * asynchronous readahead.  This is reflected in the struct
 * file_ra_state, which contains ->size being the total number of
 * pages, and ->async_size which is the number of pages in the async
 * section.  The first page in this async section will have
 * %PG_readahead set as a trigger for a subsequent readahead.  Once a
 * series of sequential reads has been established, there should be no
 * need for a synchronous component and all readahead requests will be
 * fully asynchronous.
 *
 * When either of the triggers causes a readahead, three numbers need to
 * be determined: the start of the region, the size of the region, and
 * the size of the async tail.
 *
 * The start of the region is simply the first page address at or after
 * the accessed address which is not currently populated in the page
 * cache.  This is found with a simple search in the page cache.
 *
 * The size of the async tail is determined by subtracting the size that
 * was explicitly requested from the determined request size, unless
 * this would be less than zero - then zero is used.  NOTE THIS
 * CALCULATION IS WRONG WHEN THE START OF THE REGION IS NOT THE ACCESSED
 * PAGE.
 *
 * The size of the region is normally determined from the size of the
 * previous readahead which loaded the preceding pages.  This may be
 * discovered from the struct file_ra_state for simple sequential reads,
 * or from examining the state of the page cache when multiple
 * sequential reads are interleaved.  Specifically: where the readahead
 * was triggered by the %PG_readahead flag, the size of the previous
 * readahead is assumed to be the number of pages from the triggering
 * page to the start of the new readahead.  In these cases, the size of
 * the previous readahead is scaled, often doubled, for the new
 * readahead, though see get_next_ra_size() for details.
 *
 * If the size of the previous read cannot be determined, the number of
 * preceding pages in the page cache is used to estimate the size of
 * a previous read.  This estimate could easily be misled by random
 * reads being coincidentally adjacent, so it is ignored unless it is
 * larger than the current request, and it is not scaled up, unless it
 * is at the start of the file.
 *
 * In general readahead is accelerated at the start of the file, as
 * reads from there are often sequential.  There are other minor
 * adjustments to the readahead size in various special cases and these
 * are best discovered by reading the code.
 *
 * The above calculation determines the readahead, to which any requested
 * read size may be added.
 *
 * Readahead requests are sent to the filesystem using the ->readahead()
 * address space operation, for which mpage_readahead() is a canonical
 * implementation.  ->readahead() should normally initiate reads on all
 * pages, but may fail to read any or all pages without causing an I/O
 * error.  The page cache reading code will issue a ->readpage() request
 * for any page which ->readahead() does not provide, and only an error
 * from this will be final.
 *
 * ->readahead() will generally call readahead_page() repeatedly to get
 * each page from those prepared for readahead.  It may fail to read a
 * page by:
 *
 * * not calling readahead_page() sufficiently many times, effectively
 *   ignoring some pages, as might be appropriate if the path to
 *   storage is congested.
 *
 * * failing to actually submit a read request for a given page,
 *   possibly due to insufficient resources, or
 *
 * * getting an error during subsequent processing of a request.
 *
 * In the last two cases, the page should be unlocked to indicate that
 * the read attempt has failed.  In the first case the page will be
 * unlocked by the caller.  (A sketch of such an implementation follows
 * this comment.)
 *
 * Those pages not in the final ``async_size`` of the request should be
 * considered to be important and ->readahead() should not fail them due
 * to congestion or temporary resource unavailability, but should wait
 * for necessary resources (e.g. memory or indexing information) to
 * become available.  Pages in the final ``async_size`` may be
 * considered less urgent and failure to read them is more acceptable.
 * They will eventually be read individually using ->readpage().
 */

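/*
 * A minimal sketch of a ->readahead() implementation of the shape the
 * DOC comment above describes.  This is illustrative only:
 * example_submit_read() is a made-up helper standing in for however a
 * real filesystem starts I/O on a single page.
 *
 *	static void example_readahead(struct readahead_control *rac)
 *	{
 *		struct page *page;
 *
 *		while ((page = readahead_page(rac))) {
 *			int err = example_submit_read(page);
 *
 *			if (err)
 *				unlock_page(page);
 *			put_page(page);
 *		}
 *	}
 *
 * On submission failure the page is unlocked to signal that the read
 * attempt failed; on success the I/O completion path is responsible for
 * unlocking.  Either way the function drops its page reference.
 */
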
#include <linux/kernel.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/mm_inline.h>
#include <linux/blk-cgroup.h>
#include <linux/fadvise.h>
#include <linux/sched/mm.h>

#include "internal.h"

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
					     struct page *page)
{
	if (page_has_private(page)) {
		if (!trylock_page(page))
			BUG();
		page->mapping = mapping;
		do_invalidatepage(page, 0, PAGE_SIZE);
		page->mapping = NULL;
		unlock_page(page);
	}
	put_page(page);
}

/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
					      struct list_head *pages)
{
	struct page *victim;

	while (!list_empty(pages)) {
		victim = lru_to_page(pages);
		list_del(&victim->lru);
		read_cache_pages_invalidate_page(mapping, victim);
	}
}

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 *
 * Returns: %0 on success, error return by @filler otherwise
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	int ret = 0;

	while (!list_empty(pages)) {
		page = lru_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache_lru(page, mapping, page->index,
				readahead_gfp_mask(mapping))) {
			read_cache_pages_invalidate_page(mapping, page);
			continue;
		}
		put_page(page);

		ret = filler(data, page);
		if (unlikely(ret)) {
			read_cache_pages_invalidate_pages(mapping, pages);
			break;
		}
		task_io_account_read(PAGE_SIZE);
	}
	return ret;
}

EXPORT_SYMBOL(read_cache_pages);

static void read_pages(struct readahead_control *rac, struct list_head *pages,
		bool skip_page)
{
	const struct address_space_operations *aops = rac->mapping->a_ops;
	struct page *page;
	struct blk_plug plug;

	if (!readahead_count(rac))
		goto out;

	blk_start_plug(&plug);

	if (aops->readahead) {
		aops->readahead(rac);
		/* Clean up the remaining pages */
		while ((page = readahead_page(rac))) {
			unlock_page(page);
			put_page(page);
		}
	} else if (aops->readpages) {
		aops->readpages(rac->file, rac->mapping, pages,
				readahead_count(rac));
		/* Clean up the remaining pages */
		put_pages_list(pages);
		rac->_index += rac->_nr_pages;
		rac->_nr_pages = 0;
	} else {
		while ((page = readahead_page(rac))) {
			aops->readpage(rac->file, page);
			put_page(page);
		}
	}

	blk_finish_plug(&plug);

	BUG_ON(!list_empty(pages));
	BUG_ON(readahead_count(rac));

out:
	if (skip_page)
		rac->_index++;
}

/**
 * page_cache_ra_unbounded - Start unchecked readahead.
 * @ractl: Readahead control.
 * @nr_to_read: The number of pages to read.
 * @lookahead_size: Where to start the next readahead.
 *
 * This function is for filesystems to call when they want to start
 * readahead beyond a file's stated i_size.  This is almost certainly
 * not the function you want to call.  Use page_cache_async_readahead()
 * or page_cache_sync_readahead() instead.
 *
 * Context: File is referenced by caller.  Mutexes may be held by caller.
 * May sleep, but will not reenter filesystem to reclaim memory.
 */
void page_cache_ra_unbounded(struct readahead_control *ractl,
		unsigned long nr_to_read, unsigned long lookahead_size)
{
	struct address_space *mapping = ractl->mapping;
	unsigned long index = readahead_index(ractl);
	LIST_HEAD(page_pool);
	gfp_t gfp_mask = readahead_gfp_mask(mapping);
	unsigned long i;

	/*
	 * Partway through the readahead operation, we will have added
	 * locked pages to the page cache, but will not yet have submitted
	 * them for I/O.  Adding another page may need to allocate memory,
	 * which can trigger memory reclaim.  Telling the VM we're in
	 * the middle of a filesystem operation will cause it to not
	 * touch file-backed pages, preventing a deadlock.  Most (all?)
	 * filesystems already specify __GFP_NOFS in their mapping's
	 * gfp_mask, but let's be explicit here.
	 */
	unsigned int nofs = memalloc_nofs_save();

	filemap_invalidate_lock_shared(mapping);
	/*
	 * Preallocate as many pages as we will need.
	 */
	for (i = 0; i < nr_to_read; i++) {
		struct folio *folio = xa_load(&mapping->i_pages, index + i);

		if (folio && !xa_is_value(folio)) {
			/*
			 * Page already present?  Kick off the current batch
			 * of contiguous pages before continuing with the
			 * next batch.  This page may be the one we would
			 * have intended to mark as Readahead, but we don't
			 * have a stable reference to this page, and it's
			 * not worth getting one just for that.
			 */
			read_pages(ractl, &page_pool, true);
			i = ractl->_index + ractl->_nr_pages - index - 1;
			continue;
		}

		folio = filemap_alloc_folio(gfp_mask, 0);
		if (!folio)
			break;
		if (mapping->a_ops->readpages) {
			folio->index = index + i;
			list_add(&folio->lru, &page_pool);
		} else if (filemap_add_folio(mapping, folio, index + i,
					gfp_mask) < 0) {
			folio_put(folio);
			read_pages(ractl, &page_pool, true);
			i = ractl->_index + ractl->_nr_pages - index - 1;
			continue;
		}
		if (i == nr_to_read - lookahead_size)
			folio_set_readahead(folio);
		ractl->_nr_pages++;
	}

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	read_pages(ractl, &page_pool, false);
	filemap_invalidate_unlock_shared(mapping);
	memalloc_nofs_restore(nofs);
}
EXPORT_SYMBOL_GPL(page_cache_ra_unbounded);

/*
 * do_page_cache_ra() actually reads a chunk of disk.  It allocates
 * the pages first, then submits them for I/O.  This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 */
void do_page_cache_ra(struct readahead_control *ractl,
		unsigned long nr_to_read, unsigned long lookahead_size)
{
	struct inode *inode = ractl->mapping->host;
	unsigned long index = readahead_index(ractl);
	loff_t isize = i_size_read(inode);
	pgoff_t end_index;	/* The last page we want to read */

	if (isize == 0)
		return;

	end_index = (isize - 1) >> PAGE_SHIFT;
	if (index > end_index)
		return;
	/* Don't read past the page containing the last byte of the file */
	if (nr_to_read > end_index - index)
		nr_to_read = end_index - index + 1;

	page_cache_ra_unbounded(ractl, nr_to_read, lookahead_size);
}
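
/*
 * Worked example of the clamping above, assuming 4k pages: for a
 * 10000-byte file, end_index = 9999 >> PAGE_SHIFT = 2, so a request for
 * 8 pages starting at index 1 is trimmed to end_index - index + 1 = 2
 * pages.
 */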

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
void force_page_cache_ra(struct readahead_control *ractl,
		unsigned long nr_to_read)
{
	struct address_space *mapping = ractl->mapping;
	struct file_ra_state *ra = ractl->ra;
	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
	unsigned long max_pages, index;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages &&
			!mapping->a_ops->readahead))
		return;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	index = readahead_index(ractl);
	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
	nr_to_read = min_t(unsigned long, nr_to_read, max_pages);
	while (nr_to_read) {
		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		ractl->_index = index;
		do_page_cache_ra(ractl, this_chunk, 0);

		index += this_chunk;
		nr_to_read -= this_chunk;
	}
}

/*
 * Set the initial window size, round to next power of 2 and square
 * for small size, x 4 for medium, and x 2 for large
 * for 128k (32 page) max ra
 * 1-2 page = 16k, 3-4 page = 32k, 5-8 page = 64k, > 8 page = 128k initial
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}

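/*
 * Worked example, assuming the default 128k (max = 32 page) window:
 * a first read of 5 pages rounds up to newsize = 8, which is not
 * <= max/32 but is <= max/4, so the initial window becomes
 * 8 * 2 = 16 pages (64k), matching the table above.
 */
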
/*
 * Get the previous window size, ramp it up, and
 * return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
				      unsigned long max)
{
	unsigned long cur = ra->size;

	if (cur < max / 16)
		return 4 * cur;
	if (cur <= max / 2)
		return 2 * cur;
	return max;
}

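/*
 * Worked example of the ramp-up, again with max = 32 pages: a 1-page
 * window is below max/16 so it quadruples to 4; after that each window
 * only doubles (4 -> 8 -> 16 -> 32) and is then clamped at max.
 */
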
/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window.  Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * be invalidating each other's readahead state.  So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as readahead
 * indicator.  The flag won't be set on already cached pages, to avoid the
 * readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads.  Note that the readahead algorithm checks loosely
 * for sequential patterns.  Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on I/O request size and the max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows
 * down as it approaches max_readahead.
 */

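/*
 * An illustrative trace for a purely sequential reader issuing 1-page
 * reads, assuming max_readahead = 32 pages (the numbers follow from
 * get_init_ra_size()/get_next_ra_size() above):
 *
 *	miss on page 0  -> sync ra:  start=0,  size=4,  async_size=3,
 *	                             marker on page 1
 *	marker hit @1   -> async ra: start=4,  size=8,  async_size=8,
 *	                             marker on page 4
 *	marker hit @4   -> async ra: start=12, size=16, async_size=16,
 *	                             marker on page 12
 *	marker hit @12  -> async ra: start=28, size=32, async_size=32,
 *	                             marker on page 28
 *
 * From here on the window stays clamped at max_readahead and all
 * further readahead is fully pipelined.
 */
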
/*
 * Count contiguously cached pages from @index-1 to @index-@max,
 * this count is a conservative estimation of
 * 	- length of the sequential read sequence, or
 * 	- thrashing threshold in memory tight systems
 */
static pgoff_t count_history_pages(struct address_space *mapping,
				   pgoff_t index, unsigned long max)
{
	pgoff_t head;

	rcu_read_lock();
	head = page_cache_prev_miss(mapping, index - 1, max);
	rcu_read_unlock();

	return index - 1 - head;
}

/*
 * page cache context based read-ahead
 */
static int try_context_readahead(struct address_space *mapping,
				 struct file_ra_state *ra,
				 pgoff_t index,
				 unsigned long req_size,
				 unsigned long max)
{
	pgoff_t size;

	size = count_history_pages(mapping, index, max);

	/*
	 * not enough history pages:
	 * it could be a random read
	 */
	if (size <= req_size)
		return 0;

	/*
	 * starts from beginning of file:
	 * it is a strong indication of long-run stream (or whole-file-read)
	 */
	if (size >= index)
		size *= 2;

	ra->start = index;
	ra->size = min(size + req_size, max);
	ra->async_size = 1;

	return 1;
}

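/*
 * For example (an assumed scenario): a 2-page read at index 100 that
 * finds pages 90-99 already cached gets size = 10 from
 * count_history_pages().  That exceeds the 2-page request, and since
 * 10 < 100 no start-of-file doubling applies, so the window becomes
 * start=100, size=min(10 + 2, max), async_size=1.
 */
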
/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static void ondemand_readahead(struct readahead_control *ractl,
		bool hit_readahead_marker, unsigned long req_size)
{
	struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host);
	struct file_ra_state *ra = ractl->ra;
	unsigned long max_pages = ra->ra_pages;
	unsigned long add_pages;
	unsigned long index = readahead_index(ractl);
	pgoff_t prev_index;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	if (req_size > max_pages && bdi->io_pages > max_pages)
		max_pages = min(req_size, bdi->io_pages);

	/*
	 * start of file
	 */
	if (!index)
		goto initial_readahead;

	/*
	 * It's the expected callback index, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	if ((index == (ra->start + ra->size - ra->async_size) ||
	     index == (ra->start + ra->size))) {
		ra->start += ra->size;
		ra->size = get_next_ra_size(ra, max_pages);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * Hit a marked page without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals the
	 * readahead size.  Ramp it up and use it as the new readahead size.
	 */
	if (hit_readahead_marker) {
		pgoff_t start;

		rcu_read_lock();
		start = page_cache_next_miss(ractl->mapping, index + 1,
				max_pages);
		rcu_read_unlock();

		if (!start || start - index > max_pages)
			return;

		ra->start = start;
		ra->size = start - index;	/* old async_size */
		ra->size += req_size;
		ra->size = get_next_ra_size(ra, max_pages);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * oversize read
	 */
	if (req_size > max_pages)
		goto initial_readahead;

	/*
	 * sequential cache miss
	 * trivial case: (index - prev_index) == 1
	 * unaligned reads: (index - prev_index) == 0
	 */
	prev_index = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
	if (index - prev_index <= 1UL)
		goto initial_readahead;

	/*
	 * Query the page cache and look for the traces (cached history pages)
	 * that a sequential stream would leave behind.
	 */
	if (try_context_readahead(ractl->mapping, ra, index, req_size,
			max_pages))
		goto readit;

	/*
	 * standalone, small random read
	 * Read as is, and do not pollute the readahead state.
	 */
	do_page_cache_ra(ractl, req_size, 0);
	return;

initial_readahead:
	ra->start = index;
	ra->size = get_init_ra_size(req_size, max_pages);
	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
	/*
	 * Will this read hit the readahead marker made by itself?
	 * If so, trigger the readahead marker hit now, and merge
	 * the resulting next readahead window into the current one.
	 * Take care of maximum IO pages as above.
	 */
	if (index == ra->start && ra->size == ra->async_size) {
		add_pages = get_next_ra_size(ra, max_pages);
		if (ra->size + add_pages <= max_pages) {
			ra->async_size = add_pages;
			ra->size += add_pages;
		} else {
			ra->size = max_pages;
			ra->async_size = max_pages >> 1;
		}
	}

	ractl->_index = ra->start;
	do_page_cache_ra(ractl, ra->size, ra->async_size);
}

void page_cache_sync_ra(struct readahead_control *ractl,
		unsigned long req_count)
{
	bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM);

	/*
	 * Even if read-ahead is disabled, issue this request as read-ahead
	 * as we'll need it to satisfy the requested range.  The forced
	 * read-ahead will do the right thing and limit the read to just the
	 * requested range, which we'll set to 1 page for this case.
	 */
	if (!ractl->ra->ra_pages || blk_cgroup_congested()) {
		if (!ractl->file)
			return;
		req_count = 1;
		do_forced_ra = true;
	}

	/* be dumb */
	if (do_forced_ra) {
		force_page_cache_ra(ractl, req_count);
		return;
	}

	/* do read-ahead */
	ondemand_readahead(ractl, false, req_count);
}
EXPORT_SYMBOL_GPL(page_cache_sync_ra);

void page_cache_async_ra(struct readahead_control *ractl,
		struct folio *folio, unsigned long req_count)
{
	/* no read-ahead */
	if (!ractl->ra->ra_pages)
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
	if (folio_test_writeback(folio))
		return;

	folio_clear_readahead(folio);

	/*
	 * Defer asynchronous read-ahead on IO congestion.
	 */
	if (inode_read_congested(ractl->mapping->host))
		return;

	if (blk_cgroup_congested())
		return;

	/* do read-ahead */
	ondemand_readahead(ractl, true, req_count);
}
EXPORT_SYMBOL_GPL(page_cache_async_ra);

ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
{
	ssize_t ret;
	struct fd f;

	ret = -EBADF;
	f = fdget(fd);
	if (!f.file || !(f.file->f_mode & FMODE_READ))
		goto out;

	/*
	 * The readahead() syscall is intended to run only on files
	 * that can execute readahead.  If readahead is not possible
	 * on this file, then we must return -EINVAL.
	 */
	ret = -EINVAL;
	if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
	    !S_ISREG(file_inode(f.file)->i_mode))
		goto out;

	ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
out:
	fdput(f);
	return ret;
}

SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
	return ksys_readahead(fd, offset, count);
}
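
/*
 * Illustrative userspace counterpart of the syscall above ("data.bin"
 * is an arbitrary example file): prefetch the first megabyte of a file
 * before reading it.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	int fd = open("data.bin", O_RDONLY);
 *
 *	if (fd >= 0)
 *		readahead(fd, 0, 1024 * 1024);
 */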

/**
 * readahead_expand - Expand a readahead request
 * @ractl: The request to be expanded
 * @new_start: The revised start
 * @new_len: The revised size of the request
 *
 * Attempt to expand a readahead request outwards from the current size to the
 * specified size by inserting locked pages before and after the current window
 * to increase the size to the new window.  This may involve the insertion of
 * THPs, in which case the window may get expanded even beyond what was
 * requested.
 *
 * The algorithm will stop if it encounters a conflicting page already in the
 * pagecache and leave a smaller expansion than requested.
 *
 * The caller must check for this by examining the revised @ractl object for a
 * different expansion than was requested.
 */
void readahead_expand(struct readahead_control *ractl,
		      loff_t new_start, size_t new_len)
{
	struct address_space *mapping = ractl->mapping;
	struct file_ra_state *ra = ractl->ra;
	pgoff_t new_index, new_nr_pages;
	gfp_t gfp_mask = readahead_gfp_mask(mapping);

	new_index = new_start / PAGE_SIZE;

	/* Expand the leading edge downwards */
	while (ractl->_index > new_index) {
		unsigned long index = ractl->_index - 1;
		struct page *page = xa_load(&mapping->i_pages, index);

		if (page && !xa_is_value(page))
			return; /* Page apparently present */

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return;
		if (add_to_page_cache_lru(page, mapping, index, gfp_mask) < 0) {
			put_page(page);
			return;
		}

		ractl->_nr_pages++;
		ractl->_index = page->index;
	}

	new_len += new_start - readahead_pos(ractl);
	new_nr_pages = DIV_ROUND_UP(new_len, PAGE_SIZE);

	/* Expand the trailing edge upwards */
	while (ractl->_nr_pages < new_nr_pages) {
		unsigned long index = ractl->_index + ractl->_nr_pages;
		struct page *page = xa_load(&mapping->i_pages, index);

		if (page && !xa_is_value(page))
			return; /* Page apparently present */

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return;
		if (add_to_page_cache_lru(page, mapping, index, gfp_mask) < 0) {
			put_page(page);
			return;
		}
		ractl->_nr_pages++;
		if (ra) {
			ra->size++;
			ra->async_size++;
		}
	}
}
EXPORT_SYMBOL(readahead_expand);
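
/*
 * A usage sketch for readahead_expand() (example_readahead() and
 * EXAMPLE_BLOCK_SIZE are made up; the block size is assumed to be a
 * power of two): a filesystem whose compression blocks span several
 * pages might round the request out to block boundaries before
 * servicing it, then read whatever (possibly smaller than requested)
 * window the revised ractl describes.
 *
 *	static void example_readahead(struct readahead_control *ractl)
 *	{
 *		loff_t start = round_down(readahead_pos(ractl),
 *					  EXAMPLE_BLOCK_SIZE);
 *		size_t len = round_up(readahead_pos(ractl) +
 *				      readahead_length(ractl),
 *				      EXAMPLE_BLOCK_SIZE) - start;
 *
 *		readahead_expand(ractl, start, len);
 *	}
 */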