// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/mm_inline.h>
#include <linux/blk-cgroup.h>
#include <linux/fadvise.h>
#include <linux/sched/mm.h>

#include "internal.h"

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);
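
/*
 * Illustrative sketch (not part of this file): a caller satisfying the
 * memset requirement documented above; "filp" is a hypothetical
 * struct file pointer.
 *
 *	memset(&filp->f_ra, 0, sizeof(filp->f_ra));
 *	file_ra_state_init(&filp->f_ra, filp->f_mapping);
 */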

/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
					     struct page *page)
{
	if (page_has_private(page)) {
		if (!trylock_page(page))
			BUG();
		page->mapping = mapping;
		do_invalidatepage(page, 0, PAGE_SIZE);
		page->mapping = NULL;
		unlock_page(page);
	}
	put_page(page);
}

/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
					      struct list_head *pages)
{
	struct page *victim;

	while (!list_empty(pages)) {
		victim = lru_to_page(pages);
		list_del(&victim->lru);
		read_cache_pages_invalidate_page(mapping, victim);
	}
}

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 *
 * Returns: %0 on success, or the error returned by @filler.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	int ret = 0;

	while (!list_empty(pages)) {
		page = lru_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache_lru(page, mapping, page->index,
				readahead_gfp_mask(mapping))) {
			read_cache_pages_invalidate_page(mapping, page);
			continue;
		}
		put_page(page);

		ret = filler(data, page);
		if (unlikely(ret)) {
			read_cache_pages_invalidate_pages(mapping, pages);
			break;
		}
		task_io_account_read(PAGE_SIZE);
	}
	return ret;
}

EXPORT_SYMBOL(read_cache_pages);
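
/*
 * Illustrative sketch (not part of this file): a minimal @filler callback
 * of the shape read_cache_pages() expects.  "my_read_block" is a
 * hypothetical helper; on success the I/O completion path would unlock
 * the page, so the filler only unlocks on error.
 *
 *	static int my_filler(void *data, struct page *page)
 *	{
 *		int err = my_read_block(data, page);
 *
 *		if (err)
 *			unlock_page(page);
 *		return err;
 *	}
 *
 *	err = read_cache_pages(mapping, &pages, my_filler, my_data);
 */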

static void read_pages(struct readahead_control *rac, struct list_head *pages,
		bool skip_page)
{
	const struct address_space_operations *aops = rac->mapping->a_ops;
	struct page *page;
	struct blk_plug plug;

	if (!readahead_count(rac))
		goto out;

	blk_start_plug(&plug);

	if (aops->readahead) {
		aops->readahead(rac);
		/* Clean up the remaining pages */
		while ((page = readahead_page(rac))) {
			unlock_page(page);
			put_page(page);
		}
	} else if (aops->readpages) {
		aops->readpages(rac->file, rac->mapping, pages,
				readahead_count(rac));
		/* Clean up the remaining pages */
		put_pages_list(pages);
		rac->_index += rac->_nr_pages;
		rac->_nr_pages = 0;
	} else {
		while ((page = readahead_page(rac))) {
			aops->readpage(rac->file, page);
			put_page(page);
		}
	}

	blk_finish_plug(&plug);

	BUG_ON(pages && !list_empty(pages));
	BUG_ON(readahead_count(rac));

out:
	if (skip_page)
		rac->_index++;
}
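
/*
 * Illustrative sketch (not part of this file): the ->readahead() branch
 * above expects implementations to consume pages via readahead_page(),
 * leaving untouched pages behind for the cleanup loop.  Roughly:
 *
 *	static void my_readahead(struct readahead_control *rac)
 *	{
 *		struct page *page;
 *
 *		while ((page = readahead_page(rac))) {
 *			my_start_read(page);	// hypothetical I/O submit;
 *						// completion unlocks the page
 *			put_page(page);		// drop readahead_page()'s ref
 *		}
 *	}
 */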

/**
 * page_cache_ra_unbounded - Start unchecked readahead.
 * @ractl: Readahead control.
 * @nr_to_read: The number of pages to read.
 * @lookahead_size: Where to start the next readahead.
 *
 * This function is for filesystems to call when they want to start
 * readahead beyond a file's stated i_size.  This is almost certainly
 * not the function you want to call.  Use page_cache_async_readahead()
 * or page_cache_sync_readahead() instead.
 *
 * Context: File is referenced by caller.  Mutexes may be held by caller.
 * May sleep, but will not reenter filesystem to reclaim memory.
 */
void page_cache_ra_unbounded(struct readahead_control *ractl,
		unsigned long nr_to_read, unsigned long lookahead_size)
{
	struct address_space *mapping = ractl->mapping;
	unsigned long index = readahead_index(ractl);
	LIST_HEAD(page_pool);
	gfp_t gfp_mask = readahead_gfp_mask(mapping);
	unsigned long i;

	/*
	 * Partway through the readahead operation, we will have added
	 * locked pages to the page cache, but will not yet have submitted
	 * them for I/O.  Adding another page may need to allocate memory,
	 * which can trigger memory reclaim.  Telling the VM we're in
	 * the middle of a filesystem operation will cause it to not
	 * touch file-backed pages, preventing a deadlock.  Most (all?)
	 * filesystems already specify __GFP_NOFS in their mapping's
	 * gfp_mask, but let's be explicit here.
	 */
	unsigned int nofs = memalloc_nofs_save();

	filemap_invalidate_lock_shared(mapping);
	/*
	 * Preallocate as many pages as we will need.
	 */
	for (i = 0; i < nr_to_read; i++) {
		struct folio *folio = xa_load(&mapping->i_pages, index + i);

		if (folio && !xa_is_value(folio)) {
			/*
			 * Page already present?  Kick off the current batch
			 * of contiguous pages before continuing with the
			 * next batch.  This page may be the one we would
			 * have intended to mark as Readahead, but we don't
			 * have a stable reference to this page, and it's
			 * not worth getting one just for that.
			 */
			read_pages(ractl, &page_pool, true);
			i = ractl->_index + ractl->_nr_pages - index - 1;
			continue;
		}

		folio = filemap_alloc_folio(gfp_mask, 0);
		if (!folio)
			break;
		if (mapping->a_ops->readpages) {
			folio->index = index + i;
			list_add(&folio->lru, &page_pool);
		} else if (filemap_add_folio(mapping, folio, index + i,
					gfp_mask) < 0) {
			folio_put(folio);
			read_pages(ractl, &page_pool, true);
			i = ractl->_index + ractl->_nr_pages - index - 1;
			continue;
		}
		if (i == nr_to_read - lookahead_size)
			folio_set_readahead(folio);
		ractl->_nr_pages++;
	}

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	read_pages(ractl, &page_pool, false);
	filemap_invalidate_unlock_shared(mapping);
	memalloc_nofs_restore(nofs);
}
EXPORT_SYMBOL_GPL(page_cache_ra_unbounded);
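
/*
 * Illustrative sketch (not part of this file): a filesystem that wants to
 * warm up pages past i_size (the use case this export serves) might call,
 * with "ractl" already positioned at the desired index:
 *
 *	page_cache_ra_unbounded(ractl, nr_pages, 0);
 *
 * Ordinary code paths should use page_cache_{sync,async}_readahead(),
 * which clamp the request to i_size via do_page_cache_ra() below.
 */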

/*
 * do_page_cache_ra() actually reads a chunk of disk.  It allocates
 * the pages first, then submits them for I/O.  This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 */
void do_page_cache_ra(struct readahead_control *ractl,
		unsigned long nr_to_read, unsigned long lookahead_size)
{
	struct inode *inode = ractl->mapping->host;
	unsigned long index = readahead_index(ractl);
	loff_t isize = i_size_read(inode);
	pgoff_t end_index;	/* The last page we want to read */

	if (isize == 0)
		return;

	end_index = (isize - 1) >> PAGE_SHIFT;
	if (index > end_index)
		return;
	/* Don't read past the page containing the last byte of the file */
	if (nr_to_read > end_index - index)
		nr_to_read = end_index - index + 1;

	page_cache_ra_unbounded(ractl, nr_to_read, lookahead_size);
}
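
/*
 * Worked example of the clamping above (illustrative numbers, 4kB pages):
 * isize = 9000 bytes gives end_index = (9000 - 1) >> 12 = 2.  A request at
 * index = 1 for nr_to_read = 8 has nr_to_read > end_index - index (= 1),
 * so it is trimmed to end_index - index + 1 = 2 pages: pages 1 and 2,
 * the latter holding the final byte of the file.
 */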

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
void force_page_cache_ra(struct readahead_control *ractl,
		unsigned long nr_to_read)
{
	struct address_space *mapping = ractl->mapping;
	struct file_ra_state *ra = ractl->ra;
	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
	unsigned long max_pages, index;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages &&
			!mapping->a_ops->readahead))
		return;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	index = readahead_index(ractl);
	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
	nr_to_read = min_t(unsigned long, nr_to_read, max_pages);
	while (nr_to_read) {
		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		ractl->_index = index;
		do_page_cache_ra(ractl, this_chunk, 0);

		index += this_chunk;
		nr_to_read -= this_chunk;
	}
}
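
/*
 * Worked example of the chunking above (illustrative numbers, 4kB pages):
 * one chunk is (2 * 1024 * 1024) / 4096 = 512 pages, so a forced request
 * for 1536 pages that survives the max_pages clamp is issued as three
 * do_page_cache_ra() calls of 512 pages each.
 */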

/*
 * Set the initial window size: round the request size up to the next power
 * of 2, then scale it up (x4 for small sizes, x2 for medium) and cap it at
 * max.  For a 128k (32 page) max readahead this gives:
 * 1-2 page = 16k, 3-4 page = 32k, 5-8 page = 64k, > 8 page = 128k initial
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}

/*
 * Get the previous window size, ramp it up, and
 * return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
				      unsigned long max)
{
	unsigned long cur = ra->size;

	if (cur < max / 16)
		return 4 * cur;
	if (cur <= max / 2)
		return 2 * cur;
	return max;
}

/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window. Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * be invalidating each other's readahead state. So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as readahead
 * indicator. The flag won't be set on already cached pages, to avoid the
 * readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads. Note that the readahead algorithm checks loosely
 * for sequential patterns. Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on I/O request size and the max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows
 * down as it approaches max_readahead.
 */
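
/*
 * Worked example (illustrative): after a readahead with start = 100,
 * size = 32 and async_size = 16, PG_readahead lands on page
 * 100 + 32 - 16 = 116.  When the application later touches page 116,
 * page_cache_async_ra() submits the next window while pages 116-131 are
 * still being consumed, keeping the disk busy during think time.
 */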

/*
 * Count contiguously cached pages from @index-1 to @index-@max.
 * This count is a conservative estimate of
 * - the length of the sequential read sequence, or
 * - the thrashing threshold in memory tight systems
 */
static pgoff_t count_history_pages(struct address_space *mapping,
				   pgoff_t index, unsigned long max)
{
	pgoff_t head;

	rcu_read_lock();
	head = page_cache_prev_miss(mapping, index - 1, max);
	rcu_read_unlock();

	return index - 1 - head;
}

/*
 * page cache context based read-ahead
 */
static int try_context_readahead(struct address_space *mapping,
				 struct file_ra_state *ra,
				 pgoff_t index,
				 unsigned long req_size,
				 unsigned long max)
{
	pgoff_t size;

	size = count_history_pages(mapping, index, max);

	/*
	 * not enough history pages:
	 * it could be a random read
	 */
	if (size <= req_size)
		return 0;

	/*
	 * starts from beginning of file:
	 * it is a strong indication of long-run stream (or whole-file-read)
	 */
	if (size >= index)
		size *= 2;

	ra->start = index;
	ra->size = min(size + req_size, max);
	ra->async_size = 1;

	return 1;
}
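
/*
 * Worked example (illustrative): with pages 90-99 already cached, a read
 * at index = 100 with req_size = 4 and max = 32 finds a history of 10
 * contiguous pages.  Since 10 > 4 and 10 < 100 (not a from-the-start
 * stream), the window becomes start = 100, size = min(10 + 4, 32) = 14,
 * async_size = 1.
 */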

/*
 * There are some parts of the kernel which assume that PMD entries
 * are exactly HPAGE_PMD_ORDER.  Those should be fixed, but until then,
 * limit the maximum allocation order to PMD size.  I'm not aware of any
 * assumptions about maximum order if THP are disabled, but 8 seems like
 * a good order (that's 1MB if you're using 4kB pages)
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define MAX_PAGECACHE_ORDER	HPAGE_PMD_ORDER
#else
#define MAX_PAGECACHE_ORDER	8
#endif

static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index,
		pgoff_t mark, unsigned int order, gfp_t gfp)
{
	int err;
	struct folio *folio = filemap_alloc_folio(gfp, order);

	if (!folio)
		return -ENOMEM;
	if (mark - index < (1UL << order))
		folio_set_readahead(folio);
	err = filemap_add_folio(ractl->mapping, folio, index, gfp);
	if (err)
		folio_put(folio);
	else
		ractl->_nr_pages += 1UL << order;
	return err;
}

static void page_cache_ra_order(struct readahead_control *ractl,
		struct file_ra_state *ra, unsigned int new_order)
{
	struct address_space *mapping = ractl->mapping;
	pgoff_t index = readahead_index(ractl);
	pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
	pgoff_t mark = index + ra->size - ra->async_size;
	int err = 0;
	gfp_t gfp = readahead_gfp_mask(mapping);

	if (!mapping_large_folio_support(mapping) || ra->size < 4)
		goto fallback;

	limit = min(limit, index + ra->size - 1);

	if (new_order < MAX_PAGECACHE_ORDER) {
		new_order += 2;
		if (new_order > MAX_PAGECACHE_ORDER)
			new_order = MAX_PAGECACHE_ORDER;
		while ((1 << new_order) > ra->size)
			new_order--;
	}

	while (index <= limit) {
		unsigned int order = new_order;

		/* Align with smaller pages if needed */
		if (index & ((1UL << order) - 1)) {
			order = __ffs(index);
			if (order == 1)
				order = 0;
		}
		/* Don't allocate pages past EOF */
		while (index + (1UL << order) - 1 > limit) {
			if (--order == 1)
				order = 0;
		}
		err = ra_alloc_folio(ractl, index, mark, order, gfp);
		if (err)
			break;
		index += 1UL << order;
	}

	if (index > limit) {
		ra->size += index - limit - 1;
		ra->async_size += index - limit - 1;
	}

	read_pages(ractl, NULL, false);

	/*
	 * If there were already pages in the page cache, then we may have
	 * left some gaps.  Let the regular readahead code take care of this
	 * situation.
	 */
	if (!err)
		return;
fallback:
	do_page_cache_ra(ractl, ra->size, ra->async_size);
}
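
/*
 * Worked example of the alignment logic above (illustrative): with
 * new_order = 4 and index = 40, the index is not 16-page aligned, so
 * order = __ffs(40) = 3 and an 8-page folio is placed at index 40.  The
 * next index, 48, is 16-page aligned and gets a full order-4 folio
 * (pages 48-63), EOF permitting.
 */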

/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static void ondemand_readahead(struct readahead_control *ractl,
		struct folio *folio, unsigned long req_size)
{
	struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host);
	struct file_ra_state *ra = ractl->ra;
	unsigned long max_pages = ra->ra_pages;
	unsigned long add_pages;
	unsigned long index = readahead_index(ractl);
	pgoff_t prev_index;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	if (req_size > max_pages && bdi->io_pages > max_pages)
		max_pages = min(req_size, bdi->io_pages);

	/*
	 * start of file
	 */
	if (!index)
		goto initial_readahead;

	/*
	 * It's the expected callback index, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	if ((index == (ra->start + ra->size - ra->async_size) ||
	     index == (ra->start + ra->size))) {
		ra->start += ra->size;
		ra->size = get_next_ra_size(ra, max_pages);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * Hit a marked folio without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals the
	 * readahead size.  Ramp it up and use it as the new readahead size.
	 */
	if (folio) {
		pgoff_t start;

		rcu_read_lock();
		start = page_cache_next_miss(ractl->mapping, index + 1,
				max_pages);
		rcu_read_unlock();

		if (!start || start - index > max_pages)
			return;

		ra->start = start;
		ra->size = start - index;	/* old async_size */
		ra->size += req_size;
		ra->size = get_next_ra_size(ra, max_pages);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * oversize read
	 */
	if (req_size > max_pages)
		goto initial_readahead;

	/*
	 * sequential cache miss
	 * trivial case: (index - prev_index) == 1
	 * unaligned reads: (index - prev_index) == 0
	 */
	prev_index = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
	if (index - prev_index <= 1UL)
		goto initial_readahead;

	/*
	 * Query the page cache and look for the traces (cached history pages)
	 * that a sequential stream would leave behind.
	 */
	if (try_context_readahead(ractl->mapping, ra, index, req_size,
			max_pages))
		goto readit;

	/*
	 * standalone, small random read
	 * Read as is, and do not pollute the readahead state.
	 */
	do_page_cache_ra(ractl, req_size, 0);
	return;

initial_readahead:
	ra->start = index;
	ra->size = get_init_ra_size(req_size, max_pages);
	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
	/*
	 * Will this read hit the readahead marker made by itself?
	 * If so, trigger the readahead marker hit now, and merge
	 * the resulting next readahead window into the current one.
	 * Take care of maximum IO pages as above.
	 */
	if (index == ra->start && ra->size == ra->async_size) {
		add_pages = get_next_ra_size(ra, max_pages);
		if (ra->size + add_pages <= max_pages) {
			ra->async_size = add_pages;
			ra->size += add_pages;
		} else {
			ra->size = max_pages;
			ra->async_size = max_pages >> 1;
		}
	}

	ractl->_index = ra->start;
	page_cache_ra_order(ractl, ra, folio ? folio_order(folio) : 0);
}

void page_cache_sync_ra(struct readahead_control *ractl,
		unsigned long req_count)
{
	bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM);

	/*
	 * Even if read-ahead is disabled, issue this request as read-ahead
	 * as we'll need it to satisfy the requested range. The forced
	 * read-ahead will do the right thing and limit the read to just the
	 * requested range, which we'll set to 1 page for this case.
	 */
	if (!ractl->ra->ra_pages || blk_cgroup_congested()) {
		if (!ractl->file)
			return;
		req_count = 1;
		do_forced_ra = true;
	}

	/* be dumb */
	if (do_forced_ra) {
		force_page_cache_ra(ractl, req_count);
		return;
	}

	/* do read-ahead */
	ondemand_readahead(ractl, NULL, req_count);
}
EXPORT_SYMBOL_GPL(page_cache_sync_ra);
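
/*
 * For reference, the page_cache_sync_readahead() wrapper that most callers
 * use lives in include/linux/pagemap.h and looks roughly like this (sketch,
 * may differ in detail from the header):
 *
 *	static inline
 *	void page_cache_sync_readahead(struct address_space *mapping,
 *			struct file_ra_state *ra, struct file *file,
 *			pgoff_t index, unsigned long req_count)
 *	{
 *		DEFINE_READAHEAD(ractl, file, ra, mapping, index);
 *		page_cache_sync_ra(&ractl, req_count);
 *	}
 */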

void page_cache_async_ra(struct readahead_control *ractl,
		struct folio *folio, unsigned long req_count)
{
	/* no read-ahead */
	if (!ractl->ra->ra_pages)
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
	if (folio_test_writeback(folio))
		return;

	folio_clear_readahead(folio);

	/*
	 * Defer asynchronous read-ahead on IO congestion.
	 */
	if (inode_read_congested(ractl->mapping->host))
		return;

	if (blk_cgroup_congested())
		return;

	/* do read-ahead */
	ondemand_readahead(ractl, folio, req_count);
}
EXPORT_SYMBOL_GPL(page_cache_async_ra);

ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
{
	ssize_t ret;
	struct fd f;

	ret = -EBADF;
	f = fdget(fd);
	if (!f.file || !(f.file->f_mode & FMODE_READ))
		goto out;

	/*
	 * The readahead() syscall is intended to run only on files
	 * that can execute readahead. If readahead is not possible
	 * on this file, then we must return -EINVAL.
	 */
	ret = -EINVAL;
	if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
	    !S_ISREG(file_inode(f.file)->i_mode))
		goto out;

	ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
out:
	fdput(f);
	return ret;
}

SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
	return ksys_readahead(fd, offset, count);
}
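
/*
 * Userspace view (illustrative): the syscall above backs readahead(2),
 * so an application can warm the cache ahead of sequential processing:
 *
 *	// userspace C, not kernel code
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	int fd = open("data.bin", O_RDONLY);
 *	readahead(fd, 0, 16 * 1024 * 1024);	// prefetch the first 16MB
 */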

/**
 * readahead_expand - Expand a readahead request
 * @ractl: The request to be expanded
 * @new_start: The revised start
 * @new_len: The revised size of the request
 *
 * Attempt to expand a readahead request outwards from the current size to the
 * specified size by inserting locked pages before and after the current window
 * to increase the size to the new window.  This may involve the insertion of
 * THPs, in which case the window may get expanded even beyond what was
 * requested.
 *
 * The algorithm will stop if it encounters a conflicting page already in the
 * pagecache and leave a smaller expansion than requested.
 *
 * The caller must check for this by examining the revised @ractl object for a
 * different expansion than was requested.
 */
void readahead_expand(struct readahead_control *ractl,
		      loff_t new_start, size_t new_len)
{
	struct address_space *mapping = ractl->mapping;
	struct file_ra_state *ra = ractl->ra;
	pgoff_t new_index, new_nr_pages;
	gfp_t gfp_mask = readahead_gfp_mask(mapping);

	new_index = new_start / PAGE_SIZE;

	/* Expand the leading edge downwards */
	while (ractl->_index > new_index) {
		unsigned long index = ractl->_index - 1;
		struct page *page = xa_load(&mapping->i_pages, index);

		if (page && !xa_is_value(page))
			return; /* Page apparently present */

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return;
		if (add_to_page_cache_lru(page, mapping, index, gfp_mask) < 0) {
			put_page(page);
			return;
		}

		ractl->_nr_pages++;
		ractl->_index = page->index;
	}

	new_len += new_start - readahead_pos(ractl);
	new_nr_pages = DIV_ROUND_UP(new_len, PAGE_SIZE);

	/* Expand the trailing edge upwards */
	while (ractl->_nr_pages < new_nr_pages) {
		unsigned long index = ractl->_index + ractl->_nr_pages;
		struct page *page = xa_load(&mapping->i_pages, index);

		if (page && !xa_is_value(page))
			return; /* Page apparently present */

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return;
		if (add_to_page_cache_lru(page, mapping, index, gfp_mask) < 0) {
			put_page(page);
			return;
		}
		ractl->_nr_pages++;
		if (ra) {
			ra->size++;
			ra->async_size++;
		}
	}
}
EXPORT_SYMBOL(readahead_expand);
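
/*
 * Illustrative sketch (not part of this file): a filesystem that must read
 * in units larger than a page (say, a hypothetical 256kB compression block)
 * can widen the window from its ->readahead() before starting I/O:
 *
 *	readahead_expand(ractl, round_down(start, 256 * 1024),
 *			 round_up(len, 256 * 1024));
 *
 * It must then re-examine ractl's window, since the expansion may have
 * stopped short of what was asked for.
 */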