/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct folio_batch;

static inline bool mapping_empty(struct address_space *mapping)
{
        return xa_empty(&mapping->i_pages);
}

/*
 * mapping_shrinkable - test if page cache state allows inode reclaim
 * @mapping: the page cache mapping
 *
 * This checks the mapping's cache state for the purpose of inode
 * reclaim and LRU management.
 *
 * The caller is expected to hold the i_lock, but is not required to
 * hold the i_pages lock, which usually protects cache state.  That's
 * because the i_lock and the list_lru lock that protect the inode and
 * its LRU state don't nest inside the irq-safe i_pages lock.
 *
 * Cache deletions are performed under the i_lock, which ensures that
 * when an inode goes empty, it will reliably get queued on the LRU.
 *
 * Cache additions do not acquire the i_lock and may race with this
 * check, in which case we'll report the inode as shrinkable when it
 * has cache pages.  This is okay: the shrinker also checks the
 * refcount and the referenced bit, which will be elevated or set in
 * the process of adding new cache pages to an inode.
 */
static inline bool mapping_shrinkable(struct address_space *mapping)
{
        void *head;

        /*
         * On highmem systems, there could be lowmem pressure from the
         * inodes before there is highmem pressure from the page
         * cache.  Make inodes shrinkable regardless of cache state.
         */
        if (IS_ENABLED(CONFIG_HIGHMEM))
                return true;

        /* Cache completely empty?  Shrink away. */
        head = rcu_access_pointer(mapping->i_pages.xa_head);
        if (!head)
                return true;

        /*
         * The xarray stores single offset-0 entries directly in the
         * head pointer, which allows non-resident page cache entries
         * to escape the shadow shrinker's list of xarray nodes.  The
         * inode shrinker needs to pick them up under memory pressure.
         */
        if (!xa_is_node(head) && xa_is_value(head))
                return true;

        return false;
}

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
        AS_EIO          = 0,    /* IO error on async write */
        AS_ENOSPC       = 1,    /* ENOSPC on async write */
        AS_MM_ALL_LOCKS = 2,    /* under mm_take_all_locks() */
        AS_UNEVICTABLE  = 3,    /* e.g., ramdisk, SHM_LOCK */
        AS_EXITING      = 4,    /* final truncate in progress */
        /* writeback related tags are not used */
        AS_NO_WRITEBACK_TAGS = 5,
        AS_LARGE_FOLIO_SUPPORT = 6,
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (likely(!error))
                return;

        /* Record in wb_err for checkers using errseq_t based tracking */
        __filemap_set_wb_err(mapping, error);

        /* Record it in superblock */
        if (mapping->host)
                errseq_set(&mapping->host->i_sb->s_wb_err, error);

        /* Record it in flags for now, for legacy callers */
        if (error == -ENOSPC)
                set_bit(AS_ENOSPC, &mapping->flags);
        else
                set_bit(AS_EIO, &mapping->flags);
}

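/*
 * Example (illustrative sketch only; "example_end_writeback" is a
 * hypothetical helper, not part of this header): a filesystem's write
 * completion path would typically record a failure like this so that a
 * later fsync(2) on any open file sees the error:
 *
 *      static void example_end_writeback(struct address_space *mapping,
 *                                        int err)
 *      {
 *              if (err)
 *                      mapping_set_error(mapping, err);
 *      }
 */
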
static inline void mapping_set_unevictable(struct address_space *mapping)
{
        set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
        clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline bool mapping_unevictable(struct address_space *mapping)
{
        return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
        set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
        return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
        set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
        return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
        return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
                gfp_t gfp_mask)
{
        return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
        m->gfp_mask = mask;
}

/**
 * mapping_set_large_folios() - Indicate the file supports large folios.
 * @mapping: The file.
 *
 * The filesystem should call this function in its inode constructor to
 * indicate that the VFS can use large folios to cache the contents of
 * the file.
 *
 * Context: This should not be called while the inode is active as it
 * is non-atomic.
 */
static inline void mapping_set_large_folios(struct address_space *mapping)
{
        __set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
}

static inline bool mapping_large_folio_support(struct address_space *mapping)
{
        return test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
}

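/*
 * Example (illustrative sketch; the inode-constructor name below is
 * hypothetical): a filesystem that can handle large folios would opt in
 * from the code that sets up a new inode's mapping:
 *
 *      static void example_fs_init_inode(struct inode *inode)
 *      {
 *              mapping_set_large_folios(inode->i_mapping);
 *      }
 */
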
static inline int filemap_nr_thps(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
        return atomic_read(&mapping->nr_thps);
#else
        return 0;
#endif
}

static inline void filemap_nr_thps_inc(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
        if (!mapping_large_folio_support(mapping))
                atomic_inc(&mapping->nr_thps);
#else
        WARN_ON_ONCE(1);
#endif
}

static inline void filemap_nr_thps_dec(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
        if (!mapping_large_folio_support(mapping))
                atomic_dec(&mapping->nr_thps);
#else
        WARN_ON_ONCE(1);
#endif
}

void release_pages(struct page **pages, int nr);

struct address_space *page_mapping(struct page *);
struct address_space *folio_mapping(struct folio *);
struct address_space *swapcache_mapping(struct folio *);

/**
 * folio_file_mapping - Find the mapping this folio belongs to.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Folios in the swap cache return the mapping of the
 * swap file or swap device where the data is stored.  This is different
 * from the mapping returned by folio_mapping().  The only reason to
 * use it is if, like NFS, you return 0 from ->activate_swapfile.
 *
 * Do not call this for folios which aren't in the page cache or swap cache.
 */
static inline struct address_space *folio_file_mapping(struct folio *folio)
{
        if (unlikely(folio_test_swapcache(folio)))
                return swapcache_mapping(folio);

        return folio->mapping;
}

static inline struct address_space *page_file_mapping(struct page *page)
{
        return folio_file_mapping(page_folio(page));
}

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
static inline struct address_space *page_mapping_file(struct page *page)
{
        struct folio *folio = page_folio(page);

        if (unlikely(folio_test_swapcache(folio)))
                return NULL;
        return folio_mapping(folio);
}

/**
 * folio_inode - Get the host inode for this folio.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the inode that this folio
 * belongs to.
 *
 * Do not call this for folios which aren't in the page cache.
 */
static inline struct inode *folio_inode(struct folio *folio)
{
        return folio->mapping->host;
}

static inline bool page_cache_add_speculative(struct page *page, int count)
{
        return folio_ref_try_add_rcu((struct folio *)page, count);
}

static inline bool page_cache_get_speculative(struct page *page)
{
        return page_cache_add_speculative(page, 1);
}

/**
 * folio_attach_private - Attach private data to a folio.
 * @folio: Folio to attach data to.
 * @data: Data to attach to folio.
 *
 * Attaching private data to a folio increments the folio's reference count.
 * The data must be detached before the folio will be freed.
 */
static inline void folio_attach_private(struct folio *folio, void *data)
{
        folio_get(folio);
        folio->private = data;
        folio_set_private(folio);
}

/**
 * folio_change_private - Change private data on a folio.
 * @folio: Folio to change the data on.
 * @data: Data to set on the folio.
 *
 * Change the private data attached to a folio and return the old
 * data.  The folio must previously have had data attached and the data
 * must be detached before the folio will be freed.
 *
 * Return: Data that was previously attached to the folio.
 */
static inline void *folio_change_private(struct folio *folio, void *data)
{
        void *old = folio_get_private(folio);

        folio->private = data;
        return old;
}

/**
 * folio_detach_private - Detach private data from a folio.
 * @folio: Folio to detach data from.
 *
 * Removes the data that was previously attached to the folio and decrements
 * the refcount on the folio.
 *
 * Return: Data that was attached to the folio.
 */
static inline void *folio_detach_private(struct folio *folio)
{
        void *data = folio_get_private(folio);

        if (!folio_test_private(folio))
                return NULL;
        folio_clear_private(folio);
        folio->private = NULL;
        folio_put(folio);

        return data;
}

static inline void attach_page_private(struct page *page, void *data)
{
        folio_attach_private(page_folio(page), data);
}

static inline void *detach_page_private(struct page *page)
{
        return folio_detach_private(page_folio(page));
}

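/*
 * Example (illustrative sketch; "struct example_fs_state" and both helpers
 * are hypothetical): per-folio filesystem state is typically attached when
 * the folio is first set up and detached when the filesystem releases it.
 * Attaching takes a folio reference, so the detach must happen before the
 * folio can be freed:
 *
 *      static void example_init_folio(struct folio *folio,
 *                                     struct example_fs_state *state)
 *      {
 *              folio_attach_private(folio, state);
 *      }
 *
 *      static void example_release_folio(struct folio *folio)
 *      {
 *              struct example_fs_state *state = folio_detach_private(folio);
 *
 *              kfree(state);
 *      }
 */
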
#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order);
#else
static inline struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
{
        return folio_alloc(gfp, order);
}
#endif

static inline struct page *__page_cache_alloc(gfp_t gfp)
{
        return &filemap_alloc_folio(gfp, 0)->page;
}

static inline struct page *page_cache_alloc(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
        return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED            0x00000001
#define FGP_LOCK                0x00000002
#define FGP_CREAT               0x00000004
#define FGP_WRITE               0x00000008
#define FGP_NOFS                0x00000010
#define FGP_NOWAIT              0x00000020
#define FGP_FOR_MMAP            0x00000040
#define FGP_HEAD                0x00000080
#define FGP_ENTRY               0x00000100
#define FGP_STABLE              0x00000200

struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
                int fgp_flags, gfp_t gfp);
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
                int fgp_flags, gfp_t gfp);

/**
 * filemap_get_folio - Find and get a folio.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index.  If a folio is
 * present, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct folio *filemap_get_folio(struct address_space *mapping,
                                        pgoff_t index)
{
        return __filemap_get_folio(mapping, index, 0, 0);
}

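/*
 * Example (illustrative sketch; the helper name is hypothetical): the FGP
 * flags above combine with __filemap_get_folio() to express "find or
 * create, locked" lookups.  A buffered write path might do:
 *
 *      static struct folio *example_get_locked_folio(struct address_space *mapping,
 *                                                    pgoff_t index)
 *      {
 *              return __filemap_get_folio(mapping, index,
 *                              FGP_LOCK | FGP_CREAT | FGP_WRITE,
 *                              mapping_gfp_mask(mapping));
 *      }
 */
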
/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
                                        pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
                                        pgoff_t offset, int fgp_flags)
{
        return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Looks up the page cache entry at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Context: May sleep.
 * Return: A struct page or %NULL if there is no page in the cache for this
 * index.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
                                        pgoff_t index)
{
        return pagecache_get_page(mapping, index, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
                                        pgoff_t index, gfp_t gfp_mask)
{
        return pagecache_get_page(mapping, index,
                                        FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
                                        gfp_mask);
}

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
                                pgoff_t index)
{
        return pagecache_get_page(mapping, index,
                        FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
                        mapping_gfp_mask(mapping));
}

#define swapcache_index(folio)  __page_file_index(&(folio)->page)

/**
 * folio_index - File index of a folio.
 * @folio: The folio.
 *
 * For a folio which is either in the page cache or the swap cache,
 * return its index within the address_space it belongs to.  If you know
 * the page is definitely in the page cache, you can look at the folio's
 * index directly.
 *
 * Return: The index (offset in units of pages) of a folio in its file.
 */
static inline pgoff_t folio_index(struct folio *folio)
{
        if (unlikely(folio_test_swapcache(folio)))
                return swapcache_index(folio);
        return folio->index;
}

/**
 * folio_next_index - Get the index of the next folio.
 * @folio: The current folio.
 *
 * Return: The index of the folio which follows this folio in the file.
 */
static inline pgoff_t folio_next_index(struct folio *folio)
{
        return folio->index + folio_nr_pages(folio);
}

/**
 * folio_file_page - The page for a particular index.
 * @folio: The folio which contains this index.
 * @index: The index we want to look up.
 *
 * Sometimes after looking up a folio in the page cache, we need to
 * obtain the specific page for an index (eg a page fault).
 *
 * Return: The page containing the file data for this index.
 */
static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
{
        /* HugeTLBfs indexes the page cache in units of hpage_size */
        if (folio_test_hugetlb(folio))
                return &folio->page;
        return folio_page(folio, index & (folio_nr_pages(folio) - 1));
}

/**
 * folio_contains - Does this folio contain this index?
 * @folio: The folio.
 * @index: The page index within the file.
 *
 * Context: The caller should have the page locked in order to prevent
 * (eg) shmem from moving the page between the page cache and swap cache
 * and changing its index in the middle of the operation.
 * Return: true or false.
 */
static inline bool folio_contains(struct folio *folio, pgoff_t index)
{
        /* HugeTLBfs indexes the page cache in units of hpage_size */
        if (folio_test_hugetlb(folio))
                return folio->index == index;
        return index - folio_index(folio) < folio_nr_pages(folio);
}

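/*
 * Example (illustrative sketch; the fault-helper name is hypothetical):
 * after a page cache lookup returns a (possibly multi-page) folio, the
 * precise page for a faulting index can be picked out like this:
 *
 *      static struct page *example_page_for_fault(struct folio *folio,
 *                                                 pgoff_t index)
 *      {
 *              if (!folio_contains(folio, index))
 *                      return NULL;
 *              return folio_file_page(folio, index);
 *      }
 */
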
/*
 * Given the page we found in the page cache, return the page corresponding
 * to this index in the file
 */
static inline struct page *find_subpage(struct page *head, pgoff_t index)
{
        /* HugeTLBfs wants the head page regardless */
        if (PageHuge(head))
                return head;

        return head + (index & (thp_nr_pages(head) - 1));
}

unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
                        pgoff_t end, unsigned int nr_pages,
                        struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
                        pgoff_t *start, unsigned int nr_pages,
                        struct page **pages)
{
        return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
                                    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
                               unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
                        pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
                        struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
                        pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
                        struct page **pages)
{
        return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
                                        nr_pages, pages);
}

struct page *grab_cache_page_write_begin(struct address_space *mapping,
                        pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
                                pgoff_t index)
{
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

struct folio *read_cache_folio(struct address_space *, pgoff_t index,
                filler_t *filler, void *data);
struct page *read_cache_page(struct address_space *, pgoff_t index,
                filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
                struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
                                pgoff_t index, void *data)
{
        return read_cache_page(mapping, index, NULL, data);
}

static inline struct folio *read_mapping_folio(struct address_space *mapping,
                                pgoff_t index, void *data)
{
        return read_cache_folio(mapping, index, NULL, data);
}

/*
 * Get index of the page within radix-tree (but not for hugetlb pages).
 * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
        struct page *head;

        if (likely(!PageTransTail(page)))
                return page->index;

        head = compound_head(page);
        /*
         * We don't initialize ->index for tail pages: calculate based on
         * head page
         */
        return head->index + page - head;
}

extern pgoff_t hugetlb_basepage_index(struct page *page);

/*
 * Get the offset in PAGE_SIZE (even for hugetlb pages).
 * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
        if (unlikely(PageHuge(page)))
                return hugetlb_basepage_index(page);
        return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
        return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

/**
 * folio_pos - Returns the byte position of this folio in its file.
 * @folio: The folio.
 */
static inline loff_t folio_pos(struct folio *folio)
{
        return page_offset(&folio->page);
}

/**
 * folio_file_pos - Returns the byte position of this folio in its file.
 * @folio: The folio.
 *
 * This differs from folio_pos() for folios which belong to a swap file.
 * NFS is the only filesystem today which needs to use folio_file_pos().
 */
static inline loff_t folio_file_pos(struct folio *folio)
{
        return page_file_offset(&folio->page);
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
                                     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                        unsigned long address)
{
        pgoff_t pgoff;
        if (unlikely(is_vm_hugetlb_page(vma)))
                return linear_hugepage_index(vma, address);
        pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        return pgoff;
}

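/*
 * Worked example (assumed values, 4KiB pages): for a VMA with
 * vm_start = 0x7f0000000000 and vm_pgoff = 16, an address of
 * 0x7f0000003000 lies 3 pages past vm_start, so linear_page_index()
 * returns 3 + 16 = 19, i.e. the page's offset into the mapped file.
 */
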
struct wait_page_key {
        struct folio *folio;
        int bit_nr;
        int page_match;
};

struct wait_page_queue {
        struct folio *folio;
        int bit_nr;
        wait_queue_entry_t wait;
};

static inline bool wake_page_match(struct wait_page_queue *wait_page,
                                  struct wait_page_key *key)
{
        if (wait_page->folio != key->folio)
                return false;
        key->page_match = 1;

        if (wait_page->bit_nr != key->bit_nr)
                return false;

        return true;
}

void __folio_lock(struct folio *folio);
int __folio_lock_killable(struct folio *folio);
bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
                                unsigned int flags);
void unlock_page(struct page *page);
void folio_unlock(struct folio *folio);

static inline bool folio_trylock(struct folio *folio)
{
        return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
}

/*
 * Return true if the page was successfully locked
 */
static inline int trylock_page(struct page *page)
{
        return folio_trylock(page_folio(page));
}

static inline void folio_lock(struct folio *folio)
{
        might_sleep();
        if (!folio_trylock(folio))
                __folio_lock(folio);
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
        struct folio *folio;
        might_sleep();

        folio = page_folio(page);
        if (!folio_trylock(folio))
                __folio_lock(folio);
}

static inline int folio_lock_killable(struct folio *folio)
{
        might_sleep();
        if (!folio_trylock(folio))
                return __folio_lock_killable(folio);
        return 0;
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
        return folio_lock_killable(page_folio(page));
}

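/*
 * Example (illustrative sketch; the helper is hypothetical): a sleeping
 * path that must remain killable takes the folio lock like this and backs
 * out if a fatal signal arrived while waiting:
 *
 *      static int example_with_locked_folio(struct folio *folio)
 *      {
 *              int err = folio_lock_killable(folio);
 *
 *              if (err)
 *                      return err;
 *              folio_unlock(folio);
 *              return 0;
 *      }
 *
 * folio_lock_killable() returns -EINTR if the task was killed while it
 * waited for the lock, and 0 once the folio is locked.
 */
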
/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_lock implications depend on flags; see
 * __folio_lock_or_retry().
 */
static inline bool lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                     unsigned int flags)
{
        struct folio *folio;
        might_sleep();

        folio = page_folio(page);
        return folio_trylock(folio) || __folio_lock_or_retry(folio, mm, flags);
}

/*
 * This is exported only for folio_wait_locked/folio_wait_writeback, etc.,
 * and should not be used directly.
 */
void folio_wait_bit(struct folio *folio, int bit_nr);
int folio_wait_bit_killable(struct folio *folio, int bit_nr);

/*
 * Wait for a folio to be unlocked.
 *
 * This must be called with the caller "holding" the folio,
 * i.e. with an increased refcount so that the folio won't
 * go away during the wait.
 */
static inline void folio_wait_locked(struct folio *folio)
{
        if (folio_test_locked(folio))
                folio_wait_bit(folio, PG_locked);
}

static inline int folio_wait_locked_killable(struct folio *folio)
{
        if (!folio_test_locked(folio))
                return 0;
        return folio_wait_bit_killable(folio, PG_locked);
}

static inline void wait_on_page_locked(struct page *page)
{
        folio_wait_locked(page_folio(page));
}

static inline int wait_on_page_locked_killable(struct page *page)
{
        return folio_wait_locked_killable(page_folio(page));
}

int folio_put_wait_locked(struct folio *folio, int state);
void wait_on_page_writeback(struct page *page);
void folio_wait_writeback(struct folio *folio);
int folio_wait_writeback_killable(struct folio *folio);
void end_page_writeback(struct page *page);
void folio_end_writeback(struct folio *folio);
void wait_for_stable_page(struct page *page);
void folio_wait_stable(struct folio *folio);
void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
static inline void __set_page_dirty(struct page *page,
                struct address_space *mapping, int warn)
{
        __folio_mark_dirty(page_folio(page), mapping, warn);
}
void folio_account_cleaned(struct folio *folio, struct address_space *mapping,
                          struct bdi_writeback *wb);
void __folio_cancel_dirty(struct folio *folio);
static inline void folio_cancel_dirty(struct folio *folio)
{
        /* Avoid atomic ops, locking, etc. when not actually needed. */
        if (folio_test_dirty(folio))
                __folio_cancel_dirty(folio);
}
static inline void cancel_dirty_page(struct page *page)
{
        folio_cancel_dirty(page_folio(page));
}
bool folio_clear_dirty_for_io(struct folio *folio);
bool clear_page_dirty_for_io(struct page *page);
int __must_check folio_write_one(struct folio *folio);
static inline int __must_check write_one_page(struct page *page)
{
        return folio_write_one(page_folio(page));
}

int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

void folio_end_private_2(struct folio *folio);
void folio_wait_private_2(struct folio *folio);
int folio_wait_private_2_killable(struct folio *folio);

/*
 * Add an arbitrary waiter to a folio's wait queue
 */
void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter);

/*
 * Fault in userspace address range.
 */
size_t fault_in_writeable(char __user *uaddr, size_t size);
size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
size_t fault_in_readable(const char __user *uaddr, size_t size);

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                pgoff_t index, gfp_t gfp);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                pgoff_t index, gfp_t gfp);
int filemap_add_folio(struct address_space *mapping, struct folio *folio,
                pgoff_t index, gfp_t gfp);
void filemap_remove_folio(struct folio *folio);
void delete_from_page_cache(struct page *page);
void __filemap_remove_folio(struct folio *folio, void *shadow);
static inline void __delete_from_page_cache(struct page *page, void *shadow)
{
        __filemap_remove_folio(page_folio(page), shadow);
}
void replace_page_cache_page(struct page *old, struct page *new);
void delete_from_page_cache_batch(struct address_space *mapping,
                                  struct folio_batch *fbatch);
int try_to_release_page(struct page *page, gfp_t gfp);
bool filemap_release_folio(struct folio *folio, gfp_t gfp);
loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
                int whence);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
                struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
        int error;

        __SetPageLocked(page);
        error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
        if (unlikely(error))
                __ClearPageLocked(page);
        return error;
}

/* Must be non-static for BPF error injection */
int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
                pgoff_t index, gfp_t gfp, void **shadowp);

bool filemap_range_has_writeback(struct address_space *mapping,
                                 loff_t start_byte, loff_t end_byte);

/**
 * filemap_range_needs_writeback - check if range potentially needs writeback
 * @mapping: address space within which to check
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Find at least one page in the range supplied, usually used to check if
 * direct writing in this range will trigger a writeback.  Used by O_DIRECT
 * read/write with IOCB_NOWAIT, to see if the caller needs to do
 * filemap_write_and_wait_range() before proceeding.
 *
 * Return: %true if the caller should do filemap_write_and_wait_range() before
 * doing O_DIRECT to a page in this range, %false otherwise.
 */
static inline bool filemap_range_needs_writeback(struct address_space *mapping,
                                                 loff_t start_byte,
                                                 loff_t end_byte)
{
        if (!mapping->nrpages)
                return false;
        if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
            !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
                return false;
        return filemap_range_has_writeback(mapping, start_byte, end_byte);
}

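/*
 * Example (illustrative sketch; the helper is hypothetical): an O_DIRECT
 * read issued with IOCB_NOWAIT can use this check to bail out instead of
 * blocking on writeback:
 *
 *      static bool example_nowait_dio_would_block(struct kiocb *iocb,
 *                                                 size_t count)
 *      {
 *              struct address_space *mapping = iocb->ki_filp->f_mapping;
 *
 *              return filemap_range_needs_writeback(mapping, iocb->ki_pos,
 *                                              iocb->ki_pos + count - 1);
 *      }
 */
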
/**
 * struct readahead_control - Describes a readahead request.
 *
 * A readahead request is for consecutive pages.  Filesystems which
 * implement the ->readahead method should call readahead_page() or
 * readahead_page_batch() in a loop and attempt to start I/O against
 * each page in the request.
 *
 * Most of the fields in this struct are private and should be accessed
 * by the functions below.
 *
 * @file: The file, used primarily by network filesystems for authentication.
 *        May be NULL if invoked internally by the filesystem.
 * @mapping: Readahead this filesystem object.
 * @ra: File readahead state.  May be NULL.
 */
struct readahead_control {
        struct file *file;
        struct address_space *mapping;
        struct file_ra_state *ra;
/* private: use the readahead_* accessors instead */
        pgoff_t _index;
        unsigned int _nr_pages;
        unsigned int _batch_count;
};

#define DEFINE_READAHEAD(ractl, f, r, m, i)                             \
        struct readahead_control ractl = {                              \
                .file = f,                                              \
                .mapping = m,                                           \
                .ra = r,                                                \
                ._index = i,                                            \
        }

#define VM_READAHEAD_PAGES      (SZ_128K / PAGE_SIZE)

void page_cache_ra_unbounded(struct readahead_control *,
                unsigned long nr_to_read, unsigned long lookahead_count);
void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
void page_cache_async_ra(struct readahead_control *, struct folio *,
                unsigned long req_count);
void readahead_expand(struct readahead_control *ractl,
                      loff_t new_start, size_t new_len);

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
static inline
void page_cache_sync_readahead(struct address_space *mapping,
                struct file_ra_state *ra, struct file *file, pgoff_t index,
                unsigned long req_count)
{
        DEFINE_READAHEAD(ractl, file, ra, mapping, index);
        page_cache_sync_ra(&ractl, req_count);
}

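/*
 * Example (illustrative sketch; the read-path helper is hypothetical):
 * a buffered read that misses the cache kicks off synchronous readahead
 * for the remainder of the request before retrying the lookup:
 *
 *      static void example_handle_cache_miss(struct file *file, pgoff_t index,
 *                                            unsigned long nr_to_read)
 *      {
 *              page_cache_sync_readahead(file->f_mapping, &file->f_ra, file,
 *                                        index, nr_to_read);
 *      }
 */
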
/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @page: The page at @index which triggered the readahead call.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_async_readahead() should be called when a page is used which
 * is marked as PageReadahead; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
static inline
void page_cache_async_readahead(struct address_space *mapping,
                struct file_ra_state *ra, struct file *file,
                struct page *page, pgoff_t index, unsigned long req_count)
{
        DEFINE_READAHEAD(ractl, file, ra, mapping, index);
        page_cache_async_ra(&ractl, page_folio(page), req_count);
}

static inline struct folio *__readahead_folio(struct readahead_control *ractl)
{
        struct folio *folio;

        BUG_ON(ractl->_batch_count > ractl->_nr_pages);
        ractl->_nr_pages -= ractl->_batch_count;
        ractl->_index += ractl->_batch_count;

        if (!ractl->_nr_pages) {
                ractl->_batch_count = 0;
                return NULL;
        }

        folio = xa_load(&ractl->mapping->i_pages, ractl->_index);
        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        ractl->_batch_count = folio_nr_pages(folio);

        return folio;
}

/**
 * readahead_page - Get the next page to read.
 * @ractl: The current readahead request.
 *
 * Context: The page is locked and has an elevated refcount.  The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: A pointer to the next page, or %NULL if we are done.
 */
static inline struct page *readahead_page(struct readahead_control *ractl)
{
        struct folio *folio = __readahead_folio(ractl);

        return &folio->page;
}

/**
 * readahead_folio - Get the next folio to read.
 * @ractl: The current readahead request.
 *
 * Context: The folio is locked.  The caller should unlock the folio once
 * all I/O to that folio has completed.
 * Return: A pointer to the next folio, or %NULL if we are done.
 */
static inline struct folio *readahead_folio(struct readahead_control *ractl)
{
        struct folio *folio = __readahead_folio(ractl);

        if (folio)
                folio_put(folio);
        return folio;
}

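/*
 * Example (illustrative sketch; "example_read_folio_async" is hypothetical):
 * a filesystem's ->readahead method typically consumes the request by
 * looping over readahead_folio() and starting I/O for each folio.  The
 * folio reference is dropped by readahead_folio() itself, and the folio
 * is unlocked when the I/O completes:
 *
 *      static void example_readahead(struct readahead_control *ractl)
 *      {
 *              struct folio *folio;
 *
 *              while ((folio = readahead_folio(ractl)) != NULL)
 *                      example_read_folio_async(ractl->file, folio);
 *      }
 */
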
static inline unsigned int __readahead_batch(struct readahead_control *rac,
                struct page **array, unsigned int array_sz)
{
        unsigned int i = 0;
        XA_STATE(xas, &rac->mapping->i_pages, 0);
        struct page *page;

        BUG_ON(rac->_batch_count > rac->_nr_pages);
        rac->_nr_pages -= rac->_batch_count;
        rac->_index += rac->_batch_count;
        rac->_batch_count = 0;

        xas_set(&xas, rac->_index);
        rcu_read_lock();
        xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
                if (xas_retry(&xas, page))
                        continue;
                VM_BUG_ON_PAGE(!PageLocked(page), page);
                VM_BUG_ON_PAGE(PageTail(page), page);
                array[i++] = page;
                rac->_batch_count += thp_nr_pages(page);
                if (i == array_sz)
                        break;
        }
        rcu_read_unlock();

        return i;
}

/**
 * readahead_page_batch - Get a batch of pages to read.
 * @rac: The current readahead request.
 * @array: An array of pointers to struct page.
 *
 * Context: The pages are locked and have an elevated refcount.  The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: The number of pages placed in the array.  0 indicates the request
 * is complete.
 */
#define readahead_page_batch(rac, array)                                \
        __readahead_batch(rac, array, ARRAY_SIZE(array))

/**
 * readahead_pos - The byte offset into the file of this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_pos(struct readahead_control *rac)
{
        return (loff_t)rac->_index * PAGE_SIZE;
}

/**
 * readahead_length - The number of bytes in this readahead request.
 * @rac: The readahead request.
 */
static inline size_t readahead_length(struct readahead_control *rac)
{
        return rac->_nr_pages * PAGE_SIZE;
}

/**
 * readahead_index - The index of the first page in this readahead request.
 * @rac: The readahead request.
 */
static inline pgoff_t readahead_index(struct readahead_control *rac)
{
        return rac->_index;
}

/**
 * readahead_count - The number of pages in this readahead request.
 * @rac: The readahead request.
 */
static inline unsigned int readahead_count(struct readahead_control *rac)
{
        return rac->_nr_pages;
}

/**
 * readahead_batch_length - The number of bytes in the current batch.
 * @rac: The readahead request.
 */
static inline size_t readahead_batch_length(struct readahead_control *rac)
{
        return rac->_batch_count * PAGE_SIZE;
}

static inline unsigned long dir_pages(struct inode *inode)
{
        return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
                               PAGE_SHIFT;
}

/**
 * folio_mkwrite_check_truncate - check if folio was truncated
 * @folio: the folio to check
 * @inode: the inode to check the folio against
 *
 * Return: the number of bytes in the folio up to EOF,
 * or -EFAULT if the folio was truncated.
 */
static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
                                              struct inode *inode)
{
        loff_t size = i_size_read(inode);
        pgoff_t index = size >> PAGE_SHIFT;
        size_t offset = offset_in_folio(folio, size);

        if (!folio->mapping)
                return -EFAULT;

        /* folio is wholly inside EOF */
        if (folio_next_index(folio) - 1 < index)
                return folio_size(folio);
        /* folio is wholly past EOF */
        if (folio->index > index || !offset)
                return -EFAULT;
        /* folio is partially inside EOF */
        return offset;
}

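/*
 * Example (illustrative sketch; the fault-handler skeleton is hypothetical):
 * a ->page_mkwrite implementation can use this after locking the folio to
 * detect a racing truncate and to learn how many bytes are still within EOF:
 *
 *      static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
 *      {
 *              struct folio *folio = page_folio(vmf->page);
 *              struct inode *inode = file_inode(vmf->vma->vm_file);
 *              ssize_t len;
 *
 *              folio_lock(folio);
 *              len = folio_mkwrite_check_truncate(folio, inode);
 *              if (len < 0) {
 *                      folio_unlock(folio);
 *                      return VM_FAULT_NOPAGE;
 *              }
 *              return VM_FAULT_LOCKED;
 *      }
 */
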
/**
 * page_mkwrite_check_truncate - check if page was truncated
 * @page: the page to check
 * @inode: the inode to check the page against
 *
 * Returns the number of bytes in the page up to EOF,
 * or -EFAULT if the page was truncated.
 */
static inline int page_mkwrite_check_truncate(struct page *page,
                                              struct inode *inode)
{
        loff_t size = i_size_read(inode);
        pgoff_t index = size >> PAGE_SHIFT;
        int offset = offset_in_page(size);

        if (page->mapping != inode->i_mapping)
                return -EFAULT;

        /* page is wholly inside EOF */
        if (page->index < index)
                return PAGE_SIZE;
        /* page is wholly past EOF */
        if (page->index > index || !offset)
                return -EFAULT;
        /* page is partially inside EOF */
        return offset;
}

/**
 * i_blocks_per_folio - How many blocks fit in this folio.
 * @inode: The inode which contains the blocks.
 * @folio: The folio.
 *
 * If the block size is larger than the size of this folio, return zero.
 *
 * Context: The caller should hold a refcount on the folio to prevent it
 * from being split.
 * Return: The number of filesystem blocks covered by this folio.
 */
static inline
unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
{
        return folio_size(folio) >> inode->i_blkbits;
}

static inline
unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
{
        return i_blocks_per_folio(inode, page_folio(page));
}
#endif /* _LINUX_PAGEMAP_H */