powerpc/kvm: Fix kvm_use_magic_page
[linux-block.git] / include / linux / pagemap.h
CommitLineData
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
1da177e4
LT
2#ifndef _LINUX_PAGEMAP_H
3#define _LINUX_PAGEMAP_H
4
5/*
6 * Copyright 1995 Linus Torvalds
7 */
8#include <linux/mm.h>
9#include <linux/fs.h>
10#include <linux/list.h>
11#include <linux/highmem.h>
12#include <linux/compiler.h>
7c0f6ba6 13#include <linux/uaccess.h>
1da177e4 14#include <linux/gfp.h>
3e9f45bd 15#include <linux/bitops.h>
e286781d 16#include <linux/hardirq.h> /* for in_interrupt() */
8edf344c 17#include <linux/hugetlb_inline.h>
1da177e4 18
aa65c29c
JK
19struct pagevec;
20
7716506a
MWO
21static inline bool mapping_empty(struct address_space *mapping)
22{
23 return xa_empty(&mapping->i_pages);
24}
25
1da177e4 26/*
9c5d760b 27 * Bits in mapping->flags.
1da177e4 28 */
9a896c9a 29enum mapping_flags {
9c5d760b
MH
30 AS_EIO = 0, /* IO error on async write */
31 AS_ENOSPC = 1, /* ENOSPC on async write */
32 AS_MM_ALL_LOCKS = 2, /* under mm_take_all_locks() */
33 AS_UNEVICTABLE = 3, /* e.g., ramdisk, SHM_LOCK */
34 AS_EXITING = 4, /* final truncate in progress */
371a096e 35 /* writeback related tags are not used */
9c5d760b 36 AS_NO_WRITEBACK_TAGS = 5,
01c70267 37 AS_THP_SUPPORT = 6, /* THPs supported */
9a896c9a 38};
1da177e4 39
8ed1e46a
JL
40/**
41 * mapping_set_error - record a writeback error in the address_space
767e5ee5
MWO
42 * @mapping: the mapping in which an error should be set
43 * @error: the error to set in the mapping
8ed1e46a
JL
44 *
45 * When writeback fails in some way, we must record that error so that
46 * userspace can be informed when fsync and the like are called. We endeavor
47 * to report errors on any file that was open at the time of the error. Some
48 * internal callers also need to know when writeback errors have occurred.
49 *
50 * When a writeback error occurs, most filesystems will want to call
51 * mapping_set_error to record the error in the mapping so that it can be
52 * reported when the application calls fsync(2).
53 */
3e9f45bd
GC
54static inline void mapping_set_error(struct address_space *mapping, int error)
55{
8ed1e46a
JL
56 if (likely(!error))
57 return;
58
59 /* Record in wb_err for checkers using errseq_t based tracking */
735e4ae5
JL
60 __filemap_set_wb_err(mapping, error);
61
62 /* Record it in superblock */
8b7b2eb1
MK
63 if (mapping->host)
64 errseq_set(&mapping->host->i_sb->s_wb_err, error);
8ed1e46a
JL
65
66 /* Record it in flags for now, for legacy callers */
67 if (error == -ENOSPC)
68 set_bit(AS_ENOSPC, &mapping->flags);
69 else
70 set_bit(AS_EIO, &mapping->flags);
3e9f45bd
GC
71}
72
ba9ddf49
LS
73static inline void mapping_set_unevictable(struct address_space *mapping)
74{
75 set_bit(AS_UNEVICTABLE, &mapping->flags);
76}
77
89e004ea
LS
78static inline void mapping_clear_unevictable(struct address_space *mapping)
79{
80 clear_bit(AS_UNEVICTABLE, &mapping->flags);
81}
82
1eb6234e 83static inline bool mapping_unevictable(struct address_space *mapping)
ba9ddf49 84{
1eb6234e 85 return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
ba9ddf49 86}
ba9ddf49 87
91b0abe3
JW
88static inline void mapping_set_exiting(struct address_space *mapping)
89{
90 set_bit(AS_EXITING, &mapping->flags);
91}
92
93static inline int mapping_exiting(struct address_space *mapping)
94{
95 return test_bit(AS_EXITING, &mapping->flags);
96}
97
371a096e
HY
98static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
99{
100 set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
101}
102
103static inline int mapping_use_writeback_tags(struct address_space *mapping)
104{
105 return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
106}
107
dd0fc66f 108static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
1da177e4 109{
9c5d760b 110 return mapping->gfp_mask;
1da177e4
LT
111}
112
c62d2555
MH
113/* Restricts the given gfp_mask to what the mapping allows. */
114static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
115 gfp_t gfp_mask)
116{
117 return mapping_gfp_mask(mapping) & gfp_mask;
118}
119
1da177e4
LT
120/*
121 * This is non-atomic. Only to be used before the mapping is activated.
122 * Probably needs a barrier...
123 */
260b2367 124static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
1da177e4 125{
9c5d760b 126 m->gfp_mask = mask;
1da177e4
LT
127}
128
01c70267
MWO
129static inline bool mapping_thp_support(struct address_space *mapping)
130{
131 return test_bit(AS_THP_SUPPORT, &mapping->flags);
132}
133
6f4d2f97
MWO
134static inline int filemap_nr_thps(struct address_space *mapping)
135{
136#ifdef CONFIG_READ_ONLY_THP_FOR_FS
137 return atomic_read(&mapping->nr_thps);
138#else
139 return 0;
140#endif
141}
142
143static inline void filemap_nr_thps_inc(struct address_space *mapping)
144{
145#ifdef CONFIG_READ_ONLY_THP_FOR_FS
146 if (!mapping_thp_support(mapping))
147 atomic_inc(&mapping->nr_thps);
148#else
149 WARN_ON_ONCE(1);
150#endif
151}
152
153static inline void filemap_nr_thps_dec(struct address_space *mapping)
154{
155#ifdef CONFIG_READ_ONLY_THP_FOR_FS
156 if (!mapping_thp_support(mapping))
157 atomic_dec(&mapping->nr_thps);
158#else
159 WARN_ON_ONCE(1);
160#endif
161}
162
c6f92f9f 163void release_pages(struct page **pages, int nr);
1da177e4 164
842ca547
MWO
165/*
166 * For file cache pages, return the address_space, otherwise return NULL
167 */
168static inline struct address_space *page_mapping_file(struct page *page)
169{
170 if (unlikely(PageSwapCache(page)))
171 return NULL;
172 return page_mapping(page);
173}
174
e286781d
NP
175/*
176 * speculatively take a reference to a page.
0139aa7b
JK
177 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
178 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
e286781d
NP
179 *
180 * This function must be called inside the same rcu_read_lock() section as has
181 * been used to lookup the page in the pagecache radix-tree (or page table):
0139aa7b 182 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
e286781d
NP
183 *
184 * Unless an RCU grace period has passed, the count of all pages coming out
185 * of the allocator must be considered unstable. page_count may return higher
186 * than expected, and put_page must be able to do the right thing when the
187 * page has been finished with, no matter what it is subsequently allocated
188 * for (because put_page is what is used here to drop an invalid speculative
189 * reference).
190 *
191 * This is the interesting part of the lockless pagecache (and lockless
192 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
193 * has the following pattern:
194 * 1. find page in radix tree
195 * 2. conditionally increment refcount
196 * 3. check the page is still in pagecache (if no, goto 1)
197 *
0139aa7b 198 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
b93b0163 199 * following (with the i_pages lock held):
e286781d
NP
200 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
201 * B. remove page from pagecache
202 * C. free the page
203 *
204 * There are 2 critical interleavings that matter:
205 * - 2 runs before A: in this case, A sees elevated refcount and bails out
206 * - A runs before 2: in this case, 2 sees zero refcount and retries;
207 * subsequently, B will complete and 1 will find no page, causing the
208 * lookup to return NULL.
209 *
210 * It is possible that between 1 and 2, the page is removed then the exact same
211 * page is inserted into the same position in pagecache. That's OK: the
b93b0163 212 * old find_get_page using a lock could equally have run before or after
e286781d
NP
213 * such a re-insertion, depending on order that locks are granted.
214 *
215 * Lookups racing against pagecache insertion isn't a big problem: either 1
216 * will find the page or it will not. Likewise, the old find_get_page could run
217 * either before the insertion or afterwards, depending on timing.
218 */
494eec70 219static inline int __page_cache_add_speculative(struct page *page, int count)
e286781d 220{
8375ad98 221#ifdef CONFIG_TINY_RCU
bdd4e85d 222# ifdef CONFIG_PREEMPT_COUNT
591a3d7c 223 VM_BUG_ON(!in_atomic() && !irqs_disabled());
e286781d
NP
224# endif
225 /*
226 * Preempt must be disabled here - we rely on rcu_read_lock doing
227 * this for us.
228 *
229 * Pagecache won't be truncated from interrupt context, so if we have
230 * found a page in the radix tree here, we have pinned its refcount by
231 * disabling preempt, and hence no need for the "speculative get" that
232 * SMP requires.
233 */
309381fe 234 VM_BUG_ON_PAGE(page_count(page) == 0, page);
494eec70 235 page_ref_add(page, count);
e286781d
NP
236
237#else
494eec70 238 if (unlikely(!page_ref_add_unless(page, count, 0))) {
e286781d
NP
239 /*
240 * Either the page has been freed, or will be freed.
241 * In either case, retry here and the caller should
242 * do the right thing (see comments above).
243 */
244 return 0;
245 }
246#endif
309381fe 247 VM_BUG_ON_PAGE(PageTail(page), page);
e286781d
NP
248
249 return 1;
250}
251
494eec70 252static inline int page_cache_get_speculative(struct page *page)
ce0ad7f0 253{
494eec70 254 return __page_cache_add_speculative(page, 1);
255}
ce0ad7f0 256
494eec70 257static inline int page_cache_add_speculative(struct page *page, int count)
258{
259 return __page_cache_add_speculative(page, count);
ce0ad7f0
NP
260}
261
b03143ac
GJ
262/**
263 * attach_page_private - Attach private data to a page.
264 * @page: Page to attach data to.
265 * @data: Data to attach to page.
266 *
267 * Attaching private data to a page increments the page's reference count.
268 * The data must be detached before the page will be freed.
269 */
270static inline void attach_page_private(struct page *page, void *data)
271{
272 get_page(page);
273 set_page_private(page, (unsigned long)data);
274 SetPagePrivate(page);
275}
276
277/**
278 * detach_page_private - Detach private data from a page.
279 * @page: Page to detach data from.
280 *
281 * Removes the data that was previously attached to the page and decrements
282 * the refcount on the page.
283 *
284 * Return: Data that was attached to the page.
285 */
286static inline void *detach_page_private(struct page *page)
287{
288 void *data = (void *)page_private(page);
289
290 if (!PagePrivate(page))
291 return NULL;
292 ClearPagePrivate(page);
293 set_page_private(page, 0);
294 put_page(page);
295
296 return data;
297}
298
44110fe3 299#ifdef CONFIG_NUMA
2ae88149 300extern struct page *__page_cache_alloc(gfp_t gfp);
44110fe3 301#else
2ae88149
NP
302static inline struct page *__page_cache_alloc(gfp_t gfp)
303{
304 return alloc_pages(gfp, 0);
305}
306#endif
307
1da177e4
LT
308static inline struct page *page_cache_alloc(struct address_space *x)
309{
2ae88149 310 return __page_cache_alloc(mapping_gfp_mask(x));
1da177e4
LT
311}
312
8a5c743e 313static inline gfp_t readahead_gfp_mask(struct address_space *x)
7b1de586 314{
453f85d4 315 return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
7b1de586
WF
316}
317
1da177e4
LT
318typedef int filler_t(void *, struct page *);
319
0d3f9296 320pgoff_t page_cache_next_miss(struct address_space *mapping,
e7b563bb 321 pgoff_t index, unsigned long max_scan);
0d3f9296 322pgoff_t page_cache_prev_miss(struct address_space *mapping,
e7b563bb
JW
323 pgoff_t index, unsigned long max_scan);
324
2457aec6
MG
325#define FGP_ACCESSED 0x00000001
326#define FGP_LOCK 0x00000002
327#define FGP_CREAT 0x00000004
328#define FGP_WRITE 0x00000008
329#define FGP_NOFS 0x00000010
330#define FGP_NOWAIT 0x00000020
a75d4c33 331#define FGP_FOR_MMAP 0x00000040
a8cf7f27 332#define FGP_HEAD 0x00000080
44835d20 333#define FGP_ENTRY 0x00000100
2457aec6
MG
334
335struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
45f87de5 336 int fgp_flags, gfp_t cache_gfp_mask);
2457aec6
MG
337
338/**
339 * find_get_page - find and get a page reference
340 * @mapping: the address_space to search
341 * @offset: the page index
342 *
343 * Looks up the page cache slot at @mapping & @offset. If there is a
344 * page cache page, it is returned with an increased refcount.
345 *
346 * Otherwise, %NULL is returned.
347 */
348static inline struct page *find_get_page(struct address_space *mapping,
349 pgoff_t offset)
350{
45f87de5 351 return pagecache_get_page(mapping, offset, 0, 0);
2457aec6
MG
352}
353
354static inline struct page *find_get_page_flags(struct address_space *mapping,
355 pgoff_t offset, int fgp_flags)
356{
45f87de5 357 return pagecache_get_page(mapping, offset, fgp_flags, 0);
2457aec6
MG
358}
359
360/**
361 * find_lock_page - locate, pin and lock a pagecache page
2457aec6 362 * @mapping: the address_space to search
89b42235 363 * @index: the page index
2457aec6 364 *
89b42235 365 * Looks up the page cache entry at @mapping & @index. If there is a
2457aec6
MG
366 * page cache page, it is returned locked and with an increased
367 * refcount.
368 *
a8cf7f27
MWO
369 * Context: May sleep.
370 * Return: A struct page or %NULL if there is no page in the cache for this
371 * index.
2457aec6
MG
372 */
373static inline struct page *find_lock_page(struct address_space *mapping,
a8cf7f27
MWO
374 pgoff_t index)
375{
376 return pagecache_get_page(mapping, index, FGP_LOCK, 0);
377}
378
379/**
380 * find_lock_head - Locate, pin and lock a pagecache page.
381 * @mapping: The address_space to search.
89b42235 382 * @index: The page index.
a8cf7f27 383 *
89b42235 384 * Looks up the page cache entry at @mapping & @index. If there is a
a8cf7f27
MWO
385 * page cache page, its head page is returned locked and with an increased
386 * refcount.
387 *
388 * Context: May sleep.
389 * Return: A struct page which is !PageTail, or %NULL if there is no page
390 * in the cache for this index.
391 */
392static inline struct page *find_lock_head(struct address_space *mapping,
393 pgoff_t index)
2457aec6 394{
a8cf7f27 395 return pagecache_get_page(mapping, index, FGP_LOCK | FGP_HEAD, 0);
2457aec6
MG
396}
397
398/**
399 * find_or_create_page - locate or add a pagecache page
400 * @mapping: the page's address_space
401 * @index: the page's index into the mapping
402 * @gfp_mask: page allocation mode
403 *
404 * Looks up the page cache slot at @mapping & @offset. If there is a
405 * page cache page, it is returned locked and with an increased
406 * refcount.
407 *
408 * If the page is not present, a new page is allocated using @gfp_mask
409 * and added to the page cache and the VM's LRU list. The page is
410 * returned locked and with an increased refcount.
411 *
412 * On memory exhaustion, %NULL is returned.
413 *
414 * find_or_create_page() may sleep, even if @gfp_flags specifies an
415 * atomic allocation!
416 */
417static inline struct page *find_or_create_page(struct address_space *mapping,
767e5ee5 418 pgoff_t index, gfp_t gfp_mask)
2457aec6 419{
767e5ee5 420 return pagecache_get_page(mapping, index,
2457aec6 421 FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
45f87de5 422 gfp_mask);
2457aec6
MG
423}
424
425/**
426 * grab_cache_page_nowait - returns locked page at given index in given cache
427 * @mapping: target address_space
428 * @index: the page index
429 *
430 * Same as grab_cache_page(), but do not wait if the page is unavailable.
431 * This is intended for speculative data generators, where the data can
432 * be regenerated if the page couldn't be grabbed. This routine should
433 * be safe to call while holding the lock for another page.
434 *
435 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
436 * and deadlock against the caller's locked page.
437 */
438static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
439 pgoff_t index)
440{
441 return pagecache_get_page(mapping, index,
442 FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
45f87de5 443 mapping_gfp_mask(mapping));
2457aec6
MG
444}
445
63ec1973
MWO
446/* Does this page contain this index? */
447static inline bool thp_contains(struct page *head, pgoff_t index)
448{
449 /* HugeTLBfs indexes the page cache in units of hpage_size */
450 if (PageHuge(head))
451 return head->index == index;
452 return page_index(head) == (index & ~(thp_nr_pages(head) - 1UL));
453}
454
ec848215
MWO
455/*
456 * Given the page we found in the page cache, return the page corresponding
457 * to this index in the file
458 */
459static inline struct page *find_subpage(struct page *head, pgoff_t index)
4101196b 460{
ec848215
MWO
461 /* HugeTLBfs wants the head page regardless */
462 if (PageHuge(head))
463 return head;
4101196b 464
6c357848 465 return head + (index & (thp_nr_pages(head) - 1));
4101196b
MWO
466}
467
0cd6144a 468unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
cf2039af 469 pgoff_t end, struct pagevec *pvec, pgoff_t *indices);
b947cee4
JK
470unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
471 pgoff_t end, unsigned int nr_pages,
472 struct page **pages);
473static inline unsigned find_get_pages(struct address_space *mapping,
474 pgoff_t *start, unsigned int nr_pages,
475 struct page **pages)
476{
477 return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
478 pages);
479}
ebf43500
JA
480unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
481 unsigned int nr_pages, struct page **pages);
72b045ae 482unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
a6906972 483 pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
72b045ae
JK
484 struct page **pages);
485static inline unsigned find_get_pages_tag(struct address_space *mapping,
a6906972 486 pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
72b045ae
JK
487 struct page **pages)
488{
489 return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
490 nr_pages, pages);
491}
1da177e4 492
54566b2c
NP
493struct page *grab_cache_page_write_begin(struct address_space *mapping,
494 pgoff_t index, unsigned flags);
afddba49 495
1da177e4
LT
496/*
497 * Returns locked page at given index in given cache, creating it if needed.
498 */
57f6b96c
FW
499static inline struct page *grab_cache_page(struct address_space *mapping,
500 pgoff_t index)
1da177e4
LT
501{
502 return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
503}
504
1da177e4 505extern struct page * read_cache_page(struct address_space *mapping,
5e5358e7 506 pgoff_t index, filler_t *filler, void *data);
0531b2aa
LT
507extern struct page * read_cache_page_gfp(struct address_space *mapping,
508 pgoff_t index, gfp_t gfp_mask);
1da177e4
LT
509extern int read_cache_pages(struct address_space *mapping,
510 struct list_head *pages, filler_t *filler, void *data);
511
090d2b18 512static inline struct page *read_mapping_page(struct address_space *mapping,
5e5358e7 513 pgoff_t index, void *data)
090d2b18 514{
6c45b454 515 return read_cache_page(mapping, index, NULL, data);
090d2b18
PE
516}
517
a0f7a756 518/*
fe19bd3d 519 * Get index of the page within radix-tree (but not for hugetlb pages).
5cbc198a 520 * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
a0f7a756 521 */
5cbc198a 522static inline pgoff_t page_to_index(struct page *page)
a0f7a756 523{
fe3df441 524 struct page *head;
e9b61f19 525
e9b61f19 526 if (likely(!PageTransTail(page)))
09cbfeaf 527 return page->index;
e9b61f19 528
fe3df441 529 head = compound_head(page);
e9b61f19
KS
530 /*
531 * We don't initialize ->index for tail pages: calculate based on
532 * head page
533 */
fe3df441 534 return head->index + page - head;
a0f7a756
NH
535}
536
fe19bd3d
HD
537extern pgoff_t hugetlb_basepage_index(struct page *page);
538
5cbc198a 539/*
fe19bd3d
HD
540 * Get the offset in PAGE_SIZE (even for hugetlb pages).
541 * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
5cbc198a
KS
542 */
543static inline pgoff_t page_to_pgoff(struct page *page)
544{
fe19bd3d
HD
545 if (unlikely(PageHuge(page)))
546 return hugetlb_basepage_index(page);
5cbc198a
KS
547 return page_to_index(page);
548}
549
1da177e4
LT
550/*
551 * Return byte-offset into filesystem object for page.
552 */
553static inline loff_t page_offset(struct page *page)
554{
09cbfeaf 555 return ((loff_t)page->index) << PAGE_SHIFT;
1da177e4
LT
556}
557
f981c595
MG
558static inline loff_t page_file_offset(struct page *page)
559{
8cd79788 560 return ((loff_t)page_index(page)) << PAGE_SHIFT;
f981c595
MG
561}
562
0fe6e20b
NH
563extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
564 unsigned long address);
565
1da177e4
LT
566static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
567 unsigned long address)
568{
0fe6e20b
NH
569 pgoff_t pgoff;
570 if (unlikely(is_vm_hugetlb_page(vma)))
571 return linear_hugepage_index(vma, address);
572 pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
1da177e4 573 pgoff += vma->vm_pgoff;
09cbfeaf 574 return pgoff;
1da177e4
LT
575}
576
c7510ab2
JA
577struct wait_page_key {
578 struct page *page;
579 int bit_nr;
580 int page_match;
581};
582
583struct wait_page_queue {
584 struct page *page;
585 int bit_nr;
586 wait_queue_entry_t wait;
587};
588
cdc8fcb4 589static inline bool wake_page_match(struct wait_page_queue *wait_page,
c7510ab2
JA
590 struct wait_page_key *key)
591{
592 if (wait_page->page != key->page)
cdc8fcb4 593 return false;
c7510ab2
JA
594 key->page_match = 1;
595
596 if (wait_page->bit_nr != key->bit_nr)
cdc8fcb4 597 return false;
d1932dc3 598
cdc8fcb4 599 return true;
d1932dc3
JA
600}
601
b3c97528
HH
602extern void __lock_page(struct page *page);
603extern int __lock_page_killable(struct page *page);
dd3e6d50 604extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
d065bd81
ML
605extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
606 unsigned int flags);
b3c97528 607extern void unlock_page(struct page *page);
1da177e4 608
f4458845
AM
609/*
610 * Return true if the page was successfully locked
611 */
529ae9aa
NP
612static inline int trylock_page(struct page *page)
613{
48c935ad 614 page = compound_head(page);
8413ac9d 615 return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
529ae9aa
NP
616}
617
db37648c
NP
618/*
619 * lock_page may only be called if we have the page's inode pinned.
620 */
1da177e4
LT
621static inline void lock_page(struct page *page)
622{
623 might_sleep();
529ae9aa 624 if (!trylock_page(page))
1da177e4
LT
625 __lock_page(page);
626}
db37648c 627
2687a356
MW
628/*
629 * lock_page_killable is like lock_page but can be interrupted by fatal
630 * signals. It returns 0 if it locked the page and -EINTR if it was
631 * killed while waiting.
632 */
633static inline int lock_page_killable(struct page *page)
634{
635 might_sleep();
529ae9aa 636 if (!trylock_page(page))
2687a356
MW
637 return __lock_page_killable(page);
638 return 0;
639}
640
dd3e6d50
JA
641/*
642 * lock_page_async - Lock the page, unless this would block. If the page
643 * is already locked, then queue a callback when the page becomes unlocked.
644 * This callback can then retry the operation.
645 *
646 * Returns 0 if the page is locked successfully, or -EIOCBQUEUED if the page
647 * was already locked and the callback defined in 'wait' was queued.
648 */
649static inline int lock_page_async(struct page *page,
650 struct wait_page_queue *wait)
651{
652 if (!trylock_page(page))
653 return __lock_page_async(page, wait);
654 return 0;
655}
656
d065bd81
ML
657/*
658 * lock_page_or_retry - Lock the page, unless this would block and the
659 * caller indicated that it can handle a retry.
9a95f3cf 660 *
c1e8d7c6 661 * Return value and mmap_lock implications depend on flags; see
9a95f3cf 662 * __lock_page_or_retry().
d065bd81
ML
663 */
664static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
665 unsigned int flags)
666{
667 might_sleep();
668 return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
669}
670
1da177e4 671/*
74d81bfa
NP
672 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
673 * and should not be used directly.
1da177e4 674 */
b3c97528 675extern void wait_on_page_bit(struct page *page, int bit_nr);
f62e00cc 676extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
a4796e37 677
1da177e4
LT
678/*
679 * Wait for a page to be unlocked.
680 *
681 * This must be called with the caller "holding" the page,
682 * ie with increased "page->count" so that the page won't
683 * go away during the wait..
684 */
685static inline void wait_on_page_locked(struct page *page)
686{
687 if (PageLocked(page))
48c935ad 688 wait_on_page_bit(compound_head(page), PG_locked);
1da177e4
LT
689}
690
62906027
NP
691static inline int wait_on_page_locked_killable(struct page *page)
692{
693 if (!PageLocked(page))
694 return 0;
695 return wait_on_page_bit_killable(compound_head(page), PG_locked);
696}
697
48054625 698int put_and_wait_on_page_locked(struct page *page, int state);
19343b5b 699void wait_on_page_writeback(struct page *page);
e5dbd332 700int wait_on_page_writeback_killable(struct page *page);
1da177e4 701extern void end_page_writeback(struct page *page);
1d1d1a76 702void wait_for_stable_page(struct page *page);
1da177e4 703
3a6b2162
MWO
704void __set_page_dirty(struct page *, struct address_space *, int warn);
705int __set_page_dirty_nobuffers(struct page *page);
706int __set_page_dirty_no_writeback(struct page *page);
707
c11f0c0b 708void page_endio(struct page *page, bool is_write, int err);
57d99845 709
73e10ded
DH
710/**
711 * set_page_private_2 - Set PG_private_2 on a page and take a ref
712 * @page: The page.
713 *
714 * Set the PG_private_2 flag on a page and take the reference needed for the VM
715 * to handle its lifetime correctly. This sets the flag and takes the
716 * reference unconditionally, so care must be taken not to set the flag again
717 * if it's already set.
718 */
719static inline void set_page_private_2(struct page *page)
720{
721 page = compound_head(page);
722 get_page(page);
723 SetPagePrivate2(page);
724}
725
726void end_page_private_2(struct page *page);
727void wait_on_page_private_2(struct page *page);
728int wait_on_page_private_2_killable(struct page *page);
729
385e1ca5
DH
730/*
731 * Add an arbitrary waiter to a page's wait queue
732 */
ac6424b9 733extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);
385e1ca5 734
1da177e4 735/*
4bce9f6e 736 * Fault everything in given userspace address range in.
1da177e4 737 */
e15710bf 738static inline int fault_in_pages_writeable(char __user *uaddr, size_t size)
f56f821f 739{
9923777d 740 char __user *end = uaddr + size - 1;
f56f821f
DV
741
742 if (unlikely(size == 0))
e23d4159 743 return 0;
f56f821f 744
e23d4159
AV
745 if (unlikely(uaddr > end))
746 return -EFAULT;
f56f821f
DV
747 /*
748 * Writing zeroes into userspace here is OK, because we know that if
749 * the zero gets there, we'll be overwriting it.
750 */
e23d4159
AV
751 do {
752 if (unlikely(__put_user(0, uaddr) != 0))
753 return -EFAULT;
f56f821f 754 uaddr += PAGE_SIZE;
e23d4159 755 } while (uaddr <= end);
f56f821f
DV
756
757 /* Check whether the range spilled into the next page. */
758 if (((unsigned long)uaddr & PAGE_MASK) ==
759 ((unsigned long)end & PAGE_MASK))
e23d4159 760 return __put_user(0, end);
f56f821f 761
e23d4159 762 return 0;
f56f821f
DV
763}
764
e15710bf 765static inline int fault_in_pages_readable(const char __user *uaddr, size_t size)
f56f821f
DV
766{
767 volatile char c;
f56f821f
DV
768 const char __user *end = uaddr + size - 1;
769
770 if (unlikely(size == 0))
e23d4159 771 return 0;
f56f821f 772
e23d4159
AV
773 if (unlikely(uaddr > end))
774 return -EFAULT;
775
776 do {
777 if (unlikely(__get_user(c, uaddr) != 0))
778 return -EFAULT;
f56f821f 779 uaddr += PAGE_SIZE;
e23d4159 780 } while (uaddr <= end);
f56f821f
DV
781
782 /* Check whether the range spilled into the next page. */
783 if (((unsigned long)uaddr & PAGE_MASK) ==
784 ((unsigned long)end & PAGE_MASK)) {
e23d4159 785 return __get_user(c, end);
f56f821f
DV
786 }
787
90b75db6 788 (void)c;
e23d4159 789 return 0;
f56f821f
DV
790}
791
529ae9aa
NP
792int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
793 pgoff_t index, gfp_t gfp_mask);
794int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
795 pgoff_t index, gfp_t gfp_mask);
97cecb5a 796extern void delete_from_page_cache(struct page *page);
62cccb8c 797extern void __delete_from_page_cache(struct page *page, void *shadow);
1f7ef657 798void replace_page_cache_page(struct page *old, struct page *new);
aa65c29c
JK
799void delete_from_page_cache_batch(struct address_space *mapping,
800 struct pagevec *pvec);
41139aa4
MWO
801loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
802 int whence);
529ae9aa
NP
803
804/*
805 * Like add_to_page_cache_locked, but used to add newly allocated pages:
48c935ad 806 * the page is new, so we can just run __SetPageLocked() against it.
529ae9aa
NP
807 */
808static inline int add_to_page_cache(struct page *page,
809 struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
810{
811 int error;
812
48c935ad 813 __SetPageLocked(page);
529ae9aa
NP
814 error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
815 if (unlikely(error))
48c935ad 816 __ClearPageLocked(page);
529ae9aa
NP
817 return error;
818}
819
042124cc
MWO
820/**
821 * struct readahead_control - Describes a readahead request.
822 *
823 * A readahead request is for consecutive pages. Filesystems which
824 * implement the ->readahead method should call readahead_page() or
825 * readahead_page_batch() in a loop and attempt to start I/O against
826 * each page in the request.
827 *
828 * Most of the fields in this struct are private and should be accessed
829 * by the functions below.
830 *
831 * @file: The file, used primarily by network filesystems for authentication.
832 * May be NULL if invoked internally by the filesystem.
833 * @mapping: Readahead this filesystem object.
fcd9ae4f 834 * @ra: File readahead state. May be NULL.
042124cc
MWO
835 */
836struct readahead_control {
837 struct file *file;
838 struct address_space *mapping;
fcd9ae4f 839 struct file_ra_state *ra;
042124cc
MWO
840/* private: use the readahead_* accessors instead */
841 pgoff_t _index;
842 unsigned int _nr_pages;
843 unsigned int _batch_count;
844};
845
fcd9ae4f
MWO
846#define DEFINE_READAHEAD(ractl, f, r, m, i) \
847 struct readahead_control ractl = { \
1aa83cfa
MWO
848 .file = f, \
849 .mapping = m, \
fcd9ae4f 850 .ra = r, \
1aa83cfa
MWO
851 ._index = i, \
852 }
853
fefa7c47
MWO
854#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE)
855
856void page_cache_ra_unbounded(struct readahead_control *,
857 unsigned long nr_to_read, unsigned long lookahead_count);
fcd9ae4f
MWO
858void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
859void page_cache_async_ra(struct readahead_control *, struct page *,
fefa7c47 860 unsigned long req_count);
3ca23644
DH
861void readahead_expand(struct readahead_control *ractl,
862 loff_t new_start, size_t new_len);
fefa7c47
MWO
863
864/**
865 * page_cache_sync_readahead - generic file readahead
866 * @mapping: address_space which holds the pagecache and I/O vectors
867 * @ra: file_ra_state which holds the readahead state
868 * @file: Used by the filesystem for authentication.
869 * @index: Index of first page to be read.
870 * @req_count: Total number of pages being read by the caller.
871 *
872 * page_cache_sync_readahead() should be called when a cache miss happened:
873 * it will submit the read. The readahead logic may decide to piggyback more
874 * pages onto the read request if access patterns suggest it will improve
875 * performance.
876 */
877static inline
878void page_cache_sync_readahead(struct address_space *mapping,
879 struct file_ra_state *ra, struct file *file, pgoff_t index,
880 unsigned long req_count)
881{
fcd9ae4f
MWO
882 DEFINE_READAHEAD(ractl, file, ra, mapping, index);
883 page_cache_sync_ra(&ractl, req_count);
fefa7c47
MWO
884}
885
886/**
887 * page_cache_async_readahead - file readahead for marked pages
888 * @mapping: address_space which holds the pagecache and I/O vectors
889 * @ra: file_ra_state which holds the readahead state
890 * @file: Used by the filesystem for authentication.
891 * @page: The page at @index which triggered the readahead call.
892 * @index: Index of first page to be read.
893 * @req_count: Total number of pages being read by the caller.
894 *
895 * page_cache_async_readahead() should be called when a page is used which
896 * is marked as PageReadahead; this is a marker to suggest that the application
897 * has used up enough of the readahead window that we should start pulling in
898 * more pages.
899 */
900static inline
901void page_cache_async_readahead(struct address_space *mapping,
902 struct file_ra_state *ra, struct file *file,
903 struct page *page, pgoff_t index, unsigned long req_count)
904{
fcd9ae4f
MWO
905 DEFINE_READAHEAD(ractl, file, ra, mapping, index);
906 page_cache_async_ra(&ractl, page, req_count);
fefa7c47
MWO
907}
908
042124cc
MWO
909/**
910 * readahead_page - Get the next page to read.
911 * @rac: The current readahead request.
912 *
913 * Context: The page is locked and has an elevated refcount. The caller
914 * should decreases the refcount once the page has been submitted for I/O
915 * and unlock the page once all I/O to that page has completed.
916 * Return: A pointer to the next page, or %NULL if we are done.
917 */
918static inline struct page *readahead_page(struct readahead_control *rac)
919{
920 struct page *page;
921
922 BUG_ON(rac->_batch_count > rac->_nr_pages);
923 rac->_nr_pages -= rac->_batch_count;
924 rac->_index += rac->_batch_count;
925
926 if (!rac->_nr_pages) {
927 rac->_batch_count = 0;
928 return NULL;
929 }
930
931 page = xa_load(&rac->mapping->i_pages, rac->_index);
932 VM_BUG_ON_PAGE(!PageLocked(page), page);
6c357848 933 rac->_batch_count = thp_nr_pages(page);
042124cc
MWO
934
935 return page;
936}
937
938static inline unsigned int __readahead_batch(struct readahead_control *rac,
939 struct page **array, unsigned int array_sz)
940{
941 unsigned int i = 0;
942 XA_STATE(xas, &rac->mapping->i_pages, 0);
943 struct page *page;
944
945 BUG_ON(rac->_batch_count > rac->_nr_pages);
946 rac->_nr_pages -= rac->_batch_count;
947 rac->_index += rac->_batch_count;
948 rac->_batch_count = 0;
949
950 xas_set(&xas, rac->_index);
951 rcu_read_lock();
952 xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
4349a83a
MWO
953 if (xas_retry(&xas, page))
954 continue;
042124cc
MWO
955 VM_BUG_ON_PAGE(!PageLocked(page), page);
956 VM_BUG_ON_PAGE(PageTail(page), page);
957 array[i++] = page;
6c357848 958 rac->_batch_count += thp_nr_pages(page);
042124cc
MWO
959
960 /*
961 * The page cache isn't using multi-index entries yet,
962 * so the xas cursor needs to be manually moved to the
963 * next index. This can be removed once the page cache
964 * is converted.
965 */
966 if (PageHead(page))
967 xas_set(&xas, rac->_index + rac->_batch_count);
968
969 if (i == array_sz)
970 break;
971 }
972 rcu_read_unlock();
973
974 return i;
975}
976
977/**
978 * readahead_page_batch - Get a batch of pages to read.
979 * @rac: The current readahead request.
980 * @array: An array of pointers to struct page.
981 *
982 * Context: The pages are locked and have an elevated refcount. The caller
983 * should decreases the refcount once the page has been submitted for I/O
984 * and unlock the page once all I/O to that page has completed.
985 * Return: The number of pages placed in the array. 0 indicates the request
986 * is complete.
987 */
988#define readahead_page_batch(rac, array) \
989 __readahead_batch(rac, array, ARRAY_SIZE(array))
990
991/**
992 * readahead_pos - The byte offset into the file of this readahead request.
993 * @rac: The readahead request.
994 */
995static inline loff_t readahead_pos(struct readahead_control *rac)
996{
997 return (loff_t)rac->_index * PAGE_SIZE;
998}
999
1000/**
1001 * readahead_length - The number of bytes in this readahead request.
1002 * @rac: The readahead request.
1003 */
076171a6 1004static inline size_t readahead_length(struct readahead_control *rac)
042124cc 1005{
076171a6 1006 return rac->_nr_pages * PAGE_SIZE;
042124cc
MWO
1007}
1008
1009/**
1010 * readahead_index - The index of the first page in this readahead request.
1011 * @rac: The readahead request.
1012 */
1013static inline pgoff_t readahead_index(struct readahead_control *rac)
1014{
1015 return rac->_index;
1016}
1017
1018/**
1019 * readahead_count - The number of pages in this readahead request.
1020 * @rac: The readahead request.
1021 */
1022static inline unsigned int readahead_count(struct readahead_control *rac)
1023{
1024 return rac->_nr_pages;
1025}
1026
32c0a6bc
MWO
1027/**
1028 * readahead_batch_length - The number of bytes in the current batch.
1029 * @rac: The readahead request.
1030 */
076171a6 1031static inline size_t readahead_batch_length(struct readahead_control *rac)
32c0a6bc
MWO
1032{
1033 return rac->_batch_count * PAGE_SIZE;
1034}
1035
b57c2cb9
FF
1036static inline unsigned long dir_pages(struct inode *inode)
1037{
09cbfeaf
KS
1038 return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
1039 PAGE_SHIFT;
b57c2cb9
FF
1040}
1041
243145bc
AG
1042/**
1043 * page_mkwrite_check_truncate - check if page was truncated
1044 * @page: the page to check
1045 * @inode: the inode to check the page against
1046 *
1047 * Returns the number of bytes in the page up to EOF,
1048 * or -EFAULT if the page was truncated.
1049 */
1050static inline int page_mkwrite_check_truncate(struct page *page,
1051 struct inode *inode)
1052{
1053 loff_t size = i_size_read(inode);
1054 pgoff_t index = size >> PAGE_SHIFT;
1055 int offset = offset_in_page(size);
1056
1057 if (page->mapping != inode->i_mapping)
1058 return -EFAULT;
1059
1060 /* page is wholly inside EOF */
1061 if (page->index < index)
1062 return PAGE_SIZE;
1063 /* page is wholly past EOF */
1064 if (page->index > index || !offset)
1065 return -EFAULT;
1066 /* page is partially inside EOF */
1067 return offset;
1068}
1069
24addd84
MWO
1070/**
1071 * i_blocks_per_page - How many blocks fit in this page.
1072 * @inode: The inode which contains the blocks.
1073 * @page: The page (head page if the page is a THP).
1074 *
1075 * If the block size is larger than the size of this page, return zero.
1076 *
1077 * Context: The caller should hold a refcount on the page to prevent it
1078 * from being split.
1079 * Return: The number of filesystem blocks covered by this page.
1080 */
1081static inline
1082unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
1083{
1084 return thp_size(page) >> inode->i_blkbits;
1085}
1da177e4 1086#endif /* _LINUX_PAGEMAP_H */