[linux-block.git] / mm/folio-compat.c
/*
 * Compatibility functions which bloat the callers too much to make inline.
 * All of the callers of these functions should be converted to use folios
 * eventually.
 */

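/*
 * Illustrative sketch of the conversion described above: a caller that
 * currently does
 *
 *	unlock_page(page);
 *
 * would instead resolve the folio once and use the folio API directly:
 *
 *	struct folio *folio = page_folio(page);
 *
 *	folio_unlock(folio);
 *
 * after which the corresponding wrapper below is no longer needed for that
 * call site.
 */
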
#include <linux/migrate.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include "internal.h"

struct address_space *page_mapping(struct page *page)
{
	return folio_mapping(page_folio(page));
}
EXPORT_SYMBOL(page_mapping);

void unlock_page(struct page *page)
{
	return folio_unlock(page_folio(page));
}
EXPORT_SYMBOL(unlock_page);

void end_page_writeback(struct page *page)
{
	return folio_end_writeback(page_folio(page));
}
EXPORT_SYMBOL(end_page_writeback);

void wait_on_page_writeback(struct page *page)
{
	return folio_wait_writeback(page_folio(page));
}
EXPORT_SYMBOL_GPL(wait_on_page_writeback);

void wait_for_stable_page(struct page *page)
{
	return folio_wait_stable(page_folio(page));
}
EXPORT_SYMBOL_GPL(wait_for_stable_page);

void mark_page_accessed(struct page *page)
{
	folio_mark_accessed(page_folio(page));
}
EXPORT_SYMBOL(mark_page_accessed);

bool set_page_writeback(struct page *page)
{
	return folio_start_writeback(page_folio(page));
}
EXPORT_SYMBOL(set_page_writeback);

bool set_page_dirty(struct page *page)
{
	return folio_mark_dirty(page_folio(page));
}
EXPORT_SYMBOL(set_page_dirty);

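/*
 * The buffer-head-less dirtying path: filemap_dirty_folio() is the folio
 * counterpart of __set_page_dirty_nobuffers(), with the mapping looked up
 * from the page as the legacy interface did.
 */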
int __set_page_dirty_nobuffers(struct page *page)
{
	return filemap_dirty_folio(page_mapping(page), page_folio(page));
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

bool clear_page_dirty_for_io(struct page *page)
{
	return folio_clear_dirty_for_io(page_folio(page));
}
EXPORT_SYMBOL(clear_page_dirty_for_io);

bool redirty_page_for_writepage(struct writeback_control *wbc,
		struct page *page)
{
	return folio_redirty_for_writepage(wbc, page_folio(page));
}
EXPORT_SYMBOL(redirty_page_for_writepage);

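/*
 * folio_add_lru_vma() adds the folio to the appropriate LRU list, using
 * @vma to decide whether it belongs on the unevictable list.
 */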
void lru_cache_add_inactive_or_unevictable(struct page *page,
		struct vm_area_struct *vma)
{
	folio_add_lru_vma(page_folio(page), vma);
}

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
		pgoff_t index, gfp_t gfp)
{
	return filemap_add_folio(mapping, page_folio(page), index, gfp);
}
EXPORT_SYMBOL(add_to_page_cache_lru);

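/*
 * pagecache_get_page() preserves the old calling convention: legacy callers
 * expect NULL on failure, whereas __filemap_get_folio() returns an ERR_PTR().
 * folio_file_page() then selects the exact page for @index within a large
 * folio.
 */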
noinline
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
		int fgp_flags, gfp_t gfp)
{
	struct folio *folio;

	folio = __filemap_get_folio(mapping, index, fgp_flags, gfp);
	if (IS_ERR(folio))
		return NULL;
	return folio_file_page(folio, index);
}
EXPORT_SYMBOL(pagecache_get_page);

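/*
 * Finds or creates a locked page suitable for starting a buffered write at
 * @index, via the FGP_WRITEBEGIN combination of flags.
 */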
struct page *grab_cache_page_write_begin(struct address_space *mapping,
					pgoff_t index)
{
	return pagecache_get_page(mapping, index, FGP_WRITEBEGIN,
			mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(grab_cache_page_write_begin);

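/*
 * Large folios must be isolated as a unit, so being handed a tail page here
 * indicates a caller bug: the warning fires and isolation is refused.  Once
 * the page is known not to be a tail, it is the folio itself, which makes
 * the cast below safe.
 */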
bool isolate_lru_page(struct page *page)
{
	if (WARN_RATELIMIT(PageTail(page), "trying to isolate tail page"))
		return false;
	return folio_isolate_lru((struct folio *)page);
}

void putback_lru_page(struct page *page)
{
	folio_putback_lru(page_folio(page));
}

#ifdef CONFIG_MMU
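/*
 * New anonymous rmap entries are only ever established on head pages, which
 * VM_BUG_ON_PAGE() asserts; the cast to struct folio relies on that.
 */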
void page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma,
		unsigned long address)
{
	VM_BUG_ON_PAGE(PageTail(page), page);

	return folio_add_new_anon_rmap((struct folio *)page, vma, address);
}
#endif