/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct pagevec;

static inline bool mapping_empty(struct address_space *mapping)
{
	return xa_empty(&mapping->i_pages);
}

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
	AS_THP_SUPPORT = 6,	/* THPs supported */
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called. We endeavor
 * to report errors on any file that was open at the time of the error. Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	__filemap_set_wb_err(mapping, error);

	/* Record it in superblock */
	if (mapping->host)
		errseq_set(&mapping->host->i_sb->s_wb_err, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}

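/*
 * Illustrative sketch (not part of this header): a filesystem's write-side
 * I/O completion handler might record failures like so. my_fs_end_io() is
 * a hypothetical name.
 *
 *	static void my_fs_end_io(struct bio *bio)
 *	{
 *		struct page *page = bio_first_page_all(bio);
 *
 *		if (bio->bi_status)
 *			mapping_set_error(page->mapping,
 *					blk_status_to_errno(bio->bi_status));
 *		end_page_writeback(page);
 *	}
 */
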
static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline bool mapping_unevictable(struct address_space *mapping)
{
	return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic. Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

static inline bool mapping_thp_support(struct address_space *mapping)
{
	return test_bit(AS_THP_SUPPORT, &mapping->flags);
}

static inline int filemap_nr_thps(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	return atomic_read(&mapping->nr_thps);
#else
	return 0;
#endif
}

static inline void filemap_nr_thps_inc(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_thp_support(mapping))
		atomic_inc(&mapping->nr_thps);
#else
	WARN_ON_ONCE(1);
#endif
}

static inline void filemap_nr_thps_dec(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_thp_support(mapping))
		atomic_dec(&mapping->nr_thps);
#else
	WARN_ON_ONCE(1);
#endif
}

void release_pages(struct page **pages, int nr);

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
static inline struct address_space *page_mapping_file(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return NULL;
	return page_mapping(page);
}

static inline bool page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return folio_ref_try_add_rcu((struct folio *)page, count);
}

static inline bool page_cache_get_speculative(struct page *page)
{
	return page_cache_add_speculative(page, 1);
}

/**
 * folio_attach_private - Attach private data to a folio.
 * @folio: Folio to attach data to.
 * @data: Data to attach to folio.
 *
 * Attaching private data to a folio increments the folio's reference count.
 * The data must be detached before the folio will be freed.
 */
static inline void folio_attach_private(struct folio *folio, void *data)
{
	folio_get(folio);
	folio->private = data;
	folio_set_private(folio);
}

/**
 * folio_detach_private - Detach private data from a folio.
 * @folio: Folio to detach data from.
 *
 * Removes the data that was previously attached to the folio and decrements
 * the refcount on the folio.
 *
 * Return: Data that was attached to the folio.
 */
static inline void *folio_detach_private(struct folio *folio)
{
	void *data = folio_get_private(folio);

	if (!folio_test_private(folio))
		return NULL;
	folio_clear_private(folio);
	folio->private = NULL;
	folio_put(folio);

	return data;
}

static inline void attach_page_private(struct page *page, void *data)
{
	folio_attach_private(page_folio(page), data);
}

static inline void *detach_page_private(struct page *page)
{
	return folio_detach_private(page_folio(page));
}

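/*
 * Illustrative sketch (not part of this header): filesystems pair these as
 * "attach on first use, detach on release". my_fs_release_page() and
 * struct my_fs_info are hypothetical names.
 *
 *	static int my_fs_release_page(struct page *page, gfp_t gfp)
 *	{
 *		struct my_fs_info *info = detach_page_private(page);
 *
 *		kfree(info);
 *		return 1;
 *	}
 */
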
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020
#define FGP_FOR_MMAP		0x00000040
#define FGP_HEAD		0x00000080
#define FGP_ENTRY		0x00000100

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

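/*
 * Illustrative sketch (not part of this header): a typical lookup takes the
 * reference via find_get_page() and drops it with put_page() when done.
 *
 *	struct page *page = find_get_page(mapping, index);
 *
 *	if (page) {
 *		... read from the page; the refcount keeps it alive ...
 *		put_page(page);
 *	}
 */
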
/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Looks up the page cache entry at @mapping & @index. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Context: May sleep.
 * Return: A struct page or %NULL if there is no page in the cache for this
 * index.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t index)
{
	return pagecache_get_page(mapping, index, FGP_LOCK, 0);
}

/**
 * find_lock_head - Locate, pin and lock a pagecache page.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index. If there is a
 * page cache page, its head page is returned locked and with an increased
 * refcount.
 *
 * Context: May sleep.
 * Return: A struct page which is !PageTail, or %NULL if there is no page
 * in the cache for this index.
 */
static inline struct page *find_lock_head(struct address_space *mapping,
					pgoff_t index)
{
	return pagecache_get_page(mapping, index, FGP_LOCK | FGP_HEAD, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list. The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, index,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}

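/*
 * Illustrative sketch (not part of this header): a write path that needs
 * the page present and locked before copying into it.
 *
 *	struct page *page = find_or_create_page(mapping, index,
 *						mapping_gfp_mask(mapping));
 *	if (!page)
 *		return -ENOMEM;
 *	... copy data into the page ...
 *	unlock_page(page);
 *	put_page(page);
 */
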
/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed. This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

/* Does this page contain this index? */
static inline bool thp_contains(struct page *head, pgoff_t index)
{
	/* HugeTLBfs indexes the page cache in units of hpage_size */
	if (PageHuge(head))
		return head->index == index;
	return page_index(head) == (index & ~(thp_nr_pages(head) - 1UL));
}

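/*
 * Worked example (illustrative): for a 2MB THP with 4KB base pages,
 * thp_nr_pages(head) is 512, so the mask clears the low nine index bits.
 * A head page at index 512 therefore contains indices 512..1023; for
 * index 700, 700 & ~511 == 512, which matches page_index(head).
 */
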
/*
 * Given the page we found in the page cache, return the page corresponding
 * to this index in the file
 */
static inline struct page *find_subpage(struct page *head, pgoff_t index)
{
	/* HugeTLBfs wants the head page regardless */
	if (PageHuge(head))
		return head;

	return head + (index & (thp_nr_pages(head) - 1));
}

unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
		pgoff_t end, struct pagevec *pvec, pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
			pgoff_t end, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
			pgoff_t *start, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
				    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
			pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
			pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
					nr_pages, pages);
}

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
						pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
			pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
			pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	return read_cache_page(mapping, index, NULL, data);
}

/*
 * Get index of the page within radix-tree (but not for hugetlb pages).
 * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	struct page *head;

	if (likely(!PageTransTail(page)))
		return page->index;

	head = compound_head(page);
	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page
	 */
	return head->index + page - head;
}

extern pgoff_t hugetlb_basepage_index(struct page *page);

/*
 * Get the offset in PAGE_SIZE (even for hugetlb pages).
 * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHuge(page)))
		return hugetlb_basepage_index(page);
	return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;

	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}

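/*
 * Worked example (illustrative): with 4KB pages, a VMA with
 * vm_start == 0x7f0000000000 and vm_pgoff == 16 maps a fault at
 * address 0x7f0000003000 to (0x3000 >> 12) + 16 == 19, i.e. the
 * twentieth page of the file.
 */
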
struct wait_page_key {
	struct page *page;
	int bit_nr;
	int page_match;
};

struct wait_page_queue {
	struct page *page;
	int bit_nr;
	wait_queue_entry_t wait;
};

static inline bool wake_page_match(struct wait_page_queue *wait_page,
				  struct wait_page_key *key)
{
	if (wait_page->page != key->page)
		return false;
	key->page_match = 1;

	if (wait_page->bit_nr != key->bit_nr)
		return false;

	return true;
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

/*
 * Return true if the page was successfully locked
 */
static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals. It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

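/*
 * Illustrative sketch (not part of this header): callers propagate the
 * error rather than retrying.
 *
 *	int err = lock_page_killable(page);
 *
 *	if (err) {
 *		put_page(page);
 *		return err;
 *	}
 */
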
/*
 * lock_page_async - Lock the page, unless this would block. If the page
 * is already locked, then queue a callback when the page becomes unlocked.
 * This callback can then retry the operation.
 *
 * Returns 0 if the page is locked successfully, or -EIOCBQUEUED if the page
 * was already locked and the callback defined in 'wait' was queued.
 */
static inline int lock_page_async(struct page *page,
				  struct wait_page_queue *wait)
{
	if (!trylock_page(page))
		return __lock_page_async(page, wait);
	return 0;
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_lock implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

1da177e4 | 605 | /* |
74d81bfa NP |
606 | * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc., |
607 | * and should not be used directly. | |
1da177e4 | 608 | */ |
b3c97528 | 609 | extern void wait_on_page_bit(struct page *page, int bit_nr); |
f62e00cc | 610 | extern int wait_on_page_bit_killable(struct page *page, int bit_nr); |
a4796e37 | 611 | |
1da177e4 LT |
612 | /* |
613 | * Wait for a page to be unlocked. | |
614 | * | |
615 | * This must be called with the caller "holding" the page, | |
616 | * ie with increased "page->count" so that the page won't | |
617 | * go away during the wait.. | |
618 | */ | |
619 | static inline void wait_on_page_locked(struct page *page) | |
620 | { | |
621 | if (PageLocked(page)) | |
48c935ad | 622 | wait_on_page_bit(compound_head(page), PG_locked); |
1da177e4 LT |
623 | } |
624 | ||
static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

int put_and_wait_on_page_locked(struct page *page, int state);
void wait_on_page_writeback(struct page *page);
int wait_on_page_writeback_killable(struct page *page);
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void __set_page_dirty(struct page *, struct address_space *, int warn);
int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/**
 * set_page_private_2 - Set PG_private_2 on a page and take a ref
 * @page: The page.
 *
 * Set the PG_private_2 flag on a page and take the reference needed for the VM
 * to handle its lifetime correctly. This sets the flag and takes the
 * reference unconditionally, so care must be taken not to set the flag again
 * if it's already set.
 */
static inline void set_page_private_2(struct page *page)
{
	page = compound_head(page);
	get_page(page);
	SetPagePrivate2(page);
}

void end_page_private_2(struct page *page);
void wait_on_page_private_2(struct page *page);
int wait_on_page_private_2_killable(struct page *page);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

/*
 * Fault in every page of the given userspace address range.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, size_t size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, size_t size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}

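/*
 * Illustrative sketch (not part of this header): a buffered-write path
 * pre-faults the user buffer before taking page locks under which a page
 * fault must not be taken.
 *
 *	if (fault_in_pages_readable(buf, count))
 *		return -EFAULT;
 *	... lock the destination page, copy from buf with pagefaults
 *	    disabled, retry on partial copies ...
 */
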
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
void replace_page_cache_page(struct page *old, struct page *new);
void delete_from_page_cache_batch(struct address_space *mapping,
				  struct pagevec *pvec);
loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
		int whence);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}

/**
 * struct readahead_control - Describes a readahead request.
 *
 * A readahead request is for consecutive pages. Filesystems which
 * implement the ->readahead method should call readahead_page() or
 * readahead_page_batch() in a loop and attempt to start I/O against
 * each page in the request.
 *
 * Most of the fields in this struct are private and should be accessed
 * by the functions below.
 *
 * @file: The file, used primarily by network filesystems for authentication.
 *	  May be NULL if invoked internally by the filesystem.
 * @mapping: Readahead this filesystem object.
 * @ra: File readahead state. May be NULL.
 */
struct readahead_control {
	struct file *file;
	struct address_space *mapping;
	struct file_ra_state *ra;
/* private: use the readahead_* accessors instead */
	pgoff_t _index;
	unsigned int _nr_pages;
	unsigned int _batch_count;
};

#define DEFINE_READAHEAD(ractl, f, r, m, i)				\
	struct readahead_control ractl = {				\
		.file = f,						\
		.mapping = m,						\
		.ra = r,						\
		._index = i,						\
	}

#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)

void page_cache_ra_unbounded(struct readahead_control *,
		unsigned long nr_to_read, unsigned long lookahead_count);
void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
void page_cache_async_ra(struct readahead_control *, struct page *,
		unsigned long req_count);
void readahead_expand(struct readahead_control *ractl,
		      loff_t new_start, size_t new_len);

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read. The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
static inline
void page_cache_sync_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *file, pgoff_t index,
		unsigned long req_count)
{
	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
	page_cache_sync_ra(&ractl, req_count);
}

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @page: The page at @index which triggered the readahead call.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_async_readahead() should be called when a page is used which
 * is marked as PageReadahead; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
static inline
void page_cache_async_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *file,
		struct page *page, pgoff_t index, unsigned long req_count)
{
	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
	page_cache_async_ra(&ractl, page, req_count);
}

/**
 * readahead_page - Get the next page to read.
 * @rac: The current readahead request.
 *
 * Context: The page is locked and has an elevated refcount. The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: A pointer to the next page, or %NULL if we are done.
 */
static inline struct page *readahead_page(struct readahead_control *rac)
{
	struct page *page;

	BUG_ON(rac->_batch_count > rac->_nr_pages);
	rac->_nr_pages -= rac->_batch_count;
	rac->_index += rac->_batch_count;

	if (!rac->_nr_pages) {
		rac->_batch_count = 0;
		return NULL;
	}

	page = xa_load(&rac->mapping->i_pages, rac->_index);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	rac->_batch_count = thp_nr_pages(page);

	return page;
}

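/*
 * Illustrative sketch (not part of this header): the canonical ->readahead
 * loop described above. my_fs_readahead() and my_fs_read_page() are
 * hypothetical names; my_fs_read_page() is assumed to unlock the page and
 * drop the reference when I/O completes.
 *
 *	static void my_fs_readahead(struct readahead_control *rac)
 *	{
 *		struct page *page;
 *
 *		while ((page = readahead_page(rac)) != NULL)
 *			my_fs_read_page(page);
 *	}
 */
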
static inline unsigned int __readahead_batch(struct readahead_control *rac,
		struct page **array, unsigned int array_sz)
{
	unsigned int i = 0;
	XA_STATE(xas, &rac->mapping->i_pages, 0);
	struct page *page;

	BUG_ON(rac->_batch_count > rac->_nr_pages);
	rac->_nr_pages -= rac->_batch_count;
	rac->_index += rac->_batch_count;
	rac->_batch_count = 0;

	xas_set(&xas, rac->_index);
	rcu_read_lock();
	xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
		if (xas_retry(&xas, page))
			continue;
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageTail(page), page);
		array[i++] = page;
		rac->_batch_count += thp_nr_pages(page);

		/*
		 * The page cache isn't using multi-index entries yet,
		 * so the xas cursor needs to be manually moved to the
		 * next index. This can be removed once the page cache
		 * is converted.
		 */
		if (PageHead(page))
			xas_set(&xas, rac->_index + rac->_batch_count);

		if (i == array_sz)
			break;
	}
	rcu_read_unlock();

	return i;
}

/**
 * readahead_page_batch - Get a batch of pages to read.
 * @rac: The current readahead request.
 * @array: An array of pointers to struct page.
 *
 * Context: The pages are locked and have an elevated refcount. The caller
 * should decrease the refcount once each page has been submitted for I/O
 * and unlock each page once all I/O to it has completed.
 * Return: The number of pages placed in the array. 0 indicates the request
 * is complete.
 */
#define readahead_page_batch(rac, array)				\
	__readahead_batch(rac, array, ARRAY_SIZE(array))

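/*
 * Illustrative sketch (not part of this header): batched consumption with
 * a fixed-size on-stack array; my_fs_read_page() is a hypothetical helper
 * as above.
 *
 *	struct page *pages[16];
 *	unsigned int i, nr;
 *
 *	while ((nr = readahead_page_batch(rac, pages))) {
 *		for (i = 0; i < nr; i++)
 *			my_fs_read_page(pages[i]);
 *	}
 */
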
/**
 * readahead_pos - The byte offset into the file of this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_pos(struct readahead_control *rac)
{
	return (loff_t)rac->_index * PAGE_SIZE;
}

/**
 * readahead_length - The number of bytes in this readahead request.
 * @rac: The readahead request.
 */
static inline size_t readahead_length(struct readahead_control *rac)
{
	return rac->_nr_pages * PAGE_SIZE;
}

/**
 * readahead_index - The index of the first page in this readahead request.
 * @rac: The readahead request.
 */
static inline pgoff_t readahead_index(struct readahead_control *rac)
{
	return rac->_index;
}

/**
 * readahead_count - The number of pages in this readahead request.
 * @rac: The readahead request.
 */
static inline unsigned int readahead_count(struct readahead_control *rac)
{
	return rac->_nr_pages;
}

/**
 * readahead_batch_length - The number of bytes in the current batch.
 * @rac: The readahead request.
 */
static inline size_t readahead_batch_length(struct readahead_control *rac)
{
	return rac->_batch_count * PAGE_SIZE;
}

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

/**
 * page_mkwrite_check_truncate - check if page was truncated
 * @page: the page to check
 * @inode: the inode to check the page against
 *
 * Returns the number of bytes in the page up to EOF,
 * or -EFAULT if the page was truncated.
 */
static inline int page_mkwrite_check_truncate(struct page *page,
					      struct inode *inode)
{
	loff_t size = i_size_read(inode);
	pgoff_t index = size >> PAGE_SHIFT;
	int offset = offset_in_page(size);

	if (page->mapping != inode->i_mapping)
		return -EFAULT;

	/* page is wholly inside EOF */
	if (page->index < index)
		return PAGE_SIZE;
	/* page is wholly past EOF */
	if (page->index > index || !offset)
		return -EFAULT;
	/* page is partially inside EOF */
	return offset;
}

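/*
 * Illustrative sketch (not part of this header): a ->page_mkwrite handler
 * revalidating the page after locking it. my_fs_page_mkwrite() is a
 * hypothetical name.
 *
 *	static vm_fault_t my_fs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct page *page = vmf->page;
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *
 *		lock_page(page);
 *		if (page_mkwrite_check_truncate(page, inode) < 0) {
 *			unlock_page(page);
 *			return VM_FAULT_NOPAGE;
 *		}
 *		... dirty the page, mark it stable ...
 *		return VM_FAULT_LOCKED;
 *	}
 */
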
/**
 * i_blocks_per_page - How many blocks fit in this page.
 * @inode: The inode which contains the blocks.
 * @page: The page (head page if the page is a THP).
 *
 * If the block size is larger than the size of this page, return zero.
 *
 * Context: The caller should hold a refcount on the page to prevent it
 * from being split.
 * Return: The number of filesystem blocks covered by this page.
 */
static inline
unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
{
	return thp_size(page) >> inode->i_blkbits;
}
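
/*
 * Worked example (illustrative): with 4KB pages and 1KB filesystem blocks
 * (i_blkbits == 10), a regular page holds 4096 >> 10 = 4 blocks, while a
 * 2MB THP holds 2097152 >> 10 = 2048.
 */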
#endif /* _LINUX_PAGEMAP_H */