/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct pagevec;

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
        AS_EIO          = 0,    /* IO error on async write */
        AS_ENOSPC       = 1,    /* ENOSPC on async write */
        AS_MM_ALL_LOCKS = 2,    /* under mm_take_all_locks() */
        AS_UNEVICTABLE  = 3,    /* e.g., ramdisk, SHM_LOCK */
        AS_EXITING      = 4,    /* final truncate in progress */
        /* writeback related tags are not used */
        AS_NO_WRITEBACK_TAGS = 5,
        AS_THP_SUPPORT = 6,     /* THPs supported */
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (likely(!error))
                return;

        /* Record in wb_err for checkers using errseq_t based tracking */
        __filemap_set_wb_err(mapping, error);

        /* Record it in superblock */
        if (mapping->host)
                errseq_set(&mapping->host->i_sb->s_wb_err, error);

        /* Record it in flags for now, for legacy callers */
        if (error == -ENOSPC)
                set_bit(AS_ENOSPC, &mapping->flags);
        else
                set_bit(AS_EIO, &mapping->flags);
}

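/*
 * Usage sketch (illustration only, not part of this header): a
 * filesystem's writeback completion path typically records any failure
 * against the mapping so a later fsync(2) can report it.
 * example_end_writeback() is a hypothetical helper.
 */
#if 0
static void example_end_writeback(struct address_space *mapping, int err)
{
        if (err)
                mapping_set_error(mapping, err); /* surfaced at fsync(2) */
}
#endif
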
static inline void mapping_set_unevictable(struct address_space *mapping)
{
        set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
        clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline bool mapping_unevictable(struct address_space *mapping)
{
        return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
        set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
        return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
        set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
        return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
        return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
                gfp_t gfp_mask)
{
        return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
        m->gfp_mask = mask;
}

static inline bool mapping_thp_support(struct address_space *mapping)
{
        return test_bit(AS_THP_SUPPORT, &mapping->flags);
}

static inline int filemap_nr_thps(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
        return atomic_read(&mapping->nr_thps);
#else
        return 0;
#endif
}

static inline void filemap_nr_thps_inc(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
        if (!mapping_thp_support(mapping))
                atomic_inc(&mapping->nr_thps);
#else
        WARN_ON_ONCE(1);
#endif
}

static inline void filemap_nr_thps_dec(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
        if (!mapping_thp_support(mapping))
                atomic_dec(&mapping->nr_thps);
#else
        WARN_ON_ONCE(1);
#endif
}

void release_pages(struct page **pages, int nr);

/*
 * Speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with the i_pages lock held):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using a lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int __page_cache_add_speculative(struct page *page, int count)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
        /*
         * Preempt must be disabled here - we rely on rcu_read_lock doing
         * this for us.
         *
         * Pagecache won't be truncated from interrupt context, so if we have
         * found a page in the radix tree here, we have pinned its refcount by
         * disabling preempt, and hence no need for the "speculative get" that
         * SMP requires.
         */
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        page_ref_add(page, count);

#else
        if (unlikely(!page_ref_add_unless(page, count, 0))) {
                /*
                 * Either the page has been freed, or will be freed.
                 * In either case, retry here and the caller should
                 * do the right thing (see comments above).
                 */
                return 0;
        }
#endif
        VM_BUG_ON_PAGE(PageTail(page), page);

        return 1;
}

static inline int page_cache_get_speculative(struct page *page)
{
        return __page_cache_add_speculative(page, 1);
}

static inline int page_cache_add_speculative(struct page *page, int count)
{
        return __page_cache_add_speculative(page, count);
}

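/*
 * Illustrative sketch of the lookup-side 1-2-3 protocol described above.
 * This is a simplified form of what mm/filemap.c actually does (which
 * uses an xa_state cursor and handles compound pages); shown here only
 * to make the retry pattern concrete.
 */
#if 0
static struct page *example_lockless_lookup(struct address_space *mapping,
                                            pgoff_t index)
{
        struct page *page;

        rcu_read_lock();
repeat:
        page = xa_load(&mapping->i_pages, index);       /* 1. find page */
        if (xa_is_value(page))
                page = NULL;                            /* shadow entry */
        if (page) {
                if (!page_cache_get_speculative(page))  /* 2. try to get ref */
                        goto repeat;
                /* 3. check the page is still the one at @index */
                if (unlikely(page != xa_load(&mapping->i_pages, index))) {
                        put_page(page);                 /* drop invalid ref */
                        goto repeat;
                }
        }
        rcu_read_unlock();
        return page;
}
#endif
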
/**
 * attach_page_private - Attach private data to a page.
 * @page: Page to attach data to.
 * @data: Data to attach to page.
 *
 * Attaching private data to a page increments the page's reference count.
 * The data must be detached before the page will be freed.
 */
static inline void attach_page_private(struct page *page, void *data)
{
        get_page(page);
        set_page_private(page, (unsigned long)data);
        SetPagePrivate(page);
}

/**
 * detach_page_private - Detach private data from a page.
 * @page: Page to detach data from.
 *
 * Removes the data that was previously attached to the page and decrements
 * the refcount on the page.
 *
 * Return: Data that was attached to the page.
 */
static inline void *detach_page_private(struct page *page)
{
        void *data = (void *)page_private(page);

        if (!PagePrivate(page))
                return NULL;
        ClearPagePrivate(page);
        set_page_private(page, 0);
        put_page(page);

        return data;
}

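/*
 * Usage sketch (illustration only): a filesystem might hang a small
 * per-page tracking structure off page->private while I/O is in flight.
 * "struct example_io_state" and the helpers are hypothetical.
 */
#if 0
static void example_start_io(struct page *page, struct example_io_state *st)
{
        attach_page_private(page, st);          /* takes a page reference */
}

static void example_end_io(struct page *page)
{
        struct example_io_state *st = detach_page_private(page);

        kfree(st);                              /* ref dropped by detach */
}
#endif
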
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
        return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
        return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED            0x00000001
#define FGP_LOCK                0x00000002
#define FGP_CREAT               0x00000004
#define FGP_WRITE               0x00000008
#define FGP_NOFS                0x00000010
#define FGP_NOWAIT              0x00000020
#define FGP_FOR_MMAP            0x00000040
#define FGP_HEAD                0x00000080
#define FGP_ENTRY               0x00000100

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
                int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
                                         pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
                                               pgoff_t offset, int fgp_flags)
{
        return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Looks up the page cache entry at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Context: May sleep.
 * Return: A struct page or %NULL if there is no page in the cache for this
 * index.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
                                          pgoff_t index)
{
        return pagecache_get_page(mapping, index, FGP_LOCK, 0);
}

/**
 * find_lock_head - Locate, pin and lock a pagecache page.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index.  If there is a
 * page cache page, its head page is returned locked and with an increased
 * refcount.
 *
 * Context: May sleep.
 * Return: A struct page which is !PageTail, or %NULL if there is no page
 * in the cache for this index.
 */
static inline struct page *find_lock_head(struct address_space *mapping,
                                          pgoff_t index)
{
        return pagecache_get_page(mapping, index, FGP_LOCK | FGP_HEAD, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
                                               pgoff_t index, gfp_t gfp_mask)
{
        return pagecache_get_page(mapping, index,
                                  FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
                                  gfp_mask);
}

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
                                                  pgoff_t index)
{
        return pagecache_get_page(mapping, index,
                                  FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
                                  mapping_gfp_mask(mapping));
}

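/*
 * Usage sketch (illustration only): the classic grab/fill/release
 * pattern for placing data into the page cache.  example_fill_page()
 * is hypothetical, and the direct memcpy() assumes a lowmem page.
 */
#if 0
static int example_fill_page(struct address_space *mapping, pgoff_t index,
                             const void *src, size_t len)
{
        struct page *page;

        page = find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
        if (!page)
                return -ENOMEM;
        memcpy(page_address(page), src, len);   /* would need kmap for highmem */
        SetPageUptodate(page);
        unlock_page(page);                      /* page came back locked... */
        put_page(page);                         /* ...and with a reference */
        return 0;
}
#endif
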
/* Does this page contain this index? */
static inline bool thp_contains(struct page *head, pgoff_t index)
{
        /* HugeTLBfs indexes the page cache in units of hpage_size */
        if (PageHuge(head))
                return head->index == index;
        return page_index(head) == (index & ~(thp_nr_pages(head) - 1UL));
}

/*
 * Given the page we found in the page cache, return the page corresponding
 * to this index in the file
 */
static inline struct page *find_subpage(struct page *head, pgoff_t index)
{
        /* HugeTLBfs wants the head page regardless */
        if (PageHuge(head))
                return head;

        return head + (index & (thp_nr_pages(head) - 1));
}

unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
                pgoff_t end, struct pagevec *pvec, pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
                        pgoff_t end, unsigned int nr_pages,
                        struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
                        pgoff_t *start, unsigned int nr_pages,
                        struct page **pages)
{
        return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
                                    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
                               unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
                        pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
                        struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
                        pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
                        struct page **pages)
{
        return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
                                        nr_pages, pages);
}

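/*
 * Usage sketch (illustration only): walking dirty pages with the tagged
 * lookup above, as a simple writeback loop might.  example_write_one()
 * is a hypothetical per-page writeout helper.
 */
#if 0
static void example_writeback(struct address_space *mapping)
{
        struct page *pages[16];
        pgoff_t index = 0;
        unsigned int i, nr;

        while ((nr = find_get_pages_range_tag(mapping, &index, (pgoff_t)-1,
                                              PAGECACHE_TAG_DIRTY,
                                              ARRAY_SIZE(pages), pages))) {
                for (i = 0; i < nr; i++) {
                        example_write_one(pages[i]);
                        put_page(pages[i]);     /* lookup took a reference */
                }
        }
}
#endif
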
struct page *grab_cache_page_write_begin(struct address_space *mapping,
                        pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
                                           pgoff_t index)
{
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
                        pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
                        pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
                struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
                                             pgoff_t index, void *data)
{
        return read_cache_page(mapping, index, NULL, data);
}

/*
 * Get the index of the page within the radix tree
 * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
        pgoff_t pgoff;

        if (likely(!PageTransTail(page)))
                return page->index;

        /*
         * We don't initialize ->index for tail pages: calculate based on
         * head page
         */
        pgoff = compound_head(page)->index;
        pgoff += page - compound_head(page);
        return pgoff;
}

/*
 * Get the offset in PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
        if (unlikely(PageHeadHuge(page)))
                return page->index << compound_order(page);

        return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
        return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
                                     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                        unsigned long address)
{
        pgoff_t pgoff;

        if (unlikely(is_vm_hugetlb_page(vma)))
                return linear_hugepage_index(vma, address);
        pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        return pgoff;
}

struct wait_page_key {
        struct page *page;
        int bit_nr;
        int page_match;
};

struct wait_page_queue {
        struct page *page;
        int bit_nr;
        wait_queue_entry_t wait;
};

static inline bool wake_page_match(struct wait_page_queue *wait_page,
                                   struct wait_page_key *key)
{
        if (wait_page->page != key->page)
                return false;
        key->page_match = 1;

        if (wait_page->bit_nr != key->bit_nr)
                return false;

        return true;
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                unsigned int flags);
extern void unlock_page(struct page *page);

/*
 * Return true if the page was successfully locked
 */
static inline int trylock_page(struct page *page)
{
        page = compound_head(page);
        return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                __lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                return __lock_page_killable(page);
        return 0;
}

/*
 * lock_page_async - Lock the page, unless this would block.  If the page
 * is already locked, then queue a callback when the page becomes unlocked.
 * This callback can then retry the operation.
 *
 * Returns 0 if the page is locked successfully, or -EIOCBQUEUED if the page
 * was already locked and the callback defined in 'wait' was queued.
 */
static inline int lock_page_async(struct page *page,
                                  struct wait_page_queue *wait)
{
        if (!trylock_page(page))
                return __lock_page_async(page, wait);
        return 0;
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_lock implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                     unsigned int flags)
{
        might_sleep();
        return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait..
 */
static inline void wait_on_page_locked(struct page *page)
{
        if (PageLocked(page))
                wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
        if (!PageLocked(page))
                return 0;
        return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

int put_and_wait_on_page_locked(struct page *page, int state);
void wait_on_page_writeback(struct page *page);
int wait_on_page_writeback_killable(struct page *page);
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

/*
 * Fault in all pages of the given userspace address range.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
        char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;
        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        do {
                if (unlikely(__put_user(0, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK))
                return __put_user(0, end);

        return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
        volatile char c;
        const char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;

        do {
                if (unlikely(__get_user(c, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK)) {
                return __get_user(c, end);
        }

        (void)c;
        return 0;
}

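/*
 * Usage sketch (illustration only): pre-fault a user buffer before
 * entering a region where taking a page fault could deadlock, e.g.
 * while holding a page lock the fault handler would also need.
 */
#if 0
static ssize_t example_copy_in(struct page *page, const char __user *buf,
                               size_t count)
{
        if (fault_in_pages_readable(buf, count))
                return -EFAULT;         /* buffer not accessible */
        lock_page(page);
        /*
         * The copy may still fault (another task can unmap the buffer),
         * but the pages have been touched once, so a short copy here is
         * rare and can be handled by retrying.
         */
        /* ... copy from @buf into @page here ... */
        unlock_page(page);
        return count;
}
#endif
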
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
void replace_page_cache_page(struct page *old, struct page *new);
void delete_from_page_cache_batch(struct address_space *mapping,
                                  struct pagevec *pvec);
loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
                int whence);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
                struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
        int error;

        __SetPageLocked(page);
        error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
        if (unlikely(error))
                __ClearPageLocked(page);
        return error;
}

/**
 * struct readahead_control - Describes a readahead request.
 *
 * A readahead request is for consecutive pages.  Filesystems which
 * implement the ->readahead method should call readahead_page() or
 * readahead_page_batch() in a loop and attempt to start I/O against
 * each page in the request.
 *
 * Most of the fields in this struct are private and should be accessed
 * by the functions below.
 *
 * @file: The file, used primarily by network filesystems for authentication.
 *        May be NULL if invoked internally by the filesystem.
 * @mapping: Readahead this filesystem object.
 */
struct readahead_control {
        struct file *file;
        struct address_space *mapping;
/* private: use the readahead_* accessors instead */
        pgoff_t _index;
        unsigned int _nr_pages;
        unsigned int _batch_count;
};

#define DEFINE_READAHEAD(rac, f, m, i)                                  \
        struct readahead_control rac = {                                \
                .file = f,                                              \
                .mapping = m,                                           \
                ._index = i,                                            \
        }

#define VM_READAHEAD_PAGES      (SZ_128K / PAGE_SIZE)

void page_cache_ra_unbounded(struct readahead_control *,
                unsigned long nr_to_read, unsigned long lookahead_count);
void page_cache_sync_ra(struct readahead_control *, struct file_ra_state *,
                unsigned long req_count);
void page_cache_async_ra(struct readahead_control *, struct file_ra_state *,
                struct page *, unsigned long req_count);

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
static inline
void page_cache_sync_readahead(struct address_space *mapping,
                struct file_ra_state *ra, struct file *file, pgoff_t index,
                unsigned long req_count)
{
        DEFINE_READAHEAD(ractl, file, mapping, index);
        page_cache_sync_ra(&ractl, ra, req_count);
}

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @page: The page at @index which triggered the readahead call.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_async_readahead() should be called when a page is used which
 * is marked as PageReadahead; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
static inline
void page_cache_async_readahead(struct address_space *mapping,
                struct file_ra_state *ra, struct file *file,
                struct page *page, pgoff_t index, unsigned long req_count)
{
        DEFINE_READAHEAD(ractl, file, mapping, index);
        page_cache_async_ra(&ractl, ra, page, req_count);
}

/**
 * readahead_page - Get the next page to read.
 * @rac: The current readahead request.
 *
 * Context: The page is locked and has an elevated refcount.  The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: A pointer to the next page, or %NULL if we are done.
 */
static inline struct page *readahead_page(struct readahead_control *rac)
{
        struct page *page;

        BUG_ON(rac->_batch_count > rac->_nr_pages);
        rac->_nr_pages -= rac->_batch_count;
        rac->_index += rac->_batch_count;

        if (!rac->_nr_pages) {
                rac->_batch_count = 0;
                return NULL;
        }

        page = xa_load(&rac->mapping->i_pages, rac->_index);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        rac->_batch_count = thp_nr_pages(page);

        return page;
}

894 | ||
895 | static inline unsigned int __readahead_batch(struct readahead_control *rac, | |
896 | struct page **array, unsigned int array_sz) | |
897 | { | |
898 | unsigned int i = 0; | |
899 | XA_STATE(xas, &rac->mapping->i_pages, 0); | |
900 | struct page *page; | |
901 | ||
902 | BUG_ON(rac->_batch_count > rac->_nr_pages); | |
903 | rac->_nr_pages -= rac->_batch_count; | |
904 | rac->_index += rac->_batch_count; | |
905 | rac->_batch_count = 0; | |
906 | ||
907 | xas_set(&xas, rac->_index); | |
908 | rcu_read_lock(); | |
909 | xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) { | |
4349a83a MWO |
910 | if (xas_retry(&xas, page)) |
911 | continue; | |
042124cc MWO |
912 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
913 | VM_BUG_ON_PAGE(PageTail(page), page); | |
914 | array[i++] = page; | |
6c357848 | 915 | rac->_batch_count += thp_nr_pages(page); |
042124cc MWO |
916 | |
917 | /* | |
918 | * The page cache isn't using multi-index entries yet, | |
919 | * so the xas cursor needs to be manually moved to the | |
920 | * next index. This can be removed once the page cache | |
921 | * is converted. | |
922 | */ | |
923 | if (PageHead(page)) | |
924 | xas_set(&xas, rac->_index + rac->_batch_count); | |
925 | ||
926 | if (i == array_sz) | |
927 | break; | |
928 | } | |
929 | rcu_read_unlock(); | |
930 | ||
931 | return i; | |
932 | } | |
933 | ||
934 | /** | |
935 | * readahead_page_batch - Get a batch of pages to read. | |
936 | * @rac: The current readahead request. | |
937 | * @array: An array of pointers to struct page. | |
938 | * | |
939 | * Context: The pages are locked and have an elevated refcount. The caller | |
940 | * should decreases the refcount once the page has been submitted for I/O | |
941 | * and unlock the page once all I/O to that page has completed. | |
942 | * Return: The number of pages placed in the array. 0 indicates the request | |
943 | * is complete. | |
944 | */ | |
945 | #define readahead_page_batch(rac, array) \ | |
946 | __readahead_batch(rac, array, ARRAY_SIZE(array)) | |
947 | ||
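/*
 * Usage sketch (illustration only): consuming a readahead request in
 * fixed-size batches with an on-stack array, using the hypothetical
 * example_start_read() from the previous sketch.
 */
#if 0
static void example_readahead_batched(struct readahead_control *rac)
{
        struct page *pages[8];
        unsigned int i, nr;

        while ((nr = readahead_page_batch(rac, pages))) {
                for (i = 0; i < nr; i++) {
                        example_start_read(pages[i]);
                        put_page(pages[i]);     /* drop the elevated ref */
                }
        }
}
#endif
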
/**
 * readahead_pos - The byte offset into the file of this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_pos(struct readahead_control *rac)
{
        return (loff_t)rac->_index * PAGE_SIZE;
}

/**
 * readahead_length - The number of bytes in this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_length(struct readahead_control *rac)
{
        return (loff_t)rac->_nr_pages * PAGE_SIZE;
}

/**
 * readahead_index - The index of the first page in this readahead request.
 * @rac: The readahead request.
 */
static inline pgoff_t readahead_index(struct readahead_control *rac)
{
        return rac->_index;
}

/**
 * readahead_count - The number of pages in this readahead request.
 * @rac: The readahead request.
 */
static inline unsigned int readahead_count(struct readahead_control *rac)
{
        return rac->_nr_pages;
}

static inline unsigned long dir_pages(struct inode *inode)
{
        return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
                               PAGE_SHIFT;
}

/**
 * page_mkwrite_check_truncate - check if page was truncated
 * @page: the page to check
 * @inode: the inode to check the page against
 *
 * Returns the number of bytes in the page up to EOF,
 * or -EFAULT if the page was truncated.
 */
static inline int page_mkwrite_check_truncate(struct page *page,
                                              struct inode *inode)
{
        loff_t size = i_size_read(inode);
        pgoff_t index = size >> PAGE_SHIFT;
        int offset = offset_in_page(size);

        if (page->mapping != inode->i_mapping)
                return -EFAULT;

        /* page is wholly inside EOF */
        if (page->index < index)
                return PAGE_SIZE;
        /* page is wholly past EOF */
        if (page->index > index || !offset)
                return -EFAULT;
        /* page is partially inside EOF */
        return offset;
}

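/*
 * Usage sketch (illustration only): a ->page_mkwrite handler checking
 * for a racing truncate before dirtying the page.
 */
#if 0
static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct inode *inode = file_inode(vmf->vma->vm_file);

        lock_page(page);
        if (page_mkwrite_check_truncate(page, inode) < 0) {
                unlock_page(page);
                return VM_FAULT_NOPAGE;         /* page was truncated away */
        }
        set_page_dirty(page);
        wait_for_stable_page(page);
        /* return with the page still locked */
        return VM_FAULT_LOCKED;
}
#endif
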
/**
 * i_blocks_per_page - How many blocks fit in this page.
 * @inode: The inode which contains the blocks.
 * @page: The page (head page if the page is a THP).
 *
 * If the block size is larger than the size of this page, return zero.
 *
 * Context: The caller should hold a refcount on the page to prevent it
 * from being split.
 * Return: The number of filesystem blocks covered by this page.
 */
static inline
unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
{
        return thp_size(page) >> inode->i_blkbits;
}
#endif /* _LINUX_PAGEMAP_H */