// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999 Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/error-injection.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include <linux/shmem_fs.h>
#include <linux/rmap.h>
#include <linux/delayacct.h>
#include <linux/psi.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995 Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_rwsem		(truncate_pagecache)
 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock		(exclusive_swap_page, others)
 *        ->i_pages lock
 *
 *  ->i_mutex
 *    ->i_mmap_rwsem		(truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_rwsem
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->i_pages lock	(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page		(access_process_vm)
 *
 *  ->i_mutex			(generic_perform_write)
 *    ->mmap_sem		(fault_in_pages_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock			(fs/fs-writeback.c)
 *    ->i_pages lock		(__sync_single_inode)
 *
 *  ->i_mmap_rwsem
 *    ->anon_vma.lock		(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock		(try_to_unmap_one)
 *    ->private_lock		(try_to_unmap_one)
 *    ->i_pages lock		(try_to_unmap_one)
 *    ->pgdat->lru_lock		(follow_page->mark_page_accessed)
 *    ->pgdat->lru_lock		(check_pte_range->isolate_lru_page)
 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 *    ->i_pages lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
 *    ->memcg->move_lock	(page_remove_rmap->lock_page_memcg)
 *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
 *
 * ->i_mmap_rwsem
 *   ->tasklist_lock		(memory_failure, collect_procs_ao)
 */

static void page_cache_delete(struct address_space *mapping,
				   struct page *page, void *shadow)
{
	XA_STATE(xas, &mapping->i_pages, page->index);
	unsigned int nr = 1;

	mapping_set_update(&xas, mapping);

	/* hugetlb pages are represented by a single entry in the xarray */
	if (!PageHuge(page)) {
		xas_set_order(&xas, page->index, compound_order(page));
		nr = 1U << compound_order(page);
	}

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(nr != 1 && shadow, page);

	xas_store(&xas, shadow);
	xas_init_marks(&xas);

	page->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */

	if (shadow) {
		mapping->nrexceptional += nr;
		/*
		 * Make sure the nrexceptional update is committed before
		 * the nrpages update so that final truncate racing
		 * with reclaim does not see both counters 0 at the
		 * same time and miss a shadow entry.
		 */
		smp_wmb();
	}
	mapping->nrpages -= nr;
}

static void unaccount_page_cache_page(struct address_space *mapping,
				      struct page *page)
{
	int nr;

	/*
	 * if we're uptodate, flush out into the cleancache, otherwise
	 * invalidate any existing cleancache entries.  We can't leave
	 * stale data around in the cleancache once our page is gone
	 */
	if (PageUptodate(page) && PageMappedToDisk(page))
		cleancache_put_page(page);
	else
		cleancache_invalidate_page(mapping, page);

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_mapped(page), page);
	if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) {
		int mapcount;

		pr_alert("BUG: Bad page cache in process %s  pfn:%05lx\n",
			 current->comm, page_to_pfn(page));
		dump_page(page, "still mapped when deleted");
		dump_stack();
		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

		mapcount = page_mapcount(page);
		if (mapping_exiting(mapping) &&
		    page_count(page) >= mapcount + 2) {
			/*
			 * All vmas have already been torn down, so it's
			 * a good bet that actually the page is unmapped,
			 * and we'd prefer not to leak it: if we're wrong,
			 * some other bad page check should catch it later.
			 */
			page_mapcount_reset(page);
			page_ref_sub(page, mapcount);
		}
	}

	/* hugetlb pages do not participate in page cache accounting. */
	if (PageHuge(page))
		return;

	nr = hpage_nr_pages(page);

	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	if (PageSwapBacked(page)) {
		__mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
		if (PageTransHuge(page))
			__dec_node_page_state(page, NR_SHMEM_THPS);
	} else {
		VM_BUG_ON_PAGE(PageTransHuge(page), page);
	}

	/*
	 * At this point page must be either written or cleaned by
	 * truncate.  Dirty page here signals a bug and loss of
	 * unwritten data.
	 *
	 * This fixes dirty accounting after removing the page entirely
	 * but leaves PageDirty set: it has no effect for truncated
	 * page and anyway will be cleared before returning page into
	 * buddy allocator.
	 */
	if (WARN_ON_ONCE(PageDirty(page)))
		account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
}

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the i_pages lock.
 */
void __delete_from_page_cache(struct page *page, void *shadow)
{
	struct address_space *mapping = page->mapping;

	trace_mm_filemap_delete_from_page_cache(page);

	unaccount_page_cache_page(mapping, page);
	page_cache_delete(mapping, page, shadow);
}

static void page_cache_free_page(struct address_space *mapping,
				struct page *page)
{
	void (*freepage)(struct page *);

	freepage = mapping->a_ops->freepage;
	if (freepage)
		freepage(page);

	if (PageTransHuge(page) && !PageHuge(page)) {
		page_ref_sub(page, HPAGE_PMD_NR);
		VM_BUG_ON_PAGE(page_count(page) <= 0, page);
	} else {
		put_page(page);
	}
}

/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the page
 * cache and locked.  It will never put the page into the free list, the caller
 * has a reference on the page.
 */
void delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long flags;

	BUG_ON(!PageLocked(page));
	xa_lock_irqsave(&mapping->i_pages, flags);
	__delete_from_page_cache(page, NULL);
	xa_unlock_irqrestore(&mapping->i_pages, flags);

	page_cache_free_page(mapping, page);
}
EXPORT_SYMBOL(delete_from_page_cache);

/*
 * page_cache_delete_batch - delete several pages from page cache
 * @mapping: the mapping to which pages belong
 * @pvec: pagevec with pages to delete
 *
 * The function walks over mapping->i_pages and removes pages passed in @pvec
 * from the mapping. The function expects @pvec to be sorted by page index.
 * It tolerates holes in @pvec (mapping entries at those indices are not
 * modified). The function expects only THP head pages to be present in the
 * @pvec and takes care to delete all corresponding tail pages from the
 * mapping as well.
 *
 * The function expects the i_pages lock to be held.
 */
static void page_cache_delete_batch(struct address_space *mapping,
			     struct pagevec *pvec)
{
	XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index);
	int total_pages = 0;
	int i = 0, tail_pages = 0;
	struct page *page;

	mapping_set_update(&xas, mapping);
	xas_for_each(&xas, page, ULONG_MAX) {
		if (i >= pagevec_count(pvec) && !tail_pages)
			break;
		if (xa_is_value(page))
			continue;
		if (!tail_pages) {
			/*
			 * Some page got inserted in our range? Skip it. We
			 * have our pages locked so they are protected from
			 * being removed.
			 */
			if (page != pvec->pages[i]) {
				VM_BUG_ON_PAGE(page->index >
						pvec->pages[i]->index, page);
				continue;
			}
			WARN_ON_ONCE(!PageLocked(page));
			if (PageTransHuge(page) && !PageHuge(page))
				tail_pages = HPAGE_PMD_NR - 1;
			page->mapping = NULL;
			/*
			 * Leave page->index set: truncation lookup relies
			 * upon it
			 */
			i++;
		} else {
			VM_BUG_ON_PAGE(page->index + HPAGE_PMD_NR - tail_pages
					!= pvec->pages[i]->index, page);
			tail_pages--;
		}
		xas_store(&xas, NULL);
		total_pages++;
	}
	mapping->nrpages -= total_pages;
}

void delete_from_page_cache_batch(struct address_space *mapping,
				  struct pagevec *pvec)
{
	int i;
	unsigned long flags;

	if (!pagevec_count(pvec))
		return;

	xa_lock_irqsave(&mapping->i_pages, flags);
	for (i = 0; i < pagevec_count(pvec); i++) {
		trace_mm_filemap_delete_from_page_cache(pvec->pages[i]);

		unaccount_page_cache_page(mapping, pvec->pages[i]);
	}
	page_cache_delete_batch(mapping, pvec);
	xa_unlock_irqrestore(&mapping->i_pages, flags);

	for (i = 0; i < pagevec_count(pvec); i++)
		page_cache_free_page(mapping, pvec->pages[i]);
}

int filemap_check_errors(struct address_space *mapping)
{
	int ret = 0;
	/* Check for outstanding write errors */
	if (test_bit(AS_ENOSPC, &mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}
EXPORT_SYMBOL(filemap_check_errors);

static int filemap_check_and_keep_errors(struct address_space *mapping)
{
	/* Check for outstanding write errors */
	if (test_bit(AS_EIO, &mapping->flags))
		return -EIO;
	if (test_bit(AS_ENOSPC, &mapping->flags))
		return -ENOSPC;
	return 0;
}

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	wbc_attach_fdatawrite_inode(&wbc, mapping->host);
	ret = do_writepages(mapping, &wbc);
	wbc_detach_inode(&wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

/**
 * filemap_range_has_page - check if a page exists in range.
 * @mapping:		address space within which to check
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Find at least one page in the range supplied, usually used to check if
 * direct writing in this range will trigger a writeback.
 *
 * Return: %true if at least one page exists in the specified range,
 * %false otherwise.
 */
bool filemap_range_has_page(struct address_space *mapping,
			   loff_t start_byte, loff_t end_byte)
{
	struct page *page;
	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
	pgoff_t max = end_byte >> PAGE_SHIFT;

	if (end_byte < start_byte)
		return false;

	rcu_read_lock();
	for (;;) {
		page = xas_find(&xas, max);
		if (xas_retry(&xas, page))
			continue;
		/* Shadow entries don't count */
		if (xa_is_value(page))
			continue;
		/*
		 * We don't need to try to pin this page; we're about to
		 * release the RCU lock anyway.  It is enough to know that
		 * there was a page here recently.
		 */
		break;
	}
	rcu_read_unlock();

	return page != NULL;
}
EXPORT_SYMBOL(filemap_range_has_page);
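
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * pattern for filemap_range_has_page() as documented above - checking
 * whether a direct-I/O write would overlap cached pages and therefore
 * needs a flush first.  The helper name dio_needs_flush() is hypothetical.
 */
#if 0	/* example only */
static bool dio_needs_flush(struct file *file, loff_t pos, size_t count)
{
	struct address_space *mapping = file->f_mapping;

	/* end_byte is inclusive, hence the "- 1" */
	return filemap_range_has_page(mapping, pos, pos + count - 1);
}
#endif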

static void __filemap_fdatawait_range(struct address_space *mapping,
				     loff_t start_byte, loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_SHIFT;
	pgoff_t end = end_byte >> PAGE_SHIFT;
	struct pagevec pvec;
	int nr_pages;

	if (end_byte < start_byte)
		return;

	pagevec_init(&pvec);
	while (index <= end) {
		unsigned i;

		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
				end, PAGECACHE_TAG_WRITEBACK);
		if (!nr_pages)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			wait_on_page_writeback(page);
			ClearPageError(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.  Check error status of
 * the address space and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 *
 * Return: error status of the address space.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return filemap_check_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_range);

/**
 * filemap_fdatawait_range_keep_errors - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space in the
 * given range and wait for all of them.  Unlike filemap_fdatawait_range(),
 * this function does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves.  Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 */
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
		loff_t start_byte, loff_t end_byte)
{
	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return filemap_check_and_keep_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);

/**
 * file_fdatawait_range - wait for writeback to complete
 * @file:		file pointing to address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the address space that file
 * refers to, in the given range and wait for all of them.  Check error
 * status of the address space vs. the file->f_wb_err cursor and return it.
 *
 * Since the error status of the file is advanced by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 *
 * Return: error status of the address space vs. the file->f_wb_err cursor.
 */
int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
{
	struct address_space *mapping = file->f_mapping;

	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return file_check_and_advance_wb_err(file);
}
EXPORT_SYMBOL(file_fdatawait_range);

/**
 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.  Unlike filemap_fdatawait(), this function
 * does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves.  Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 *
 * Return: error status of the address space.
 */
int filemap_fdatawait_keep_errors(struct address_space *mapping)
{
	__filemap_fdatawait_range(mapping, 0, LLONG_MAX);
	return filemap_check_and_keep_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_keep_errors);

static bool mapping_needs_writeback(struct address_space *mapping)
{
	return (!dax_mapping(mapping) && mapping->nrpages) ||
	    (dax_mapping(mapping) && mapping->nrexceptional);
}

int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if (mapping_needs_writeback(mapping)) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But the -EIO is special case, it may indicate the worst
		 * thing (e.g. bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		} else {
			/* Clear any previously stored errors */
			filemap_check_errors(mapping);
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 *
 * Return: error status of the address space.
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if (mapping_needs_writeback(mapping)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = filemap_fdatawait_range(mapping,
						lstart, lend);
			if (!err)
				err = err2;
		} else {
			/* Clear any previously stored errors */
			filemap_check_errors(mapping);
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
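
/*
 * Illustrative sketch (not part of the original file): how a simple
 * ->fsync() implementation might use filemap_write_and_wait_range() as
 * documented above.  The function name and the absence of any metadata
 * handling are assumptions made purely for illustration.
 */
#if 0	/* example only */
static int example_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	/* Write out dirty pages in [start, end] and wait for them. */
	return filemap_write_and_wait_range(file->f_mapping, start, end);
}
#endif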

void __filemap_set_wb_err(struct address_space *mapping, int err)
{
	errseq_t eseq = errseq_set(&mapping->wb_err, err);

	trace_filemap_set_wb_err(mapping, eseq);
}
EXPORT_SYMBOL(__filemap_set_wb_err);

/**
 * file_check_and_advance_wb_err - report wb error (if any) that was previously
 * 				   and advance wb_err to current one
 * @file: struct file on which the error is being reported
 *
 * When userland calls fsync (or something like nfsd does the equivalent), we
 * want to report any writeback errors that occurred since the last fsync (or
 * since the file was opened if there haven't been any).
 *
 * Grab the wb_err from the mapping. If it matches what we have in the file,
 * then just quickly return 0. The file is all caught up.
 *
 * If it doesn't match, then take the mapping value, set the "seen" flag in
 * it and try to swap it into place. If it works, or another task beat us
 * to it with the new value, then update the f_wb_err and return the error
 * portion. The error at this point must be reported via proper channels
 * (a'la fsync, or NFS COMMIT operation, etc.).
 *
 * While we handle mapping->wb_err with atomic operations, the f_wb_err
 * value is protected by the f_lock since we must ensure that it reflects
 * the latest value swapped in for this file descriptor.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int file_check_and_advance_wb_err(struct file *file)
{
	int err = 0;
	errseq_t old = READ_ONCE(file->f_wb_err);
	struct address_space *mapping = file->f_mapping;

	/* Locklessly handle the common case where nothing has changed */
	if (errseq_check(&mapping->wb_err, old)) {
		/* Something changed, must use slow path */
		spin_lock(&file->f_lock);
		old = file->f_wb_err;
		err = errseq_check_and_advance(&mapping->wb_err,
						&file->f_wb_err);
		trace_file_check_and_advance_wb_err(file, old);
		spin_unlock(&file->f_lock);
	}

	/*
	 * We're mostly using this function as a drop in replacement for
	 * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect
	 * that the legacy code would have had on these flags.
	 */
	clear_bit(AS_EIO, &mapping->flags);
	clear_bit(AS_ENOSPC, &mapping->flags);
	return err;
}
EXPORT_SYMBOL(file_check_and_advance_wb_err);
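
/*
 * Illustrative sketch (not part of the original file): the errseq_t usage
 * pattern that the comment above describes.  A writeback error is recorded
 * against the mapping with filemap_set_wb_err(), and each struct file later
 * learns about it exactly once via file_check_and_advance_wb_err().  The
 * function below is hypothetical.
 */
#if 0	/* example only */
static int example_report_wb_error(struct file *file)
{
	/* Record a writeback error against the mapping ... */
	filemap_set_wb_err(file->f_mapping, -EIO);

	/*
	 * ... and later, at fsync time, report it once to this file
	 * descriptor and advance its cursor past the error.
	 */
	return file_check_and_advance_wb_err(file);
}
#endif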

/**
 * file_write_and_wait_range - write out & wait on a file range
 * @file:	file pointing to address_space with pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 *
 * After writing out and waiting on the data, we check and advance the
 * f_wb_err cursor to the latest value, and return any errors detected there.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
{
	int err = 0, err2;
	struct address_space *mapping = file->f_mapping;

	if (mapping_needs_writeback(mapping)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO)
			__filemap_fdatawait_range(mapping, lstart, lend);
	}
	err2 = file_check_and_advance_wb_err(file);
	if (!err)
		err = err2;
	return err;
}
EXPORT_SYMBOL(file_write_and_wait_range);

/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old:	page to be replaced
 * @new:	page to replace with
 * @gfp_mask:	allocation mode
 *
 * This function replaces a page in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page.  Both the old and new pages must be
 * locked.  This function does not add the new page to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic.  This function cannot fail.
 *
 * Return: %0
 */
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
	struct address_space *mapping = old->mapping;
	void (*freepage)(struct page *) = mapping->a_ops->freepage;
	pgoff_t offset = old->index;
	XA_STATE(xas, &mapping->i_pages, offset);
	unsigned long flags;

	VM_BUG_ON_PAGE(!PageLocked(old), old);
	VM_BUG_ON_PAGE(!PageLocked(new), new);
	VM_BUG_ON_PAGE(new->mapping, new);

	get_page(new);
	new->mapping = mapping;
	new->index = offset;

	xas_lock_irqsave(&xas, flags);
	xas_store(&xas, new);

	old->mapping = NULL;
	/* hugetlb pages do not participate in page cache accounting. */
	if (!PageHuge(old))
		__dec_node_page_state(new, NR_FILE_PAGES);
	if (!PageHuge(new))
		__inc_node_page_state(new, NR_FILE_PAGES);
	if (PageSwapBacked(old))
		__dec_node_page_state(new, NR_SHMEM);
	if (PageSwapBacked(new))
		__inc_node_page_state(new, NR_SHMEM);
	xas_unlock_irqrestore(&xas, flags);
	mem_cgroup_migrate(old, new);
	if (freepage)
		freepage(old);
	put_page(old);

	return 0;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);

static int __add_to_page_cache_locked(struct page *page,
				      struct address_space *mapping,
				      pgoff_t offset, gfp_t gfp_mask,
				      void **shadowp)
{
	XA_STATE(xas, &mapping->i_pages, offset);
	int huge = PageHuge(page);
	struct mem_cgroup *memcg;
	int error;
	void *old;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapBacked(page), page);
	mapping_set_update(&xas, mapping);

	if (!huge) {
		error = mem_cgroup_try_charge(page, current->mm,
					      gfp_mask, &memcg, false);
		if (error)
			return error;
	}

	get_page(page);
	page->mapping = mapping;
	page->index = offset;

	do {
		xas_lock_irq(&xas);
		old = xas_load(&xas);
		if (old && !xa_is_value(old))
			xas_set_err(&xas, -EEXIST);
		xas_store(&xas, page);
		if (xas_error(&xas))
			goto unlock;

		if (xa_is_value(old)) {
			mapping->nrexceptional--;
			if (shadowp)
				*shadowp = old;
		}
		mapping->nrpages++;

		/* hugetlb pages do not participate in page cache accounting */
		if (!huge)
			__inc_node_page_state(page, NR_FILE_PAGES);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp_mask & GFP_RECLAIM_MASK));

	if (xas_error(&xas))
		goto error;

	if (!huge)
		mem_cgroup_commit_charge(page, memcg, false, false);
	trace_mm_filemap_add_to_page_cache(page);
	return 0;
error:
	page->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */
	if (!huge)
		mem_cgroup_cancel_charge(page, memcg, false);
	put_page(page);
	return xas_error(&xas);
}
ALLOW_ERROR_INJECTION(__add_to_page_cache_locked, ERRNO);

/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page:	page to add
 * @mapping:	the page's address_space
 * @offset:	page index
 * @gfp_mask:	page allocation mode
 *
 * This function is used to add a page to the pagecache. It must be locked.
 * This function does not add the page to the LRU.  The caller must do that.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
		pgoff_t offset, gfp_t gfp_mask)
{
	return __add_to_page_cache_locked(page, mapping, offset,
					  gfp_mask, NULL);
}
EXPORT_SYMBOL(add_to_page_cache_locked);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t offset, gfp_t gfp_mask)
{
	void *shadow = NULL;
	int ret;

	__SetPageLocked(page);
	ret = __add_to_page_cache_locked(page, mapping, offset,
					 gfp_mask, &shadow);
	if (unlikely(ret))
		__ClearPageLocked(page);
	else {
		/*
		 * The page might have been evicted from cache only
		 * recently, in which case it should be activated like
		 * any other repeatedly accessed page.
		 * The exception is pages getting rewritten; evicting other
		 * data from the working set, only to cache data that will
		 * get overwritten with something else, is a waste of memory.
		 */
		WARN_ON_ONCE(PageActive(page));
		if (!(gfp_mask & __GFP_WRITE) && shadow)
			workingset_refault(page, shadow);
		lru_cache_add(page);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
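
/*
 * Illustrative sketch (not part of the original file): the usual pattern
 * for populating the page cache with add_to_page_cache_lru(), roughly as
 * the readahead/readpage paths do.  example_read_one_page() and the direct
 * use of mapping->a_ops->readpage() here are simplifying assumptions.
 */
#if 0	/* example only */
static int example_read_one_page(struct address_space *mapping,
				 struct file *file, pgoff_t index)
{
	struct page *page = __page_cache_alloc(mapping_gfp_mask(mapping));
	int err;

	if (!page)
		return -ENOMEM;

	/* Insert the locked page at @index and put it on the LRU. */
	err = add_to_page_cache_lru(page, mapping, index,
				    mapping_gfp_mask(mapping));
	if (err) {
		put_page(page);
		/* -EEXIST means someone else added a page here first. */
		return err == -EEXIST ? 0 : err;
	}

	/* ->readpage() unlocks the page when the read completes. */
	return mapping->a_ops->readpage(file, page);
}
#endif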

#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
	int n;
	struct page *page;

	if (cpuset_do_page_mem_spread()) {
		unsigned int cpuset_mems_cookie;
		do {
			cpuset_mems_cookie = read_mems_allowed_begin();
			n = cpuset_mem_spread_node();
			page = __alloc_pages_node(n, gfp, 0);
		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));

		return page;
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
#define PAGE_WAIT_TABLE_BITS 8
#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
static wait_queue_head_t page_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;

static wait_queue_head_t *page_waitqueue(struct page *page)
{
	return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)];
}

void __init pagecache_init(void)
{
	int i;

	for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
		init_waitqueue_head(&page_wait_table[i]);

	page_writeback_init();
}

/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */
struct wait_page_key {
	struct page *page;
	int bit_nr;
	int page_match;
};

struct wait_page_queue {
	struct page *page;
	int bit_nr;
	wait_queue_entry_t wait;
};

static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
{
	struct wait_page_key *key = arg;
	struct wait_page_queue *wait_page
		= container_of(wait, struct wait_page_queue, wait);

	if (wait_page->page != key->page)
		return 0;
	key->page_match = 1;

	if (wait_page->bit_nr != key->bit_nr)
		return 0;

	/*
	 * Stop walking if it's locked.
	 * Is this safe if put_and_wait_on_page_locked() is in use?
	 * Yes: the waker must hold a reference to this page, and if PG_locked
	 * has now already been set by another task, that task must also hold
	 * a reference to the *same usage* of this page; so there is no need
	 * to walk on to wake even the put_and_wait_on_page_locked() callers.
	 */
	if (test_bit(key->bit_nr, &key->page->flags))
		return -1;

	return autoremove_wake_function(wait, mode, sync, key);
}

static void wake_up_page_bit(struct page *page, int bit_nr)
{
	wait_queue_head_t *q = page_waitqueue(page);
	struct wait_page_key key;
	unsigned long flags;
	wait_queue_entry_t bookmark;

	key.page = page;
	key.bit_nr = bit_nr;
	key.page_match = 0;

	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);

	while (bookmark.flags & WQ_FLAG_BOOKMARK) {
		/*
		 * Take a breather from holding the lock,
		 * allow pages that finish wake up asynchronously
		 * to acquire the lock and remove themselves
		 * from wait queue
		 */
		spin_unlock_irqrestore(&q->lock, flags);
		cpu_relax();
		spin_lock_irqsave(&q->lock, flags);
		__wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
	}

	/*
	 * It is possible for other pages to have collided on the waitqueue
	 * hash, so in that case check for a page match. That prevents a long-
	 * term waiter
	 *
	 * It is still possible to miss a case here, when we woke page waiters
	 * and removed them from the waitqueue, but there are still other
	 * page waiters.
	 */
	if (!waitqueue_active(q) || !key.page_match) {
		ClearPageWaiters(page);
		/*
		 * It's possible to miss clearing Waiters here, when we woke
		 * our page waiters, but the hashed waitqueue has waiters for
		 * other pages on it.
		 *
		 * That's okay, it's a rare case. The next waker will clear it.
		 */
	}
	spin_unlock_irqrestore(&q->lock, flags);
}

static void wake_up_page(struct page *page, int bit)
{
	if (!PageWaiters(page))
		return;
	wake_up_page_bit(page, bit);
}

/*
 * A choice of three behaviors for wait_on_page_bit_common():
 */
enum behavior {
	EXCLUSIVE,	/* Hold ref to page and take the bit when woken, like
			 * __lock_page() waiting on then setting PG_locked.
			 */
	SHARED,		/* Hold ref to page and check the bit when woken, like
			 * wait_on_page_writeback() waiting on PG_writeback.
			 */
	DROP,		/* Drop ref to page before wait, no check when woken,
			 * like put_and_wait_on_page_locked() on PG_locked.
			 */
};

static inline int wait_on_page_bit_common(wait_queue_head_t *q,
	struct page *page, int bit_nr, int state, enum behavior behavior)
{
	struct wait_page_queue wait_page;
	wait_queue_entry_t *wait = &wait_page.wait;
	bool bit_is_set;
	bool thrashing = false;
	bool delayacct = false;
	unsigned long pflags;
	int ret = 0;

	if (bit_nr == PG_locked &&
	    !PageUptodate(page) && PageWorkingset(page)) {
		if (!PageSwapBacked(page)) {
			delayacct_thrashing_start();
			delayacct = true;
		}
		psi_memstall_enter(&pflags);
		thrashing = true;
	}

	init_wait(wait);
	wait->flags = behavior == EXCLUSIVE ? WQ_FLAG_EXCLUSIVE : 0;
	wait->func = wake_page_function;
	wait_page.page = page;
	wait_page.bit_nr = bit_nr;

	for (;;) {
		spin_lock_irq(&q->lock);

		if (likely(list_empty(&wait->entry))) {
			__add_wait_queue_entry_tail(q, wait);
			SetPageWaiters(page);
		}

		set_current_state(state);

		spin_unlock_irq(&q->lock);

		bit_is_set = test_bit(bit_nr, &page->flags);
		if (behavior == DROP)
			put_page(page);

		if (likely(bit_is_set))
			io_schedule();

		if (behavior == EXCLUSIVE) {
			if (!test_and_set_bit_lock(bit_nr, &page->flags))
				break;
		} else if (behavior == SHARED) {
			if (!test_bit(bit_nr, &page->flags))
				break;
		}

		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			break;
		}

		if (behavior == DROP) {
			/*
			 * We can no longer safely access page->flags:
			 * even if CONFIG_MEMORY_HOTREMOVE is not enabled,
			 * there is a risk of waiting forever on a page reused
			 * for something that keeps it locked indefinitely.
			 * But best check for -EINTR above before breaking.
			 */
			break;
		}
	}

	finish_wait(q, wait);

	if (thrashing) {
		if (delayacct)
			delayacct_thrashing_end();
		psi_memstall_leave(&pflags);
	}

	/*
	 * A signal could leave PageWaiters set. Clearing it here if
	 * !waitqueue_active would be possible (by open-coding finish_wait),
	 * but still fail to catch it in the case of wait hash collision. We
	 * already can fail to clear wait hash collision cases, so don't
	 * bother with signals either.
	 */

	return ret;
}

void wait_on_page_bit(struct page *page, int bit_nr)
{
	wait_queue_head_t *q = page_waitqueue(page);
	wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
}
EXPORT_SYMBOL(wait_on_page_bit);

int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
	wait_queue_head_t *q = page_waitqueue(page);
	return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, SHARED);
}
EXPORT_SYMBOL(wait_on_page_bit_killable);

/**
 * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked
 * @page: The page to wait for.
 *
 * The caller should hold a reference on @page.  They expect the page to
 * become unlocked relatively soon, but do not wish to hold up migration
 * (for example) by holding the reference while waiting for the page to
 * come unlocked.  After this function returns, the caller should not
 * dereference @page.
 */
void put_and_wait_on_page_locked(struct page *page)
{
	wait_queue_head_t *q;

	page = compound_head(page);
	q = page_waitqueue(page);
	wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, DROP);
}

/**
 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 * @page: Page defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 */
void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter)
{
	wait_queue_head_t *q = page_waitqueue(page);
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue_entry_tail(q, waiter);
	SetPageWaiters(page);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);

#ifndef clear_bit_unlock_is_negative_byte

/*
 * PG_waiters is the high bit in the same byte as PG_lock.
 *
 * On x86 (and on many other architectures), we can clear PG_lock and
 * test the sign bit at the same time. But if the architecture does
 * not support that special operation, we just do this all by hand
 * instead.
 *
 * The read of PG_waiters has to be after (or concurrently with) PG_locked
 * being cleared, but a memory barrier should be unnecessary since it is
 * in the same byte as PG_locked.
 */
static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
{
	clear_bit_unlock(nr, mem);
	/* smp_mb__after_atomic(); */
	return test_bit(PG_waiters, mem);
}

#endif

/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * Note that this depends on PG_waiters being the sign bit in the byte
 * that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to
 * clear the PG_locked bit and test PG_waiters at the same time fairly
 * portably (architectures that do LL/SC can test any bit, while x86 can
 * test the sign bit).
 */
void unlock_page(struct page *page)
{
	BUILD_BUG_ON(PG_waiters != 7);
	page = compound_head(page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
		wake_up_page_bit(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);
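
/*
 * Illustrative sketch (not part of the original file): the canonical
 * lock/unlock pairing that the wakeup machinery above serves.  A caller
 * locks the page (sleeping in __lock_page() via the hashed wait table if
 * it is contended), works on it, and unlock_page() then wakes any waiters.
 * example_with_page_locked() is a hypothetical helper.
 */
#if 0	/* example only */
static void example_with_page_locked(struct page *page)
{
	lock_page(page);	/* may sleep in __lock_page() */
	/* ... operate on the page while PG_locked is held ... */
	unlock_page(page);	/* clears PG_locked and wakes waiters */
}
#endif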

/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */
void end_page_writeback(struct page *page)
{
	/*
	 * TestClearPageReclaim could be used here but it is an atomic
	 * operation and overkill in this particular case. Failing to
	 * shuffle a page marked for immediate reclaim is too mild to
	 * justify taking an atomic operation penalty at the end of
	 * every page writeback.
	 */
	if (PageReclaim(page)) {
		ClearPageReclaim(page);
		rotate_reclaimable_page(page);
	}

	if (!test_clear_page_writeback(page))
		BUG();

	smp_mb__after_atomic();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/*
 * After completing I/O on a page, call this routine to update the page
 * flags appropriately
 */
void page_endio(struct page *page, bool is_write, int err)
{
	if (!is_write) {
		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} else {
		if (err) {
			struct address_space *mapping;

			SetPageError(page);
			mapping = page_mapping(page);
			if (mapping)
				mapping_set_error(mapping, err);
		}
		end_page_writeback(page);
	}
}
EXPORT_SYMBOL_GPL(page_endio);

/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @__page: the page to lock
 */
void __lock_page(struct page *__page)
{
	struct page *page = compound_head(__page);
	wait_queue_head_t *q = page_waitqueue(page);
	wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE,
				EXCLUSIVE);
}
EXPORT_SYMBOL(__lock_page);

int __lock_page_killable(struct page *__page)
{
	struct page *page = compound_head(__page);
	wait_queue_head_t *q = page_waitqueue(page);
	return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE,
					EXCLUSIVE);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);

/*
 * Return values:
 * 1 - page is locked; mmap_sem is still held.
 * 0 - page is not locked.
 *     mmap_sem has been released (up_read()), unless flags had both
 *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
 *     which case mmap_sem is still held.
 *
 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1
 * with the page locked and the mmap_sem unperturbed.
 */
int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
			 unsigned int flags)
{
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/*
		 * CAUTION! In this case, mmap_sem is not released
		 * even though return 0.
		 */
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			return 0;

		up_read(&mm->mmap_sem);
		if (flags & FAULT_FLAG_KILLABLE)
			wait_on_page_locked_killable(page);
		else
			wait_on_page_locked(page);
		return 0;
	} else {
		if (flags & FAULT_FLAG_KILLABLE) {
			int ret;

			ret = __lock_page_killable(page);
			if (ret) {
				up_read(&mm->mmap_sem);
				return 0;
			}
		} else
			__lock_page(page);
		return 1;
	}
}

/**
 * page_cache_next_miss() - Find the next gap in the page cache.
 * @mapping: Mapping.
 * @index: Index.
 * @max_scan: Maximum range to search.
 *
 * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the
 * gap with the lowest index.
 *
 * This function may be called under the rcu_read_lock.  However, this will
 * not atomically search a snapshot of the cache at a single point in time.
 * For example, if a gap is created at index 5, then subsequently a gap is
 * created at index 10, page_cache_next_miss covering both indices may
 * return 10 if called under the rcu_read_lock.
 *
 * Return: The index of the gap if found, otherwise an index outside the
 * range specified (in which case 'return - index >= max_scan' will be true).
 * In the rare case of index wrap-around, 0 will be returned.
 */
pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	XA_STATE(xas, &mapping->i_pages, index);

	while (max_scan--) {
		void *entry = xas_next(&xas);
		if (!entry || xa_is_value(entry))
			break;
		if (xas.xa_index == 0)
			break;
	}

	return xas.xa_index;
}
EXPORT_SYMBOL(page_cache_next_miss);

/**
 * page_cache_prev_miss() - Find the previous gap in the page cache.
 * @mapping: Mapping.
 * @index: Index.
 * @max_scan: Maximum range to search.
 *
 * Search the range [max(index - max_scan + 1, 0), index] for the
 * gap with the highest index.
 *
 * This function may be called under the rcu_read_lock.  However, this will
 * not atomically search a snapshot of the cache at a single point in time.
 * For example, if a gap is created at index 10, then subsequently a gap is
 * created at index 5, page_cache_prev_miss() covering both indices may
 * return 5 if called under the rcu_read_lock.
 *
 * Return: The index of the gap if found, otherwise an index outside the
 * range specified (in which case 'index - return >= max_scan' will be true).
 * In the rare case of wrap-around, ULONG_MAX will be returned.
 */
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	XA_STATE(xas, &mapping->i_pages, index);

	while (max_scan--) {
		void *entry = xas_prev(&xas);
		if (!entry || xa_is_value(entry))
			break;
		if (xas.xa_index == ULONG_MAX)
			break;
	}

	return xas.xa_index;
}
EXPORT_SYMBOL(page_cache_prev_miss);
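
/*
 * Illustrative sketch (not part of the original file): using
 * page_cache_next_miss() to find the first hole after @index, e.g. to
 * bound how far a readahead pass needs to go.  The helper name is
 * hypothetical.
 */
#if 0	/* example only */
static pgoff_t example_first_hole(struct address_space *mapping,
				  pgoff_t index, unsigned long max_scan)
{
	pgoff_t gap = page_cache_next_miss(mapping, index, max_scan);

	/* 'gap - index >= max_scan' means no hole was found in range. */
	return gap;
}
#endif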
e7b563bb 1502
485bb99b 1503/**
0cd6144a 1504 * find_get_entry - find and get a page cache entry
485bb99b 1505 * @mapping: the address_space to search
0cd6144a
JW
1506 * @offset: the page cache index
1507 *
1508 * Looks up the page cache slot at @mapping & @offset. If there is a
1509 * page cache page, it is returned with an increased refcount.
485bb99b 1510 *
139b6a6f
JW
1511 * If the slot holds a shadow entry of a previously evicted page, or a
1512 * swap entry from shmem/tmpfs, it is returned.
0cd6144a 1513 *
a862f68a 1514 * Return: the found page or shadow entry, %NULL if nothing is found.
1da177e4 1515 */
0cd6144a 1516struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
1da177e4 1517{
4c7472c0 1518 XA_STATE(xas, &mapping->i_pages, offset);
69bf4b6b 1519 struct page *head, *page;
1da177e4 1520
a60637c8
NP
1521 rcu_read_lock();
1522repeat:
4c7472c0
MW
1523 xas_reset(&xas);
1524 page = xas_load(&xas);
1525 if (xas_retry(&xas, page))
1526 goto repeat;
1527 /*
1528 * A shadow entry of a recently evicted page, or a swap entry from
1529 * shmem/tmpfs. Return it without attempting to raise page count.
1530 */
1531 if (!page || xa_is_value(page))
1532 goto out;
83929372 1533
69bf4b6b
LT
1534 head = compound_head(page);
1535 if (!page_cache_get_speculative(head))
1536 goto repeat;
1537
1538 /* The page was split under us? */
1539 if (compound_head(page) != head) {
1540 put_page(head);
4c7472c0 1541 goto repeat;
69bf4b6b 1542 }
83929372 1543
4c7472c0 1544 /*
69bf4b6b 1545 * Has the page moved?
4c7472c0
MW
1546 * This is part of the lockless pagecache protocol. See
1547 * include/linux/pagemap.h for details.
1548 */
1549 if (unlikely(page != xas_reload(&xas))) {
69bf4b6b 1550 put_page(head);
4c7472c0 1551 goto repeat;
a60637c8 1552 }
27d20fdd 1553out:
a60637c8
NP
1554 rcu_read_unlock();
1555
1da177e4
LT
1556 return page;
1557}
0cd6144a 1558EXPORT_SYMBOL(find_get_entry);
1da177e4 1559
0cd6144a
JW
1560/**
1561 * find_lock_entry - locate, pin and lock a page cache entry
1562 * @mapping: the address_space to search
1563 * @offset: the page cache index
1564 *
1565 * Looks up the page cache slot at @mapping & @offset. If there is a
1566 * page cache page, it is returned locked and with an increased
1567 * refcount.
1568 *
139b6a6f
JW
1569 * If the slot holds a shadow entry of a previously evicted page, or a
1570 * swap entry from shmem/tmpfs, it is returned.
0cd6144a 1571 *
0cd6144a 1572 * find_lock_entry() may sleep.
a862f68a
MR
1573 *
1574 * Return: the found page or shadow entry, %NULL if nothing is found.
0cd6144a
JW
1575 */
1576struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
1da177e4
LT
1577{
1578 struct page *page;
1579
1da177e4 1580repeat:
0cd6144a 1581 page = find_get_entry(mapping, offset);
4c7472c0 1582 if (page && !xa_is_value(page)) {
a60637c8
NP
1583 lock_page(page);
1584 /* Has the page been truncated? */
83929372 1585 if (unlikely(page_mapping(page) != mapping)) {
a60637c8 1586 unlock_page(page);
09cbfeaf 1587 put_page(page);
a60637c8 1588 goto repeat;
1da177e4 1589 }
83929372 1590 VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
1da177e4 1591 }
1da177e4
LT
1592 return page;
1593}
0cd6144a
JW
1594EXPORT_SYMBOL(find_lock_entry);
1595
1596/**
2457aec6 1597 * pagecache_get_page - find and get a page reference
0cd6144a
JW
1598 * @mapping: the address_space to search
1599 * @offset: the page index
2457aec6 1600 * @fgp_flags: FGP flags
45f87de5 1601 * @gfp_mask: gfp mask to use for the page cache data page allocation
0cd6144a 1602 *
2457aec6 1603 * Looks up the page cache slot at @mapping & @offset.
1da177e4 1604 *
75325189 1605 * FGP flags modify how the page is returned.
0cd6144a 1606 *
0e056eb5 1607 * @fgp_flags can be:
1608 *
1609 * - FGP_ACCESSED: the page will be marked accessed
1610 * - FGP_LOCK: Page is returned locked
1611 * - FGP_CREAT: If page is not present then a new page is allocated using
1612 * @gfp_mask and added to the page cache and the VM's LRU
1613 * list. The page is returned locked and with an increased
a862f68a 1614 * refcount.
a75d4c33
JB
1615 * - FGP_FOR_MMAP: Similar to FGP_CREAT, only we want to allow the caller to do
1616 * its own locking dance if the page is already in cache, or unlock the page
1617 * before returning if we had to add the page to pagecache.
1da177e4 1618 *
2457aec6
MG
1619 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
1620 * if the GFP flags specified for FGP_CREAT are atomic.
1da177e4 1621 *
2457aec6 1622 * If there is a page cache page, it is returned with an increased refcount.
a862f68a
MR
1623 *
1624 * Return: the found page or %NULL otherwise.
1da177e4 1625 */
2457aec6 1626struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
45f87de5 1627 int fgp_flags, gfp_t gfp_mask)
1da177e4 1628{
eb2be189 1629 struct page *page;
2457aec6 1630
1da177e4 1631repeat:
2457aec6 1632 page = find_get_entry(mapping, offset);
3159f943 1633 if (xa_is_value(page))
2457aec6
MG
1634 page = NULL;
1635 if (!page)
1636 goto no_page;
1637
1638 if (fgp_flags & FGP_LOCK) {
1639 if (fgp_flags & FGP_NOWAIT) {
1640 if (!trylock_page(page)) {
09cbfeaf 1641 put_page(page);
2457aec6
MG
1642 return NULL;
1643 }
1644 } else {
1645 lock_page(page);
1646 }
1647
1648 /* Has the page been truncated? */
1649 if (unlikely(page->mapping != mapping)) {
1650 unlock_page(page);
09cbfeaf 1651 put_page(page);
2457aec6
MG
1652 goto repeat;
1653 }
1654 VM_BUG_ON_PAGE(page->index != offset, page);
1655 }
1656
c16eb000 1657 if (fgp_flags & FGP_ACCESSED)
2457aec6
MG
1658 mark_page_accessed(page);
1659
1660no_page:
1661 if (!page && (fgp_flags & FGP_CREAT)) {
1662 int err;
1663 if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
45f87de5
MH
1664 gfp_mask |= __GFP_WRITE;
1665 if (fgp_flags & FGP_NOFS)
1666 gfp_mask &= ~__GFP_FS;
2457aec6 1667
45f87de5 1668 page = __page_cache_alloc(gfp_mask);
eb2be189
NP
1669 if (!page)
1670 return NULL;
2457aec6 1671
a75d4c33 1672 if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
2457aec6
MG
1673 fgp_flags |= FGP_LOCK;
1674
eb39d618 1675 /* Init accessed so avoid atomic mark_page_accessed later */
2457aec6 1676 if (fgp_flags & FGP_ACCESSED)
eb39d618 1677 __SetPageReferenced(page);
2457aec6 1678
abc1be13 1679 err = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
eb2be189 1680 if (unlikely(err)) {
09cbfeaf 1681 put_page(page);
eb2be189
NP
1682 page = NULL;
1683 if (err == -EEXIST)
1684 goto repeat;
1da177e4 1685 }
a75d4c33
JB
1686
1687 /*
1688 * add_to_page_cache_lru locks the page, and for mmap we expect
1689 * an unlocked page.
1690 */
1691 if (page && (fgp_flags & FGP_FOR_MMAP))
1692 unlock_page(page);
1da177e4 1693 }
2457aec6 1694
1da177e4
LT
1695 return page;
1696}
2457aec6 1697EXPORT_SYMBOL(pagecache_get_page);
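
/*
 * Editor's illustration, not part of filemap.c: a typical FGP lookup that
 * creates the page if it is absent. The function name is hypothetical and
 * error handling is reduced to the minimum.
 */
static struct page *example_get_locked_page(struct address_space *mapping,
					    pgoff_t index)
{
	/* FGP_LOCK|FGP_CREAT: return the page locked, allocating if needed */
	struct page *page = pagecache_get_page(mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
			mapping_gfp_mask(mapping));

	if (!page)
		return NULL;	/* allocation or page cache insertion failed */
	/* the caller must unlock_page() and put_page() when done */
	return page;
}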
1da177e4 1698
0cd6144a
JW
1699/**
1700 * find_get_entries - gang pagecache lookup
1701 * @mapping: The address_space to search
1702 * @start: The starting page cache index
1703 * @nr_entries: The maximum number of entries
1704 * @entries: Where the resulting entries are placed
1705 * @indices: The cache indices corresponding to the entries in @entries
1706 *
1707 * find_get_entries() will search for and return a group of up to
1708 * @nr_entries entries in the mapping. The entries are placed at
1709 * @entries. find_get_entries() takes a reference against any actual
1710 * pages it returns.
1711 *
1712 * The search returns a group of mapping-contiguous page cache entries
1713 * with ascending indexes. There may be holes in the indices due to
1714 * not-present pages.
1715 *
139b6a6f
JW
1716 * Any shadow entries of evicted pages, or swap entries from
1717 * shmem/tmpfs, are included in the returned array.
0cd6144a 1718 *
a862f68a 1719 * Return: the number of pages and shadow entries which were found.
0cd6144a
JW
1720 */
1721unsigned find_get_entries(struct address_space *mapping,
1722 pgoff_t start, unsigned int nr_entries,
1723 struct page **entries, pgoff_t *indices)
1724{
f280bf09
MW
1725 XA_STATE(xas, &mapping->i_pages, start);
1726 struct page *page;
0cd6144a 1727 unsigned int ret = 0;
0cd6144a
JW
1728
1729 if (!nr_entries)
1730 return 0;
1731
1732 rcu_read_lock();
f280bf09 1733 xas_for_each(&xas, page, ULONG_MAX) {
69bf4b6b 1734 struct page *head;
f280bf09 1735 if (xas_retry(&xas, page))
0cd6144a 1736 continue;
f280bf09
MW
1737 /*
1738 * A shadow entry of a recently evicted page, a swap
1739 * entry from shmem/tmpfs or a DAX entry. Return it
1740 * without attempting to raise page count.
1741 */
1742 if (xa_is_value(page))
0cd6144a 1743 goto export;
83929372 1744
69bf4b6b
LT
1745 head = compound_head(page);
1746 if (!page_cache_get_speculative(head))
f280bf09 1747 goto retry;
83929372 1748
69bf4b6b
LT
1749 /* The page was split under us? */
1750 if (compound_head(page) != head)
1751 goto put_page;
1752
1753 /* Has the page moved? */
f280bf09
MW
1754 if (unlikely(page != xas_reload(&xas)))
1755 goto put_page;
1756
0cd6144a 1757export:
f280bf09 1758 indices[ret] = xas.xa_index;
0cd6144a
JW
1759 entries[ret] = page;
1760 if (++ret == nr_entries)
1761 break;
f280bf09
MW
1762 continue;
1763put_page:
69bf4b6b 1764 put_page(head);
f280bf09
MW
1765retry:
1766 xas_reset(&xas);
0cd6144a
JW
1767 }
1768 rcu_read_unlock();
1769 return ret;
1770}
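
/*
 * Editor's illustration, not part of filemap.c: draining part of a mapping
 * with find_get_entries(), skipping value entries. The batch size and the
 * names are arbitrary.
 */
static void example_scan_entries(struct address_space *mapping, pgoff_t start)
{
	struct page *entries[16];
	pgoff_t indices[16];
	unsigned int i, nr;

	nr = find_get_entries(mapping, start, 16, entries, indices);
	for (i = 0; i < nr; i++) {
		struct page *page = entries[i];

		if (xa_is_value(page))
			continue;	/* shadow/swap/DAX entry, no ref held */
		/* ... act on the page at index indices[i] ... */
		put_page(page);
	}
}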
1771
1da177e4 1772/**
b947cee4 1773 * find_get_pages_range - gang pagecache lookup
1da177e4
LT
1774 * @mapping: The address_space to search
1775 * @start: The starting page index
b947cee4 1776 * @end: The final page index (inclusive)
1da177e4
LT
1777 * @nr_pages: The maximum number of pages
1778 * @pages: Where the resulting pages are placed
1779 *
b947cee4
JK
1780 * find_get_pages_range() will search for and return a group of up to @nr_pages
1781 * pages in the mapping starting at index @start and up to index @end
1782 * (inclusive). The pages are placed at @pages. find_get_pages_range() takes
1783 * a reference against the returned pages.
1da177e4
LT
1784 *
1785 * The search returns a group of mapping-contiguous pages with ascending
1786 * indexes. There may be holes in the indices due to not-present pages.
d72dc8a2 1787 * We also update @start to index the next page for the traversal.
1da177e4 1788 *
a862f68a
MR
1789 * Return: the number of pages which were found. If this number is
1790 * smaller than @nr_pages, the end of specified range has been
b947cee4 1791 * reached.
1da177e4 1792 */
b947cee4
JK
1793unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
1794 pgoff_t end, unsigned int nr_pages,
1795 struct page **pages)
1da177e4 1796{
fd1b3cee
MW
1797 XA_STATE(xas, &mapping->i_pages, *start);
1798 struct page *page;
0fc9d104
KK
1799 unsigned ret = 0;
1800
1801 if (unlikely(!nr_pages))
1802 return 0;
a60637c8
NP
1803
1804 rcu_read_lock();
fd1b3cee 1805 xas_for_each(&xas, page, end) {
69bf4b6b 1806 struct page *head;
fd1b3cee 1807 if (xas_retry(&xas, page))
a60637c8 1808 continue;
fd1b3cee
MW
1809 /* Skip over shadow, swap and DAX entries */
1810 if (xa_is_value(page))
8079b1c8 1811 continue;
a60637c8 1812
69bf4b6b
LT
1813 head = compound_head(page);
1814 if (!page_cache_get_speculative(head))
fd1b3cee 1815 goto retry;
83929372 1816
69bf4b6b
LT
1817 /* The page was split under us? */
1818 if (compound_head(page) != head)
1819 goto put_page;
1820
1821 /* Has the page moved? */
fd1b3cee
MW
1822 if (unlikely(page != xas_reload(&xas)))
1823 goto put_page;
1da177e4 1824
69bf4b6b 1825 pages[ret] = page;
b947cee4 1826 if (++ret == nr_pages) {
5d3ee42f 1827 *start = xas.xa_index + 1;
b947cee4
JK
1828 goto out;
1829 }
fd1b3cee
MW
1830 continue;
1831put_page:
69bf4b6b 1832 put_page(head);
fd1b3cee
MW
1833retry:
1834 xas_reset(&xas);
a60637c8 1835 }
5b280c0c 1836
b947cee4
JK
1837 /*
1838 * We come here when there is no page beyond @end. We take care to not
1839 * overflow the index @start as it confuses some of the callers. This
fd1b3cee 1840 * breaks the iteration when there is a page at index -1 but that is
b947cee4
JK
1841 * already broken anyway.
1842 */
1843 if (end == (pgoff_t)-1)
1844 *start = (pgoff_t)-1;
1845 else
1846 *start = end + 1;
1847out:
a60637c8 1848 rcu_read_unlock();
d72dc8a2 1849
1da177e4
LT
1850 return ret;
1851}
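
/*
 * Editor's illustration, not part of filemap.c: walking an index range in
 * batches. find_get_pages_range() advances @start itself, so the loop just
 * repeats until a batch comes back short. Names are hypothetical.
 */
static void example_walk_range(struct address_space *mapping,
			       pgoff_t start, pgoff_t end)
{
	struct page *pages[16];
	unsigned int i, nr;

	do {
		nr = find_get_pages_range(mapping, &start, end, 16, pages);
		for (i = 0; i < nr; i++) {
			/* ... inspect pages[i] ... */
			put_page(pages[i]);
		}
	} while (nr == 16);	/* a short batch means the range is exhausted */
}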
1852
ebf43500
JA
1853/**
1854 * find_get_pages_contig - gang contiguous pagecache lookup
1855 * @mapping: The address_space to search
1856 * @index: The starting page index
1857 * @nr_pages: The maximum number of pages
1858 * @pages: Where the resulting pages are placed
1859 *
1860 * find_get_pages_contig() works exactly like find_get_pages(), except
1861 * that the returned pages are guaranteed to be contiguous.
1862 *
a862f68a 1863 * Return: the number of pages which were found.
ebf43500
JA
1864 */
1865unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
1866 unsigned int nr_pages, struct page **pages)
1867{
3ece58a2
MW
1868 XA_STATE(xas, &mapping->i_pages, index);
1869 struct page *page;
0fc9d104
KK
1870 unsigned int ret = 0;
1871
1872 if (unlikely(!nr_pages))
1873 return 0;
a60637c8
NP
1874
1875 rcu_read_lock();
3ece58a2 1876 for (page = xas_load(&xas); page; page = xas_next(&xas)) {
69bf4b6b 1877 struct page *head;
3ece58a2
MW
1878 if (xas_retry(&xas, page))
1879 continue;
1880 /*
1881 * If the entry has been swapped out, we can stop looking.
1882 * No current caller is looking for DAX entries.
1883 */
1884 if (xa_is_value(page))
8079b1c8 1885 break;
ebf43500 1886
69bf4b6b
LT
1887 head = compound_head(page);
1888 if (!page_cache_get_speculative(head))
3ece58a2 1889 goto retry;
83929372 1890
69bf4b6b
LT
1891 /* The page was split under us? */
1892 if (compound_head(page) != head)
1893 goto put_page;
1894
1895 /* Has the page moved? */
3ece58a2
MW
1896 if (unlikely(page != xas_reload(&xas)))
1897 goto put_page;
a60637c8 1898
69bf4b6b 1899 pages[ret] = page;
0fc9d104
KK
1900 if (++ret == nr_pages)
1901 break;
3ece58a2
MW
1902 continue;
1903put_page:
69bf4b6b 1904 put_page(head);
3ece58a2
MW
1905retry:
1906 xas_reset(&xas);
ebf43500 1907 }
a60637c8
NP
1908 rcu_read_unlock();
1909 return ret;
ebf43500 1910}
ef71c15c 1911EXPORT_SYMBOL(find_get_pages_contig);
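
/*
 * Editor's illustration, not part of filemap.c: fetching an index-contiguous
 * run of pages. The lookup stops at the first hole, so the return value is
 * the length of the run. The wrapper below is hypothetical.
 */
static unsigned int example_get_run(struct address_space *mapping,
				    pgoff_t index, struct page **pages,
				    unsigned int nr)
{
	unsigned int got = find_get_pages_contig(mapping, index, nr, pages);

	/*
	 * pages[0..got-1] sit at consecutive indices starting at @index;
	 * the caller owns one reference on each and must put_page() them.
	 */
	return got;
}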
ebf43500 1912
485bb99b 1913/**
72b045ae 1914 * find_get_pages_range_tag - find and return pages in given range matching @tag
485bb99b
RD
1915 * @mapping: the address_space to search
1916 * @index: the starting page index
72b045ae 1917 * @end: The final page index (inclusive)
485bb99b
RD
1918 * @tag: the tag index
1919 * @nr_pages: the maximum number of pages
1920 * @pages: where the resulting pages are placed
1921 *
1da177e4 1922 * Like find_get_pages, except we only return pages which are tagged with
485bb99b 1923 * @tag. We update @index to index the next page for the traversal.
a862f68a
MR
1924 *
1925 * Return: the number of pages which were found.
1da177e4 1926 */
72b045ae 1927unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
a6906972 1928 pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
72b045ae 1929 struct page **pages)
1da177e4 1930{
a6906972
MW
1931 XA_STATE(xas, &mapping->i_pages, *index);
1932 struct page *page;
0fc9d104
KK
1933 unsigned ret = 0;
1934
1935 if (unlikely(!nr_pages))
1936 return 0;
a60637c8
NP
1937
1938 rcu_read_lock();
a6906972 1939 xas_for_each_marked(&xas, page, end, tag) {
69bf4b6b 1940 struct page *head;
a6906972 1941 if (xas_retry(&xas, page))
a60637c8 1942 continue;
a6906972
MW
1943 /*
1944 * Shadow entries should never be tagged, but this iteration
1945 * is lockless so there is a window for page reclaim to evict
1946 * a page we saw tagged. Skip over it.
1947 */
1948 if (xa_is_value(page))
139b6a6f 1949 continue;
a60637c8 1950
69bf4b6b
LT
1951 head = compound_head(page);
1952 if (!page_cache_get_speculative(head))
a6906972 1953 goto retry;
a60637c8 1954
69bf4b6b
LT
1955 /* The page was split under us? */
1956 if (compound_head(page) != head)
1957 goto put_page;
1958
1959 /* Has the page moved? */
a6906972
MW
1960 if (unlikely(page != xas_reload(&xas)))
1961 goto put_page;
a60637c8 1962
69bf4b6b 1963 pages[ret] = page;
72b045ae 1964 if (++ret == nr_pages) {
5d3ee42f 1965 *index = xas.xa_index + 1;
72b045ae
JK
1966 goto out;
1967 }
a6906972
MW
1968 continue;
1969put_page:
69bf4b6b 1970 put_page(head);
a6906972
MW
1971retry:
1972 xas_reset(&xas);
a60637c8 1973 }
5b280c0c 1974
72b045ae 1975 /*
a6906972 1976 * We come here when we got to @end. We take care to not overflow the
72b045ae 1977 * index @index as it confuses some of the callers. This breaks the
a6906972
MW
1978 * iteration when there is a page at index -1 but that is already
1979 * broken anyway.
72b045ae
JK
1980 */
1981 if (end == (pgoff_t)-1)
1982 *index = (pgoff_t)-1;
1983 else
1984 *index = end + 1;
1985out:
a60637c8 1986 rcu_read_unlock();
1da177e4 1987
1da177e4
LT
1988 return ret;
1989}
72b045ae 1990EXPORT_SYMBOL(find_get_pages_range_tag);
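
/*
 * Editor's illustration, not part of filemap.c: a writeback-style scan of
 * dirty pages using the tagged lookup. PAGECACHE_TAG_DIRTY is the mark the
 * page cache uses for dirty pages; the loop structure and names below are
 * the editor's sketch, not a real writeback implementation.
 */
static void example_scan_dirty(struct address_space *mapping,
			       pgoff_t start, pgoff_t end)
{
	struct page *pages[16];
	unsigned int i, nr;

	while ((nr = find_get_pages_range_tag(mapping, &start, end,
					      PAGECACHE_TAG_DIRTY, 16,
					      pages)) != 0) {
		for (i = 0; i < nr; i++) {
			/* ... lock pages[i] and start writeback on it ... */
			put_page(pages[i]);
		}
	}
}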
1da177e4 1991
76d42bd9
WF
1992/*
1993 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
1994 * a _large_ part of the i/o request. Imagine the worst scenario:
1995 *
1996 * ---R__________________________________________B__________
1997 * ^ reading here ^ bad block(assume 4k)
1998 *
1999 * read(R) => miss => readahead(R...B) => media error => frustrating retries
2000 * => failing the whole request => read(R) => read(R+1) =>
2001 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
2002 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
2003 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
2004 *
2005 * It is going insane. Fix it by quickly scaling down the readahead size.
2006 */
2007static void shrink_readahead_size_eio(struct file *filp,
2008 struct file_ra_state *ra)
2009{
76d42bd9 2010 ra->ra_pages /= 4;
76d42bd9
WF
2011}
2012
485bb99b 2013/**
47c27bc4
CH
2014 * generic_file_buffered_read - generic file read routine
2015 * @iocb: the iocb to read
6e58e79d
AV
2016 * @iter: data destination
2017 * @written: already copied
485bb99b 2018 *
1da177e4 2019 * This is a generic file read routine, and uses the
485bb99b 2020 * mapping->a_ops->readpage() function for the actual low-level stuff.
1da177e4
LT
2021 *
2022 * This is really ugly. But the goto's actually try to clarify some
2023 * of the logic when it comes to error handling etc.
a862f68a
MR
2024 *
2025 * Return:
2026 * * total number of bytes copied, including those that were already @written
2027 * * negative error code if nothing was copied
1da177e4 2028 */
47c27bc4 2029static ssize_t generic_file_buffered_read(struct kiocb *iocb,
6e58e79d 2030 struct iov_iter *iter, ssize_t written)
1da177e4 2031{
47c27bc4 2032 struct file *filp = iocb->ki_filp;
36e78914 2033 struct address_space *mapping = filp->f_mapping;
1da177e4 2034 struct inode *inode = mapping->host;
36e78914 2035 struct file_ra_state *ra = &filp->f_ra;
47c27bc4 2036 loff_t *ppos = &iocb->ki_pos;
57f6b96c
FW
2037 pgoff_t index;
2038 pgoff_t last_index;
2039 pgoff_t prev_index;
2040 unsigned long offset; /* offset into pagecache page */
ec0f1637 2041 unsigned int prev_offset;
6e58e79d 2042 int error = 0;
1da177e4 2043
c2a9737f 2044 if (unlikely(*ppos >= inode->i_sb->s_maxbytes))
d05c5f7b 2045 return 0;
c2a9737f
WF
2046 iov_iter_truncate(iter, inode->i_sb->s_maxbytes);
2047
09cbfeaf
KS
2048 index = *ppos >> PAGE_SHIFT;
2049 prev_index = ra->prev_pos >> PAGE_SHIFT;
2050 prev_offset = ra->prev_pos & (PAGE_SIZE-1);
2051 last_index = (*ppos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT;
2052 offset = *ppos & ~PAGE_MASK;
1da177e4 2053
1da177e4
LT
2054 for (;;) {
2055 struct page *page;
57f6b96c 2056 pgoff_t end_index;
a32ea1e1 2057 loff_t isize;
1da177e4
LT
2058 unsigned long nr, ret;
2059
1da177e4 2060 cond_resched();
1da177e4 2061find_page:
5abf186a
MH
2062 if (fatal_signal_pending(current)) {
2063 error = -EINTR;
2064 goto out;
2065 }
2066
1da177e4 2067 page = find_get_page(mapping, index);
3ea89ee8 2068 if (!page) {
3239d834
MT
2069 if (iocb->ki_flags & IOCB_NOWAIT)
2070 goto would_block;
cf914a7d 2071 page_cache_sync_readahead(mapping,
7ff81078 2072 ra, filp,
3ea89ee8
FW
2073 index, last_index - index);
2074 page = find_get_page(mapping, index);
2075 if (unlikely(page == NULL))
2076 goto no_cached_page;
2077 }
2078 if (PageReadahead(page)) {
cf914a7d 2079 page_cache_async_readahead(mapping,
7ff81078 2080 ra, filp, page,
3ea89ee8 2081 index, last_index - index);
1da177e4 2082 }
8ab22b9a 2083 if (!PageUptodate(page)) {
3239d834
MT
2084 if (iocb->ki_flags & IOCB_NOWAIT) {
2085 put_page(page);
2086 goto would_block;
2087 }
2088
ebded027
MG
2089 /*
2090 * See comment in do_read_cache_page on why
2091 * wait_on_page_locked is used to avoid unnecessary
2092 * serialisations and why it's safe.
2093 */
c4b209a4
BVA
2094 error = wait_on_page_locked_killable(page);
2095 if (unlikely(error))
2096 goto readpage_error;
ebded027
MG
2097 if (PageUptodate(page))
2098 goto page_ok;
2099
09cbfeaf 2100 if (inode->i_blkbits == PAGE_SHIFT ||
8ab22b9a
HH
2101 !mapping->a_ops->is_partially_uptodate)
2102 goto page_not_up_to_date;
6d6d36bc 2103 /* pipes can't handle partially uptodate pages */
00e23707 2104 if (unlikely(iov_iter_is_pipe(iter)))
6d6d36bc 2105 goto page_not_up_to_date;
529ae9aa 2106 if (!trylock_page(page))
8ab22b9a 2107 goto page_not_up_to_date;
8d056cb9
DH
2108 /* Did it get truncated before we got the lock? */
2109 if (!page->mapping)
2110 goto page_not_up_to_date_locked;
8ab22b9a 2111 if (!mapping->a_ops->is_partially_uptodate(page,
6e58e79d 2112 offset, iter->count))
8ab22b9a
HH
2113 goto page_not_up_to_date_locked;
2114 unlock_page(page);
2115 }
1da177e4 2116page_ok:
a32ea1e1
N
2117 /*
2118 * i_size must be checked after we know the page is Uptodate.
2119 *
2120 * Checking i_size after the check allows us to calculate
2121 * the correct value for "nr", which means the zero-filled
2122 * part of the page is not copied back to userspace (unless
2123 * another truncate extends the file - this is desired though).
2124 */
2125
2126 isize = i_size_read(inode);
09cbfeaf 2127 end_index = (isize - 1) >> PAGE_SHIFT;
a32ea1e1 2128 if (unlikely(!isize || index > end_index)) {
09cbfeaf 2129 put_page(page);
a32ea1e1
N
2130 goto out;
2131 }
2132
2133 /* nr is the maximum number of bytes to copy from this page */
09cbfeaf 2134 nr = PAGE_SIZE;
a32ea1e1 2135 if (index == end_index) {
09cbfeaf 2136 nr = ((isize - 1) & ~PAGE_MASK) + 1;
a32ea1e1 2137 if (nr <= offset) {
09cbfeaf 2138 put_page(page);
a32ea1e1
N
2139 goto out;
2140 }
2141 }
2142 nr = nr - offset;
1da177e4
LT
2143
2144 /* If users can be writing to this page using arbitrary
2145 * virtual addresses, take care about potential aliasing
2146 * before reading the page on the kernel side.
2147 */
2148 if (mapping_writably_mapped(mapping))
2149 flush_dcache_page(page);
2150
2151 /*
ec0f1637
JK
2152 * When a sequential read accesses a page several times,
2153 * only mark it as accessed the first time.
1da177e4 2154 */
ec0f1637 2155 if (prev_index != index || offset != prev_offset)
1da177e4
LT
2156 mark_page_accessed(page);
2157 prev_index = index;
2158
2159 /*
2160 * Ok, we have the page, and it's up-to-date, so
2161 * now we can copy it to user space...
1da177e4 2162 */
6e58e79d
AV
2163
2164 ret = copy_page_to_iter(page, offset, nr, iter);
1da177e4 2165 offset += ret;
09cbfeaf
KS
2166 index += offset >> PAGE_SHIFT;
2167 offset &= ~PAGE_MASK;
6ce745ed 2168 prev_offset = offset;
1da177e4 2169
09cbfeaf 2170 put_page(page);
6e58e79d
AV
2171 written += ret;
2172 if (!iov_iter_count(iter))
2173 goto out;
2174 if (ret < nr) {
2175 error = -EFAULT;
2176 goto out;
2177 }
2178 continue;
1da177e4
LT
2179
2180page_not_up_to_date:
2181 /* Get exclusive access to the page ... */
85462323
ON
2182 error = lock_page_killable(page);
2183 if (unlikely(error))
2184 goto readpage_error;
1da177e4 2185
8ab22b9a 2186page_not_up_to_date_locked:
da6052f7 2187 /* Did it get truncated before we got the lock? */
1da177e4
LT
2188 if (!page->mapping) {
2189 unlock_page(page);
09cbfeaf 2190 put_page(page);
1da177e4
LT
2191 continue;
2192 }
2193
2194 /* Did somebody else fill it already? */
2195 if (PageUptodate(page)) {
2196 unlock_page(page);
2197 goto page_ok;
2198 }
2199
2200readpage:
91803b49
JM
2201 /*
2202 * A previous I/O error may have been due to temporary
2203 * failures, eg. multipath errors.
2204 * PG_error will be set again if readpage fails.
2205 */
2206 ClearPageError(page);
1da177e4
LT
2207 /* Start the actual read. The read will unlock the page. */
2208 error = mapping->a_ops->readpage(filp, page);
2209
994fc28c
ZB
2210 if (unlikely(error)) {
2211 if (error == AOP_TRUNCATED_PAGE) {
09cbfeaf 2212 put_page(page);
6e58e79d 2213 error = 0;
994fc28c
ZB
2214 goto find_page;
2215 }
1da177e4 2216 goto readpage_error;
994fc28c 2217 }
1da177e4
LT
2218
2219 if (!PageUptodate(page)) {
85462323
ON
2220 error = lock_page_killable(page);
2221 if (unlikely(error))
2222 goto readpage_error;
1da177e4
LT
2223 if (!PageUptodate(page)) {
2224 if (page->mapping == NULL) {
2225 /*
2ecdc82e 2226 * invalidate_mapping_pages got it
1da177e4
LT
2227 */
2228 unlock_page(page);
09cbfeaf 2229 put_page(page);
1da177e4
LT
2230 goto find_page;
2231 }
2232 unlock_page(page);
7ff81078 2233 shrink_readahead_size_eio(filp, ra);
85462323
ON
2234 error = -EIO;
2235 goto readpage_error;
1da177e4
LT
2236 }
2237 unlock_page(page);
2238 }
2239
1da177e4
LT
2240 goto page_ok;
2241
2242readpage_error:
2243 /* UHHUH! A synchronous read error occurred. Report it */
09cbfeaf 2244 put_page(page);
1da177e4
LT
2245 goto out;
2246
2247no_cached_page:
2248 /*
2249 * Ok, it wasn't cached, so we need to create a new
2250 * page..
2251 */
453f85d4 2252 page = page_cache_alloc(mapping);
eb2be189 2253 if (!page) {
6e58e79d 2254 error = -ENOMEM;
eb2be189 2255 goto out;
1da177e4 2256 }
6afdb859 2257 error = add_to_page_cache_lru(page, mapping, index,
c62d2555 2258 mapping_gfp_constraint(mapping, GFP_KERNEL));
1da177e4 2259 if (error) {
09cbfeaf 2260 put_page(page);
6e58e79d
AV
2261 if (error == -EEXIST) {
2262 error = 0;
1da177e4 2263 goto find_page;
6e58e79d 2264 }
1da177e4
LT
2265 goto out;
2266 }
1da177e4
LT
2267 goto readpage;
2268 }
2269
3239d834
MT
2270would_block:
2271 error = -EAGAIN;
1da177e4 2272out:
7ff81078 2273 ra->prev_pos = prev_index;
09cbfeaf 2274 ra->prev_pos <<= PAGE_SHIFT;
7ff81078 2275 ra->prev_pos |= prev_offset;
1da177e4 2276
09cbfeaf 2277 *ppos = ((loff_t)index << PAGE_SHIFT) + offset;
0c6aa263 2278 file_accessed(filp);
6e58e79d 2279 return written ? written : error;
1da177e4
LT
2280}
2281
485bb99b 2282/**
6abd2322 2283 * generic_file_read_iter - generic filesystem read routine
485bb99b 2284 * @iocb: kernel I/O control block
6abd2322 2285 * @iter: destination for the data read
485bb99b 2286 *
6abd2322 2287 * This is the "read_iter()" routine for all filesystems
1da177e4 2288 * that can use the page cache directly.
a862f68a
MR
2289 * Return:
2290 * * number of bytes copied, even for partial reads
2291 * * negative error code if nothing was read
1da177e4
LT
2292 */
2293ssize_t
ed978a81 2294generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
1da177e4 2295{
e7080a43 2296 size_t count = iov_iter_count(iter);
47c27bc4 2297 ssize_t retval = 0;
e7080a43
NS
2298
2299 if (!count)
2300 goto out; /* skip atime */
1da177e4 2301
2ba48ce5 2302 if (iocb->ki_flags & IOCB_DIRECT) {
47c27bc4 2303 struct file *file = iocb->ki_filp;
ed978a81
AV
2304 struct address_space *mapping = file->f_mapping;
2305 struct inode *inode = mapping->host;
543ade1f 2306 loff_t size;
1da177e4 2307
1da177e4 2308 size = i_size_read(inode);
6be96d3a
GR
2309 if (iocb->ki_flags & IOCB_NOWAIT) {
2310 if (filemap_range_has_page(mapping, iocb->ki_pos,
2311 iocb->ki_pos + count - 1))
2312 return -EAGAIN;
2313 } else {
2314 retval = filemap_write_and_wait_range(mapping,
2315 iocb->ki_pos,
2316 iocb->ki_pos + count - 1);
2317 if (retval < 0)
2318 goto out;
2319 }
d8d3d94b 2320
0d5b0cf2
CH
2321 file_accessed(file);
2322
5ecda137 2323 retval = mapping->a_ops->direct_IO(iocb, iter);
c3a69024 2324 if (retval >= 0) {
c64fb5c7 2325 iocb->ki_pos += retval;
5ecda137 2326 count -= retval;
9fe55eea 2327 }
5b47d59a 2328 iov_iter_revert(iter, count - iov_iter_count(iter));
66f998f6 2329
9fe55eea
SW
2330 /*
2331 * Btrfs can have a short DIO read if we encounter
2332 * compressed extents, so if there was an error, or if
2333 * we've already read everything we wanted to, or if
2334 * there was a short read because we hit EOF, go ahead
2335 * and return. Otherwise fallthrough to buffered io for
fbbbad4b
MW
2336 * the rest of the read. Buffered reads will not work for
2337 * DAX files, so don't bother trying.
9fe55eea 2338 */
5ecda137 2339 if (retval < 0 || !count || iocb->ki_pos >= size ||
0d5b0cf2 2340 IS_DAX(inode))
9fe55eea 2341 goto out;
1da177e4
LT
2342 }
2343
47c27bc4 2344 retval = generic_file_buffered_read(iocb, iter, retval);
1da177e4
LT
2345out:
2346 return retval;
2347}
ed978a81 2348EXPORT_SYMBOL(generic_file_read_iter);
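
/*
 * Editor's illustration, not part of filemap.c: how a simple filesystem
 * might plug the generic read path into its file_operations. The structure
 * name is hypothetical; the helpers are the standard VFS ones.
 */
static const struct file_operations example_file_ops = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
};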
1da177e4 2349
1da177e4 2350#ifdef CONFIG_MMU
1da177e4 2351#define MMAP_LOTSAMISS (100)
6b4c9f44
JB
2352static struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
2353 struct file *fpin)
2354{
2355 int flags = vmf->flags;
2356
2357 if (fpin)
2358 return fpin;
2359
2360 /*
2361 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
 2362 * anything, so we only pin the file and drop the mmap_sem if
 2363 * FAULT_FLAG_ALLOW_RETRY alone is set, without FAULT_FLAG_RETRY_NOWAIT.
2364 */
2365 if ((flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) ==
2366 FAULT_FLAG_ALLOW_RETRY) {
2367 fpin = get_file(vmf->vma->vm_file);
2368 up_read(&vmf->vma->vm_mm->mmap_sem);
2369 }
2370 return fpin;
2371}
2372
2373/*
2374 * lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_sem
2375 * @vmf - the vm_fault for this fault.
2376 * @page - the page to lock.
2377 * @fpin - the pointer to the file we may pin (or is already pinned).
2378 *
 2379 * This works similarly to lock_page_or_retry in that it can drop the mmap_sem.
2380 * It differs in that it actually returns the page locked if it returns 1 and 0
2381 * if it couldn't lock the page. If we did have to drop the mmap_sem then fpin
2382 * will point to the pinned file and needs to be fput()'ed at a later point.
2383 */
2384static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
2385 struct file **fpin)
2386{
2387 if (trylock_page(page))
2388 return 1;
2389
8b0f9fa2
LT
2390 /*
2391 * NOTE! This will make us return with VM_FAULT_RETRY, but with
2392 * the mmap_sem still held. That's how FAULT_FLAG_RETRY_NOWAIT
2393 * is supposed to work. We have way too many special cases..
2394 */
6b4c9f44
JB
2395 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
2396 return 0;
2397
2398 *fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
2399 if (vmf->flags & FAULT_FLAG_KILLABLE) {
2400 if (__lock_page_killable(page)) {
2401 /*
2402 * We didn't have the right flags to drop the mmap_sem,
2403 * but all fault_handlers only check for fatal signals
2404 * if we return VM_FAULT_RETRY, so we need to drop the
2405 * mmap_sem here and return 0 if we don't have a fpin.
2406 */
2407 if (*fpin == NULL)
2408 up_read(&vmf->vma->vm_mm->mmap_sem);
2409 return 0;
2410 }
2411 } else
2412 __lock_page(page);
2413 return 1;
2414}
2415
1da177e4 2416
ef00e08e 2417/*
6b4c9f44
JB
2418 * Synchronous readahead happens when we don't even find a page in the page
2419 * cache at all. We don't want to perform IO under the mmap sem, so if we have
2420 * to drop the mmap sem we return the file that was pinned in order for us to do
2421 * that. If we didn't pin a file then we return NULL. The file that is
2422 * returned needs to be fput()'ed when we're done with it.
ef00e08e 2423 */
6b4c9f44 2424static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
ef00e08e 2425{
2a1180f1
JB
2426 struct file *file = vmf->vma->vm_file;
2427 struct file_ra_state *ra = &file->f_ra;
ef00e08e 2428 struct address_space *mapping = file->f_mapping;
6b4c9f44 2429 struct file *fpin = NULL;
2a1180f1 2430 pgoff_t offset = vmf->pgoff;
ef00e08e
LT
2431
2432 /* If we don't want any read-ahead, don't bother */
2a1180f1 2433 if (vmf->vma->vm_flags & VM_RAND_READ)
6b4c9f44 2434 return fpin;
275b12bf 2435 if (!ra->ra_pages)
6b4c9f44 2436 return fpin;
ef00e08e 2437
2a1180f1 2438 if (vmf->vma->vm_flags & VM_SEQ_READ) {
6b4c9f44 2439 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
7ffc59b4
WF
2440 page_cache_sync_readahead(mapping, ra, file, offset,
2441 ra->ra_pages);
6b4c9f44 2442 return fpin;
ef00e08e
LT
2443 }
2444
207d04ba
AK
2445 /* Avoid banging the cache line if not needed */
2446 if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
ef00e08e
LT
2447 ra->mmap_miss++;
2448
2449 /*
2450 * Do we miss much more than hit in this file? If so,
2451 * stop bothering with read-ahead. It will only hurt.
2452 */
2453 if (ra->mmap_miss > MMAP_LOTSAMISS)
6b4c9f44 2454 return fpin;
ef00e08e 2455
d30a1100
WF
2456 /*
2457 * mmap read-around
2458 */
6b4c9f44 2459 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
600e19af
RG
2460 ra->start = max_t(long, 0, offset - ra->ra_pages / 2);
2461 ra->size = ra->ra_pages;
2462 ra->async_size = ra->ra_pages / 4;
275b12bf 2463 ra_submit(ra, mapping, file);
6b4c9f44 2464 return fpin;
ef00e08e
LT
2465}
2466
2467/*
2468 * Asynchronous readahead happens when we find the page and PG_readahead,
6b4c9f44
JB
2469 * so we want to possibly extend the readahead further. We return the file that
2470 * was pinned if we have to drop the mmap_sem in order to do IO.
ef00e08e 2471 */
6b4c9f44
JB
2472static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
2473 struct page *page)
ef00e08e 2474{
2a1180f1
JB
2475 struct file *file = vmf->vma->vm_file;
2476 struct file_ra_state *ra = &file->f_ra;
ef00e08e 2477 struct address_space *mapping = file->f_mapping;
6b4c9f44 2478 struct file *fpin = NULL;
2a1180f1 2479 pgoff_t offset = vmf->pgoff;
ef00e08e
LT
2480
2481 /* If we don't want any read-ahead, don't bother */
2a1180f1 2482 if (vmf->vma->vm_flags & VM_RAND_READ)
6b4c9f44 2483 return fpin;
ef00e08e
LT
2484 if (ra->mmap_miss > 0)
2485 ra->mmap_miss--;
6b4c9f44
JB
2486 if (PageReadahead(page)) {
2487 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
2fad6f5d
WF
2488 page_cache_async_readahead(mapping, ra, file,
2489 page, offset, ra->ra_pages);
6b4c9f44
JB
2490 }
2491 return fpin;
ef00e08e
LT
2492}
2493
485bb99b 2494/**
54cb8821 2495 * filemap_fault - read in file data for page fault handling
d0217ac0 2496 * @vmf: struct vm_fault containing details of the fault
485bb99b 2497 *
54cb8821 2498 * filemap_fault() is invoked via the vma operations vector for a
1da177e4
LT
2499 * mapped memory region to read in file data during a page fault.
2500 *
2501 * The goto's are kind of ugly, but this streamlines the normal case of having
2502 * it in the page cache, and handles the special cases reasonably without
2503 * having a lot of duplicated code.
9a95f3cf
PC
2504 *
2505 * vma->vm_mm->mmap_sem must be held on entry.
2506 *
2507 * If our return value has VM_FAULT_RETRY set, it's because
2508 * lock_page_or_retry() returned 0.
2509 * The mmap_sem has usually been released in this case.
2510 * See __lock_page_or_retry() for the exception.
2511 *
2512 * If our return value does not have VM_FAULT_RETRY set, the mmap_sem
2513 * has not been released.
2514 *
2515 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
a862f68a
MR
2516 *
2517 * Return: bitwise-OR of %VM_FAULT_ codes.
1da177e4 2518 */
2bcd6454 2519vm_fault_t filemap_fault(struct vm_fault *vmf)
1da177e4
LT
2520{
2521 int error;
11bac800 2522 struct file *file = vmf->vma->vm_file;
6b4c9f44 2523 struct file *fpin = NULL;
1da177e4
LT
2524 struct address_space *mapping = file->f_mapping;
2525 struct file_ra_state *ra = &file->f_ra;
2526 struct inode *inode = mapping->host;
ef00e08e 2527 pgoff_t offset = vmf->pgoff;
9ab2594f 2528 pgoff_t max_off;
1da177e4 2529 struct page *page;
2bcd6454 2530 vm_fault_t ret = 0;
1da177e4 2531
9ab2594f
MW
2532 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2533 if (unlikely(offset >= max_off))
5307cc1a 2534 return VM_FAULT_SIGBUS;
1da177e4 2535
1da177e4 2536 /*
49426420 2537 * Do we have something in the page cache already?
1da177e4 2538 */
ef00e08e 2539 page = find_get_page(mapping, offset);
45cac65b 2540 if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
1da177e4 2541 /*
ef00e08e
LT
2542 * We found the page, so try async readahead before
2543 * waiting for the lock.
1da177e4 2544 */
6b4c9f44 2545 fpin = do_async_mmap_readahead(vmf, page);
45cac65b 2546 } else if (!page) {
ef00e08e 2547 /* No page in the page cache at all */
ef00e08e 2548 count_vm_event(PGMAJFAULT);
2262185c 2549 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
ef00e08e 2550 ret = VM_FAULT_MAJOR;
6b4c9f44 2551 fpin = do_sync_mmap_readahead(vmf);
ef00e08e 2552retry_find:
a75d4c33
JB
2553 page = pagecache_get_page(mapping, offset,
2554 FGP_CREAT|FGP_FOR_MMAP,
2555 vmf->gfp_mask);
6b4c9f44
JB
2556 if (!page) {
2557 if (fpin)
2558 goto out_retry;
a75d4c33 2559 return vmf_error(-ENOMEM);
6b4c9f44 2560 }
1da177e4
LT
2561 }
2562
6b4c9f44
JB
2563 if (!lock_page_maybe_drop_mmap(vmf, page, &fpin))
2564 goto out_retry;
b522c94d
ML
2565
2566 /* Did it get truncated? */
2567 if (unlikely(page->mapping != mapping)) {
2568 unlock_page(page);
2569 put_page(page);
2570 goto retry_find;
2571 }
309381fe 2572 VM_BUG_ON_PAGE(page->index != offset, page);
b522c94d 2573
1da177e4 2574 /*
d00806b1
NP
2575 * We have a locked page in the page cache, now we need to check
2576 * that it's up-to-date. If not, it is going to be due to an error.
1da177e4 2577 */
d00806b1 2578 if (unlikely(!PageUptodate(page)))
1da177e4
LT
2579 goto page_not_uptodate;
2580
6b4c9f44
JB
2581 /*
2582 * We've made it this far and we had to drop our mmap_sem, now is the
2583 * time to return to the upper layer and have it re-find the vma and
2584 * redo the fault.
2585 */
2586 if (fpin) {
2587 unlock_page(page);
2588 goto out_retry;
2589 }
2590
ef00e08e
LT
2591 /*
2592 * Found the page and have a reference on it.
2593 * We must recheck i_size under page lock.
2594 */
9ab2594f
MW
2595 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2596 if (unlikely(offset >= max_off)) {
d00806b1 2597 unlock_page(page);
09cbfeaf 2598 put_page(page);
5307cc1a 2599 return VM_FAULT_SIGBUS;
d00806b1
NP
2600 }
2601
d0217ac0 2602 vmf->page = page;
83c54070 2603 return ret | VM_FAULT_LOCKED;
1da177e4 2604
1da177e4 2605page_not_uptodate:
1da177e4
LT
2606 /*
2607 * Umm, take care of errors if the page isn't up-to-date.
2608 * Try to re-read it _once_. We do this synchronously,
2609 * because there really aren't any performance issues here
2610 * and we need to check for errors.
2611 */
1da177e4 2612 ClearPageError(page);
6b4c9f44 2613 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
994fc28c 2614 error = mapping->a_ops->readpage(file, page);
3ef0f720
MS
2615 if (!error) {
2616 wait_on_page_locked(page);
2617 if (!PageUptodate(page))
2618 error = -EIO;
2619 }
6b4c9f44
JB
2620 if (fpin)
2621 goto out_retry;
09cbfeaf 2622 put_page(page);
d00806b1
NP
2623
2624 if (!error || error == AOP_TRUNCATED_PAGE)
994fc28c 2625 goto retry_find;
1da177e4 2626
d00806b1 2627 /* Things didn't work out. Return zero to tell the mm layer so. */
76d42bd9 2628 shrink_readahead_size_eio(file, ra);
d0217ac0 2629 return VM_FAULT_SIGBUS;
6b4c9f44
JB
2630
2631out_retry:
2632 /*
2633 * We dropped the mmap_sem, we need to return to the fault handler to
2634 * re-find the vma and come back and find our hopefully still populated
2635 * page.
2636 */
2637 if (page)
2638 put_page(page);
2639 if (fpin)
2640 fput(fpin);
2641 return ret | VM_FAULT_RETRY;
54cb8821
NP
2642}
2643EXPORT_SYMBOL(filemap_fault);
2644
82b0f8c3 2645void filemap_map_pages(struct vm_fault *vmf,
bae473a4 2646 pgoff_t start_pgoff, pgoff_t end_pgoff)
f1820361 2647{
82b0f8c3 2648 struct file *file = vmf->vma->vm_file;
f1820361 2649 struct address_space *mapping = file->f_mapping;
bae473a4 2650 pgoff_t last_pgoff = start_pgoff;
9ab2594f 2651 unsigned long max_idx;
070e807c 2652 XA_STATE(xas, &mapping->i_pages, start_pgoff);
69bf4b6b 2653 struct page *head, *page;
f1820361
KS
2654
2655 rcu_read_lock();
070e807c
MW
2656 xas_for_each(&xas, page, end_pgoff) {
2657 if (xas_retry(&xas, page))
2658 continue;
2659 if (xa_is_value(page))
2cf938aa 2660 goto next;
f1820361 2661
69bf4b6b
LT
2662 head = compound_head(page);
2663
e0975b2a
MH
2664 /*
2665 * Check for a locked page first, as a speculative
2666 * reference may adversely influence page migration.
2667 */
69bf4b6b 2668 if (PageLocked(head))
e0975b2a 2669 goto next;
69bf4b6b 2670 if (!page_cache_get_speculative(head))
070e807c 2671 goto next;
f1820361 2672
69bf4b6b
LT
2673 /* The page was split under us? */
2674 if (compound_head(page) != head)
2675 goto skip;
2676
2677 /* Has the page moved? */
070e807c
MW
2678 if (unlikely(page != xas_reload(&xas)))
2679 goto skip;
f1820361
KS
2680
2681 if (!PageUptodate(page) ||
2682 PageReadahead(page) ||
2683 PageHWPoison(page))
2684 goto skip;
2685 if (!trylock_page(page))
2686 goto skip;
2687
2688 if (page->mapping != mapping || !PageUptodate(page))
2689 goto unlock;
2690
9ab2594f
MW
2691 max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
2692 if (page->index >= max_idx)
f1820361
KS
2693 goto unlock;
2694
f1820361
KS
2695 if (file->f_ra.mmap_miss > 0)
2696 file->f_ra.mmap_miss--;
7267ec00 2697
070e807c 2698 vmf->address += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
82b0f8c3 2699 if (vmf->pte)
070e807c
MW
2700 vmf->pte += xas.xa_index - last_pgoff;
2701 last_pgoff = xas.xa_index;
82b0f8c3 2702 if (alloc_set_pte(vmf, NULL, page))
7267ec00 2703 goto unlock;
f1820361
KS
2704 unlock_page(page);
2705 goto next;
2706unlock:
2707 unlock_page(page);
2708skip:
09cbfeaf 2709 put_page(page);
f1820361 2710next:
7267ec00 2711 /* Huge page is mapped? No need to proceed. */
82b0f8c3 2712 if (pmd_trans_huge(*vmf->pmd))
7267ec00 2713 break;
f1820361
KS
2714 }
2715 rcu_read_unlock();
2716}
2717EXPORT_SYMBOL(filemap_map_pages);
2718
2bcd6454 2719vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
4fcf1c62
JK
2720{
2721 struct page *page = vmf->page;
11bac800 2722 struct inode *inode = file_inode(vmf->vma->vm_file);
2bcd6454 2723 vm_fault_t ret = VM_FAULT_LOCKED;
4fcf1c62 2724
14da9200 2725 sb_start_pagefault(inode->i_sb);
11bac800 2726 file_update_time(vmf->vma->vm_file);
4fcf1c62
JK
2727 lock_page(page);
2728 if (page->mapping != inode->i_mapping) {
2729 unlock_page(page);
2730 ret = VM_FAULT_NOPAGE;
2731 goto out;
2732 }
14da9200
JK
2733 /*
2734 * We mark the page dirty already here so that when freeze is in
2735 * progress, we are guaranteed that writeback during freezing will
2736 * see the dirty page and writeprotect it again.
2737 */
2738 set_page_dirty(page);
1d1d1a76 2739 wait_for_stable_page(page);
4fcf1c62 2740out:
14da9200 2741 sb_end_pagefault(inode->i_sb);
4fcf1c62
JK
2742 return ret;
2743}
4fcf1c62 2744
f0f37e2f 2745const struct vm_operations_struct generic_file_vm_ops = {
54cb8821 2746 .fault = filemap_fault,
f1820361 2747 .map_pages = filemap_map_pages,
4fcf1c62 2748 .page_mkwrite = filemap_page_mkwrite,
1da177e4
LT
2749};
2750
2751/* This is used for a general mmap of a disk file */
2752
2753int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
2754{
2755 struct address_space *mapping = file->f_mapping;
2756
2757 if (!mapping->a_ops->readpage)
2758 return -ENOEXEC;
2759 file_accessed(file);
2760 vma->vm_ops = &generic_file_vm_ops;
2761 return 0;
2762}
1da177e4
LT
2763
2764/*
2765 * This is for filesystems which do not implement ->writepage.
2766 */
2767int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
2768{
2769 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
2770 return -EINVAL;
2771 return generic_file_mmap(file, vma);
2772}
2773#else
4b96a37d 2774vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
45397228 2775{
4b96a37d 2776 return VM_FAULT_SIGBUS;
45397228 2777}
1da177e4
LT
2778int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
2779{
2780 return -ENOSYS;
2781}
2782int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
2783{
2784 return -ENOSYS;
2785}
2786#endif /* CONFIG_MMU */
2787
45397228 2788EXPORT_SYMBOL(filemap_page_mkwrite);
1da177e4
LT
2789EXPORT_SYMBOL(generic_file_mmap);
2790EXPORT_SYMBOL(generic_file_readonly_mmap);
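
/*
 * Editor's illustration, not part of filemap.c: wiring the mmap helpers
 * above into a filesystem. A filesystem that implements ->writepage can
 * point ->mmap at generic_file_mmap(); one that cannot write pages back
 * should use generic_file_readonly_mmap() so shared writable mappings are
 * refused. The wrapper name is hypothetical.
 */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* installs generic_file_vm_ops: filemap_fault, filemap_map_pages, ... */
	return generic_file_mmap(file, vma);
}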
2791
67f9fd91
SL
2792static struct page *wait_on_page_read(struct page *page)
2793{
2794 if (!IS_ERR(page)) {
2795 wait_on_page_locked(page);
2796 if (!PageUptodate(page)) {
09cbfeaf 2797 put_page(page);
67f9fd91
SL
2798 page = ERR_PTR(-EIO);
2799 }
2800 }
2801 return page;
2802}
2803
32b63529 2804static struct page *do_read_cache_page(struct address_space *mapping,
57f6b96c 2805 pgoff_t index,
5e5358e7 2806 int (*filler)(void *, struct page *),
0531b2aa
LT
2807 void *data,
2808 gfp_t gfp)
1da177e4 2809{
eb2be189 2810 struct page *page;
1da177e4
LT
2811 int err;
2812repeat:
2813 page = find_get_page(mapping, index);
2814 if (!page) {
453f85d4 2815 page = __page_cache_alloc(gfp);
eb2be189
NP
2816 if (!page)
2817 return ERR_PTR(-ENOMEM);
e6f67b8c 2818 err = add_to_page_cache_lru(page, mapping, index, gfp);
eb2be189 2819 if (unlikely(err)) {
09cbfeaf 2820 put_page(page);
eb2be189
NP
2821 if (err == -EEXIST)
2822 goto repeat;
22ecdb4f 2823 /* Presumably ENOMEM for xarray node */
1da177e4
LT
2824 return ERR_PTR(err);
2825 }
32b63529
MG
2826
2827filler:
1da177e4
LT
2828 err = filler(data, page);
2829 if (err < 0) {
09cbfeaf 2830 put_page(page);
32b63529 2831 return ERR_PTR(err);
1da177e4 2832 }
1da177e4 2833
32b63529
MG
2834 page = wait_on_page_read(page);
2835 if (IS_ERR(page))
2836 return page;
2837 goto out;
2838 }
1da177e4
LT
2839 if (PageUptodate(page))
2840 goto out;
2841
ebded027
MG
2842 /*
2843 * Page is not up to date and may be locked due to one of the following
2844 * case a: Page is being filled and the page lock is held
2845 * case b: Read/write error clearing the page uptodate status
2846 * case c: Truncation in progress (page locked)
2847 * case d: Reclaim in progress
2848 *
2849 * Case a, the page will be up to date when the page is unlocked.
2850 * There is no need to serialise on the page lock here as the page
2851 * is pinned so the lock gives no additional protection. Even if the
2852 * page is truncated, the data is still valid if PageUptodate as
2853 * it's a read vs truncate race.
2854 * Case b, the page will not be up to date
2855 * Case c, the page may be truncated but in itself, the data may still
2856 * be valid after IO completes as it's a read vs truncate race. The
2857 * operation must restart if the page is not uptodate on unlock but
2858 * otherwise serialising on page lock to stabilise the mapping gives
2859 * no additional guarantees to the caller as the page lock is
2860 * released before return.
2861 * Case d, similar to truncation. If reclaim holds the page lock, it
2862 * will be a race with remove_mapping that determines if the mapping
2863 * is valid on unlock but otherwise the data is valid and there is
2864 * no need to serialise with page lock.
2865 *
2866 * As the page lock gives no additional guarantee, we optimistically
2867 * wait on the page to be unlocked and check if it's up to date and
2868 * use the page if it is. Otherwise, the page lock is required to
2869 * distinguish between the different cases. The motivation is that we
2870 * avoid spurious serialisations and wakeups when multiple processes
2871 * wait on the same page for IO to complete.
2872 */
2873 wait_on_page_locked(page);
2874 if (PageUptodate(page))
2875 goto out;
2876
2877 /* Distinguish between all the cases under the safety of the lock */
1da177e4 2878 lock_page(page);
ebded027
MG
2879
2880 /* Case c or d, restart the operation */
1da177e4
LT
2881 if (!page->mapping) {
2882 unlock_page(page);
09cbfeaf 2883 put_page(page);
32b63529 2884 goto repeat;
1da177e4 2885 }
ebded027
MG
2886
2887 /* Someone else locked and filled the page in a very small window */
1da177e4
LT
2888 if (PageUptodate(page)) {
2889 unlock_page(page);
2890 goto out;
2891 }
32b63529
MG
2892 goto filler;
2893
c855ff37 2894out:
6fe6900e
NP
2895 mark_page_accessed(page);
2896 return page;
2897}
0531b2aa
LT
2898
2899/**
67f9fd91 2900 * read_cache_page - read into page cache, fill it if needed
0531b2aa
LT
2901 * @mapping: the page's address_space
2902 * @index: the page index
2903 * @filler: function to perform the read
5e5358e7 2904 * @data: first arg to filler(data, page) function, often left as NULL
0531b2aa 2905 *
0531b2aa 2906 * Read into the page cache. If a page already exists, and PageUptodate() is
67f9fd91 2907 * not set, try to fill the page and wait for it to become unlocked.
0531b2aa
LT
2908 *
2909 * If the page does not get brought uptodate, return -EIO.
a862f68a
MR
2910 *
2911 * Return: up to date page on success, ERR_PTR() on failure.
0531b2aa 2912 */
67f9fd91 2913struct page *read_cache_page(struct address_space *mapping,
0531b2aa 2914 pgoff_t index,
5e5358e7 2915 int (*filler)(void *, struct page *),
0531b2aa
LT
2916 void *data)
2917{
2918 return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
2919}
67f9fd91 2920EXPORT_SYMBOL(read_cache_page);
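
/*
 * Editor's illustration, not part of filemap.c: reading one page of a file
 * through the page cache. read_mapping_page() is the usual wrapper that
 * passes the mapping's ->readpage as the filler; the surrounding function
 * and its error handling are hypothetical.
 */
static int example_read_one_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page = read_mapping_page(mapping, index, NULL);

	if (IS_ERR(page))
		return PTR_ERR(page);	/* typically -EIO or -ENOMEM */
	/* the page is uptodate and unlocked here; use it, then drop the ref */
	put_page(page);
	return 0;
}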
0531b2aa
LT
2921
2922/**
2923 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
2924 * @mapping: the page's address_space
2925 * @index: the page index
2926 * @gfp: the page allocator flags to use if allocating
2927 *
2928 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
e6f67b8c 2929 * any new page allocations done using the specified allocation flags.
0531b2aa
LT
2930 *
2931 * If the page does not get brought uptodate, return -EIO.
a862f68a
MR
2932 *
2933 * Return: up to date page on success, ERR_PTR() on failure.
0531b2aa
LT
2934 */
2935struct page *read_cache_page_gfp(struct address_space *mapping,
2936 pgoff_t index,
2937 gfp_t gfp)
2938{
2939 filler_t *filler = (filler_t *)mapping->a_ops->readpage;
2940
67f9fd91 2941 return do_read_cache_page(mapping, index, filler, NULL, gfp);
0531b2aa
LT
2942}
2943EXPORT_SYMBOL(read_cache_page_gfp);
2944
9fd91a90
DW
2945/*
2946 * Don't operate on ranges the page cache doesn't support, and don't exceed the
2947 * LFS limits. If pos is under the limit it becomes a short access. If it
2948 * exceeds the limit we return -EFBIG.
2949 */
9fd91a90
DW
2950static int generic_write_check_limits(struct file *file, loff_t pos,
2951 loff_t *count)
2952{
646955cd
AG
2953 struct inode *inode = file->f_mapping->host;
2954 loff_t max_size = inode->i_sb->s_maxbytes;
9fd91a90
DW
2955 loff_t limit = rlimit(RLIMIT_FSIZE);
2956
2957 if (limit != RLIM_INFINITY) {
2958 if (pos >= limit) {
2959 send_sig(SIGXFSZ, current, 0);
2960 return -EFBIG;
2961 }
2962 *count = min(*count, limit - pos);
2963 }
2964
646955cd
AG
2965 if (!(file->f_flags & O_LARGEFILE))
2966 max_size = MAX_NON_LFS;
2967
2968 if (unlikely(pos >= max_size))
2969 return -EFBIG;
2970
2971 *count = min(*count, max_size - pos);
2972
2973 return 0;
9fd91a90
DW
2974}
2975
1da177e4
LT
2976/*
2977 * Performs necessary checks before doing a write
2978 *
485bb99b 2979 * Can adjust writing position or amount of bytes to write.
1da177e4
LT
2980 * Returns appropriate error code that caller should return or
2981 * zero in case that write should be allowed.
2982 */
3309dd04 2983inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from)
1da177e4 2984{
3309dd04 2985 struct file *file = iocb->ki_filp;
1da177e4 2986 struct inode *inode = file->f_mapping->host;
9fd91a90
DW
2987 loff_t count;
2988 int ret;
1da177e4 2989
3309dd04
AV
2990 if (!iov_iter_count(from))
2991 return 0;
1da177e4 2992
0fa6b005 2993 /* FIXME: this is for backwards compatibility with 2.4 */
2ba48ce5 2994 if (iocb->ki_flags & IOCB_APPEND)
3309dd04 2995 iocb->ki_pos = i_size_read(inode);
1da177e4 2996
6be96d3a
GR
2997 if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
2998 return -EINVAL;
2999
9fd91a90
DW
3000 count = iov_iter_count(from);
3001 ret = generic_write_check_limits(file, iocb->ki_pos, &count);
3002 if (ret)
3003 return ret;
1da177e4 3004
9fd91a90 3005 iov_iter_truncate(from, count);
3309dd04 3006 return iov_iter_count(from);
1da177e4
LT
3007}
3008EXPORT_SYMBOL(generic_write_checks);
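
/*
 * Editor's illustration, not part of filemap.c: the usual ->write_iter
 * prologue built around generic_write_checks(). This mirrors the shape of
 * generic_file_write_iter(); the function name is hypothetical and the
 * locking shown is the bare minimum, not a recommendation for any
 * particular filesystem.
 */
static ssize_t example_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);	/* clamps or rejects the write */
	if (ret > 0)
		ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}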
3009
1383a7ed
DW
3010/*
3011 * Performs necessary checks before doing a clone.
3012 *
646955cd 3013 * Can adjust amount of bytes to clone via @req_count argument.
1383a7ed
DW
3014 * Returns appropriate error code that caller should return or
3015 * zero in case the clone should be allowed.
3016 */
3017int generic_remap_checks(struct file *file_in, loff_t pos_in,
3018 struct file *file_out, loff_t pos_out,
42ec3d4c 3019 loff_t *req_count, unsigned int remap_flags)
1383a7ed
DW
3020{
3021 struct inode *inode_in = file_in->f_mapping->host;
3022 struct inode *inode_out = file_out->f_mapping->host;
3023 uint64_t count = *req_count;
3024 uint64_t bcount;
3025 loff_t size_in, size_out;
3026 loff_t bs = inode_out->i_sb->s_blocksize;
9fd91a90 3027 int ret;
1383a7ed
DW
3028
3029 /* The start of both ranges must be aligned to an fs block. */
3030 if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_out, bs))
3031 return -EINVAL;
3032
3033 /* Ensure offsets don't wrap. */
3034 if (pos_in + count < pos_in || pos_out + count < pos_out)
3035 return -EINVAL;
3036
3037 size_in = i_size_read(inode_in);
3038 size_out = i_size_read(inode_out);
3039
3040 /* Dedupe requires both ranges to be within EOF. */
3d28193e 3041 if ((remap_flags & REMAP_FILE_DEDUP) &&
1383a7ed
DW
3042 (pos_in >= size_in || pos_in + count > size_in ||
3043 pos_out >= size_out || pos_out + count > size_out))
3044 return -EINVAL;
3045
3046 /* Ensure the infile range is within the infile. */
3047 if (pos_in >= size_in)
3048 return -EINVAL;
3049 count = min(count, size_in - (uint64_t)pos_in);
3050
9fd91a90
DW
3051 ret = generic_write_check_limits(file_out, pos_out, &count);
3052 if (ret)
3053 return ret;
1da177e4
LT
3054
3055 /*
1383a7ed
DW
3056 * If the user wanted us to link to the infile's EOF, round up to the
3057 * next block boundary for this check.
3058 *
3059 * Otherwise, make sure the count is also block-aligned, having
3060 * already confirmed the starting offsets' block alignment.
1da177e4 3061 */
1383a7ed
DW
3062 if (pos_in + count == size_in) {
3063 bcount = ALIGN(size_in, bs) - pos_in;
3064 } else {
3065 if (!IS_ALIGNED(count, bs))
eca3654e 3066 count = ALIGN_DOWN(count, bs);
1383a7ed 3067 bcount = count;
1da177e4
LT
3068 }
3069
1383a7ed
DW
3070 /* Don't allow overlapped cloning within the same file. */
3071 if (inode_in == inode_out &&
3072 pos_out + bcount > pos_in &&
3073 pos_out < pos_in + bcount)
3074 return -EINVAL;
3075
1da177e4 3076 /*
eca3654e
DW
3077 * We shortened the request but the caller can't deal with that, so
3078 * bounce the request back to userspace.
1da177e4 3079 */
eca3654e 3080 if (*req_count != count && !(remap_flags & REMAP_FILE_CAN_SHORTEN))
1383a7ed 3081 return -EINVAL;
1da177e4 3082
eca3654e 3083 *req_count = count;
1383a7ed 3084 return 0;
1da177e4 3085}
1da177e4 3086
a3171351
AG
3087
3088/*
3089 * Performs common checks before doing a file copy/clone
3090 * from @file_in to @file_out.
3091 */
3092int generic_file_rw_checks(struct file *file_in, struct file *file_out)
3093{
3094 struct inode *inode_in = file_inode(file_in);
3095 struct inode *inode_out = file_inode(file_out);
3096
3097 /* Don't copy dirs, pipes, sockets... */
3098 if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
3099 return -EISDIR;
3100 if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
3101 return -EINVAL;
3102
3103 if (!(file_in->f_mode & FMODE_READ) ||
3104 !(file_out->f_mode & FMODE_WRITE) ||
3105 (file_out->f_flags & O_APPEND))
3106 return -EBADF;
3107
3108 return 0;
3109}
3110
96e6e8f4
AG
3111/*
3112 * Performs necessary checks before doing a file copy
3113 *
3114 * Can adjust amount of bytes to copy via @req_count argument.
3115 * Returns appropriate error code that caller should return or
3116 * zero in case the copy should be allowed.
3117 */
3118int generic_copy_file_checks(struct file *file_in, loff_t pos_in,
3119 struct file *file_out, loff_t pos_out,
3120 size_t *req_count, unsigned int flags)
3121{
3122 struct inode *inode_in = file_inode(file_in);
3123 struct inode *inode_out = file_inode(file_out);
3124 uint64_t count = *req_count;
3125 loff_t size_in;
3126 int ret;
3127
3128 ret = generic_file_rw_checks(file_in, file_out);
3129 if (ret)
3130 return ret;
3131
3132 /* Don't touch certain kinds of inodes */
3133 if (IS_IMMUTABLE(inode_out))
3134 return -EPERM;
3135
3136 if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
3137 return -ETXTBSY;
3138
3139 /* Ensure offsets don't wrap. */
3140 if (pos_in + count < pos_in || pos_out + count < pos_out)
3141 return -EOVERFLOW;
3142
3143 /* Shorten the copy to EOF */
3144 size_in = i_size_read(inode_in);
3145 if (pos_in >= size_in)
3146 count = 0;
3147 else
3148 count = min(count, size_in - (uint64_t)pos_in);
3149
3150 ret = generic_write_check_limits(file_out, pos_out, &count);
3151 if (ret)
3152 return ret;
3153
3154 /* Don't allow overlapped copying within the same file. */
3155 if (inode_in == inode_out &&
3156 pos_out + count > pos_in &&
3157 pos_out < pos_in + count)
3158 return -EINVAL;
3159
3160 *req_count = count;
3161 return 0;
3162}
3163
afddba49
NP
3164int pagecache_write_begin(struct file *file, struct address_space *mapping,
3165 loff_t pos, unsigned len, unsigned flags,
3166 struct page **pagep, void **fsdata)
3167{
3168 const struct address_space_operations *aops = mapping->a_ops;
3169
4e02ed4b 3170 return aops->write_begin(file, mapping, pos, len, flags,
afddba49 3171 pagep, fsdata);
afddba49
NP
3172}
3173EXPORT_SYMBOL(pagecache_write_begin);
3174
3175int pagecache_write_end(struct file *file, struct address_space *mapping,
3176 loff_t pos, unsigned len, unsigned copied,
3177 struct page *page, void *fsdata)
3178{
3179 const struct address_space_operations *aops = mapping->a_ops;
afddba49 3180
4e02ed4b 3181 return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
afddba49
NP
3182}
3183EXPORT_SYMBOL(pagecache_write_end);
3184
1da177e4 3185ssize_t
1af5bb49 3186generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
1da177e4
LT
3187{
3188 struct file *file = iocb->ki_filp;
3189 struct address_space *mapping = file->f_mapping;
3190 struct inode *inode = mapping->host;
1af5bb49 3191 loff_t pos = iocb->ki_pos;
1da177e4 3192 ssize_t written;
a969e903
CH
3193 size_t write_len;
3194 pgoff_t end;
1da177e4 3195
0c949334 3196 write_len = iov_iter_count(from);
09cbfeaf 3197 end = (pos + write_len - 1) >> PAGE_SHIFT;
a969e903 3198
6be96d3a
GR
3199 if (iocb->ki_flags & IOCB_NOWAIT) {
3200 /* If there are pages to writeback, return */
3201 if (filemap_range_has_page(inode->i_mapping, pos,
35f12f0f 3202 pos + write_len - 1))
6be96d3a
GR
3203 return -EAGAIN;
3204 } else {
3205 written = filemap_write_and_wait_range(mapping, pos,
3206 pos + write_len - 1);
3207 if (written)
3208 goto out;
3209 }
a969e903
CH
3210
3211 /*
3212 * After a write we want buffered reads to be sure to go to disk to get
 3213 * the new data. We invalidate clean cached pages from the region we're
3214 * about to write. We do this *before* the write so that we can return
6ccfa806 3215 * without clobbering -EIOCBQUEUED from ->direct_IO().
a969e903 3216 */
55635ba7 3217 written = invalidate_inode_pages2_range(mapping,
09cbfeaf 3218 pos >> PAGE_SHIFT, end);
55635ba7
AR
3219 /*
3220 * If a page can not be invalidated, return 0 to fall back
3221 * to buffered write.
3222 */
3223 if (written) {
3224 if (written == -EBUSY)
3225 return 0;
3226 goto out;
a969e903
CH
3227 }
3228
639a93a5 3229 written = mapping->a_ops->direct_IO(iocb, from);
a969e903
CH
3230
3231 /*
3232 * Finally, try again to invalidate clean pages which might have been
3233 * cached by non-direct readahead, or faulted in by get_user_pages()
3234 * if the source of the write was an mmap'ed region of the file
3235 * we're writing. Either one is a pretty crazy thing to do,
3236 * so we don't support it 100%. If this invalidation
3237 * fails, tough, the write still worked...
332391a9
LC
3238 *
3239 * Most of the time we do not need this since dio_complete() will do
3240	 * the invalidation for us. However, there are some filesystems that
3241	 * never end up calling dio_complete(), so let's not break them by
3242	 * removing this invalidation completely.
a969e903 3243 */
332391a9
LC
3244 if (mapping->nrpages)
3245 invalidate_inode_pages2_range(mapping,
3246 pos >> PAGE_SHIFT, end);
a969e903 3247
1da177e4 3248 if (written > 0) {
0116651c 3249 pos += written;
639a93a5 3250 write_len -= written;
0116651c
NK
3251 if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
3252 i_size_write(inode, pos);
1da177e4
LT
3253 mark_inode_dirty(inode);
3254 }
5cb6c6c7 3255 iocb->ki_pos = pos;
1da177e4 3256 }
639a93a5 3257 iov_iter_revert(from, write_len - iov_iter_count(from));
a969e903 3258out:
1da177e4
LT
3259 return written;
3260}
3261EXPORT_SYMBOL(generic_file_direct_write);
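The IOCB_NOWAIT branch near the top of generic_file_direct_write() is what a userspace RWF_NOWAIT write ends up hitting: instead of waiting for page-cache pages covering the target range, the write fails with -EAGAIN. A hedged userspace sketch follows (the file name, size and alignment are illustrative; it assumes a glibc new enough to expose pwritev2() and RWF_NOWAIT, and in this kernel generation RWF_NOWAIT is only honoured together with O_DIRECT).

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	struct iovec iov;
	void *buf;
	ssize_t ret;
	int fd;

	/* O_DIRECT needs an aligned buffer; 4096 is an illustrative choice. */
	fd = open("testfile", O_WRONLY | O_CREAT | O_DIRECT, 0644);
	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;
	memset(buf, 'x', 4096);
	iov.iov_base = buf;
	iov.iov_len  = 4096;

	/*
	 * RWF_NOWAIT sets IOCB_NOWAIT: if the target range is still covered
	 * by page-cache pages, the kernel returns -EAGAIN instead of waiting,
	 * per the filemap_range_has_page() check above.
	 */
	ret = pwritev2(fd, &iov, 1, 0, RWF_NOWAIT);
	if (ret < 0 && errno == EAGAIN)
		printf("range busy: would have had to wait\n");

	free(buf);
	close(fd);
	return 0;
}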
3262
eb2be189
NP
3263/*
3264 * Find or create a page at the given pagecache position. Return the locked
3265 * page. This function is specifically for buffered writes.
3266 */
54566b2c
NP
3267struct page *grab_cache_page_write_begin(struct address_space *mapping,
3268 pgoff_t index, unsigned flags)
eb2be189 3269{
eb2be189 3270 struct page *page;
bbddabe2 3271 int fgp_flags = FGP_LOCK|FGP_WRITE|FGP_CREAT;
0faa70cb 3272
54566b2c 3273 if (flags & AOP_FLAG_NOFS)
2457aec6
MG
3274 fgp_flags |= FGP_NOFS;
3275
3276 page = pagecache_get_page(mapping, index, fgp_flags,
45f87de5 3277 mapping_gfp_mask(mapping));
c585a267 3278 if (page)
2457aec6 3279 wait_for_stable_page(page);
eb2be189 3280
eb2be189
NP
3281 return page;
3282}
54566b2c 3283EXPORT_SYMBOL(grab_cache_page_write_begin);
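grab_cache_page_write_begin() is the usual building block for a filesystem's ->write_begin. Below is a hedged kernel-context sketch of such a method (example_write_begin is an invented name; the shape roughly follows simple_write_begin() in fs/libfs.c). A real implementation would also bring the page up to date, for example by zeroing the parts that are not about to be overwritten.

#include <linux/fs.h>
#include <linux/pagemap.h>

static int example_write_begin(struct file *file, struct address_space *mapping,
			       loff_t pos, unsigned len, unsigned flags,
			       struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;

	/* Locked, write-stable page from the page cache, or NULL on OOM. */
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	*pagep = page;
	return 0;
}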
eb2be189 3284
3b93f911 3285ssize_t generic_perform_write(struct file *file,
afddba49
NP
3286 struct iov_iter *i, loff_t pos)
3287{
3288 struct address_space *mapping = file->f_mapping;
3289 const struct address_space_operations *a_ops = mapping->a_ops;
3290 long status = 0;
3291 ssize_t written = 0;
674b892e
NP
3292 unsigned int flags = 0;
3293
afddba49
NP
3294 do {
3295 struct page *page;
afddba49
NP
3296 unsigned long offset; /* Offset into pagecache page */
3297 unsigned long bytes; /* Bytes to write to page */
3298 size_t copied; /* Bytes copied from user */
3299 void *fsdata;
3300
09cbfeaf
KS
3301 offset = (pos & (PAGE_SIZE - 1));
3302 bytes = min_t(unsigned long, PAGE_SIZE - offset,
afddba49
NP
3303 iov_iter_count(i));
3304
3305again:
00a3d660
LT
3306 /*
3307 * Bring in the user page that we will copy from _first_.
3308 * Otherwise there's a nasty deadlock on copying from the
3309 * same page as we're writing to, without it being marked
3310 * up-to-date.
3311 *
3312 * Not only is this an optimisation, but it is also required
3313		 * to check that the address is actually valid when atomic
3314		 * usercopies are used below.
3315 */
3316 if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
3317 status = -EFAULT;
3318 break;
3319 }
3320
296291cd
JK
3321 if (fatal_signal_pending(current)) {
3322 status = -EINTR;
3323 break;
3324 }
3325
674b892e 3326 status = a_ops->write_begin(file, mapping, pos, bytes, flags,
afddba49 3327 &page, &fsdata);
2457aec6 3328 if (unlikely(status < 0))
afddba49
NP
3329 break;
3330
931e80e4 3331 if (mapping_writably_mapped(mapping))
3332 flush_dcache_page(page);
00a3d660 3333
afddba49 3334 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
afddba49
NP
3335 flush_dcache_page(page);
3336
3337 status = a_ops->write_end(file, mapping, pos, bytes, copied,
3338 page, fsdata);
3339 if (unlikely(status < 0))
3340 break;
3341 copied = status;
3342
3343 cond_resched();
3344
124d3b70 3345 iov_iter_advance(i, copied);
afddba49
NP
3346 if (unlikely(copied == 0)) {
3347 /*
3348 * If we were unable to copy any data at all, we must
3349 * fall back to a single segment length write.
3350 *
3351 * If we didn't fallback here, we could livelock
3352 * because not all segments in the iov can be copied at
3353 * once without a pagefault.
3354 */
09cbfeaf 3355 bytes = min_t(unsigned long, PAGE_SIZE - offset,
afddba49
NP
3356 iov_iter_single_seg_count(i));
3357 goto again;
3358 }
afddba49
NP
3359 pos += copied;
3360 written += copied;
3361
3362 balance_dirty_pages_ratelimited(mapping);
afddba49
NP
3363 } while (iov_iter_count(i));
3364
3365 return written ? written : status;
3366}
3b93f911 3367EXPORT_SYMBOL(generic_perform_write);
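The fault-in-readable step at the top of the copy loop above guards against a user buffer that is an mmap of the very range being written. A hedged userspace illustration of that situation (the file name and page size are illustrative, and the file is assumed to already be at least one page long):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	char *map;
	int fd = open("somefile", O_RDWR);

	if (fd < 0)
		return 1;
	map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED)
		return 1;

	/*
	 * Source buffer and destination range are the same file page.
	 * Without iov_iter_fault_in_readable() priming the source first,
	 * the atomic usercopy done under the locked destination page could
	 * fault on that same, not yet up-to-date page and make no progress.
	 */
	if (write(fd, map, 4096) < 0)
		perror("write");

	munmap(map, 4096);
	close(fd);
	return 0;
}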
1da177e4 3368
e4dd9de3 3369/**
8174202b 3370 * __generic_file_write_iter - write data to a file
e4dd9de3 3371 * @iocb: IO state structure (file, offset, etc.)
8174202b 3372 * @from: iov_iter with data to write
e4dd9de3
JK
3373 *
3374 * This function does all the work needed for actually writing data to a
3375 * file. It does all basic checks, removes SUID from the file, updates
3376 * modification times and calls the proper subroutines depending on whether we
3377 * do direct IO or a standard buffered write.
3378 *
3379 * It expects i_mutex to be grabbed unless we work on a block device or similar
3380 * object which does not need locking at all.
3381 *
3382 * This function does *not* take care of syncing data in case of O_SYNC write.
3383 * A caller has to handle it. This is mainly due to the fact that we want to
3384 * avoid syncing under i_mutex.
a862f68a
MR
3385 *
3386 * Return:
3387 * * number of bytes written, even for truncated writes
3388 * * negative error code if no data has been written at all
e4dd9de3 3389 */
8174202b 3390ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1da177e4
LT
3391{
3392 struct file *file = iocb->ki_filp;
fb5527e6 3393 struct address_space * mapping = file->f_mapping;
1da177e4 3394 struct inode *inode = mapping->host;
3b93f911 3395 ssize_t written = 0;
1da177e4 3396 ssize_t err;
3b93f911 3397 ssize_t status;
1da177e4 3398
1da177e4 3399 /* We can write back this queue in page reclaim */
de1414a6 3400 current->backing_dev_info = inode_to_bdi(inode);
5fa8e0a1 3401 err = file_remove_privs(file);
1da177e4
LT
3402 if (err)
3403 goto out;
3404
c3b2da31
JB
3405 err = file_update_time(file);
3406 if (err)
3407 goto out;
1da177e4 3408
2ba48ce5 3409 if (iocb->ki_flags & IOCB_DIRECT) {
0b8def9d 3410 loff_t pos, endbyte;
fb5527e6 3411
1af5bb49 3412 written = generic_file_direct_write(iocb, from);
1da177e4 3413 /*
fbbbad4b
MW
3414 * If the write stopped short of completing, fall back to
3415 * buffered writes. Some filesystems do this for writes to
3416 * holes, for example. For DAX files, a buffered write will
3417 * not succeed (even if it did, DAX does not handle dirty
3418 * page-cache pages correctly).
1da177e4 3419 */
0b8def9d 3420 if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
fbbbad4b
MW
3421 goto out;
3422
0b8def9d 3423 status = generic_perform_write(file, from, pos = iocb->ki_pos);
fb5527e6 3424 /*
3b93f911 3425 * If generic_perform_write() returned a synchronous error
fb5527e6
JM
3426 * then we want to return the number of bytes which were
3427 * direct-written, or the error code if that was zero. Note
3428 * that this differs from normal direct-io semantics, which
3429 * will return -EFOO even if some bytes were written.
3430 */
60bb4529 3431 if (unlikely(status < 0)) {
3b93f911 3432 err = status;
fb5527e6
JM
3433 goto out;
3434 }
fb5527e6
JM
3435 /*
3436 * We need to ensure that the page cache pages are written to
3437 * disk and invalidated to preserve the expected O_DIRECT
3438 * semantics.
3439 */
3b93f911 3440 endbyte = pos + status - 1;
0b8def9d 3441 err = filemap_write_and_wait_range(mapping, pos, endbyte);
fb5527e6 3442 if (err == 0) {
0b8def9d 3443 iocb->ki_pos = endbyte + 1;
3b93f911 3444 written += status;
fb5527e6 3445 invalidate_mapping_pages(mapping,
09cbfeaf
KS
3446 pos >> PAGE_SHIFT,
3447 endbyte >> PAGE_SHIFT);
fb5527e6
JM
3448 } else {
3449 /*
3450 * We don't know how much we wrote, so just return
3451 * the number of bytes which were direct-written
3452 */
3453 }
3454 } else {
0b8def9d
AV
3455 written = generic_perform_write(file, from, iocb->ki_pos);
3456 if (likely(written > 0))
3457 iocb->ki_pos += written;
fb5527e6 3458 }
1da177e4
LT
3459out:
3460 current->backing_dev_info = NULL;
3461 return written ? written : err;
3462}
8174202b 3463EXPORT_SYMBOL(__generic_file_write_iter);
e4dd9de3 3464
e4dd9de3 3465/**
8174202b 3466 * generic_file_write_iter - write data to a file
e4dd9de3 3467 * @iocb: IO state structure
8174202b 3468 * @from: iov_iter with data to write
e4dd9de3 3469 *
8174202b 3470 * This is a wrapper around __generic_file_write_iter() to be used by most
e4dd9de3
JK
3471 * filesystems. It takes care of syncing the file in case of an O_SYNC write
3472 * and acquires i_mutex as needed.
a862f68a
MR
3473 * Return:
3474 * * negative error code if no data has been written at all or
3475 * vfs_fsync_range() failed for a synchronous write
3476 * * number of bytes written, even for truncated writes
e4dd9de3 3477 */
8174202b 3478ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1da177e4
LT
3479{
3480 struct file *file = iocb->ki_filp;
148f948b 3481 struct inode *inode = file->f_mapping->host;
1da177e4 3482 ssize_t ret;
1da177e4 3483
5955102c 3484 inode_lock(inode);
3309dd04
AV
3485 ret = generic_write_checks(iocb, from);
3486 if (ret > 0)
5f380c7f 3487 ret = __generic_file_write_iter(iocb, from);
5955102c 3488 inode_unlock(inode);
1da177e4 3489
e2592217
CH
3490 if (ret > 0)
3491 ret = generic_write_sync(iocb, ret);
1da177e4
LT
3492 return ret;
3493}
8174202b 3494EXPORT_SYMBOL(generic_file_write_iter);
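Most filesystems consume this helper directly from their file_operations. A hedged sketch of such wiring (example_fops is an invented name; the particular mix of generic_* helpers is illustrative):

#include <linux/fs.h>

const struct file_operations example_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,	/* defined above */
	.mmap		= generic_file_mmap,
	.fsync		= generic_file_fsync,
	.splice_read	= generic_file_splice_read,
};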
1da177e4 3495
cf9a2ae8
DH
3496/**
3497 * try_to_release_page() - release old fs-specific metadata on a page
3498 *
3499 * @page: the page which the kernel is trying to free
3500 * @gfp_mask: memory allocation flags (and I/O mode)
3501 *
3502 * The address_space is asked to try to release any data held against the page
a862f68a 3503 * (presumably at page->private).
cf9a2ae8 3504 *
266cf658
DH
3505 * This may also be called if PG_fscache is set on a page, indicating that the
3506 * page is known to the local caching routines.
3507 *
cf9a2ae8 3508 * The @gfp_mask argument specifies whether I/O may be performed to release
71baba4b 3509 * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS).
cf9a2ae8 3510 *
a862f68a 3511 * Return: %1 if the release was successful, otherwise return zero.
cf9a2ae8
DH
3512 */
3513int try_to_release_page(struct page *page, gfp_t gfp_mask)
3514{
3515 struct address_space * const mapping = page->mapping;
3516
3517 BUG_ON(!PageLocked(page));
3518 if (PageWriteback(page))
3519 return 0;
3520
3521 if (mapping && mapping->a_ops->releasepage)
3522 return mapping->a_ops->releasepage(page, gfp_mask);
3523 return try_to_free_buffers(page);
3524}
3525
3526EXPORT_SYMBOL(try_to_release_page);
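When a mapping does define ->releasepage, the dispatch above hands the decision to the filesystem. A hedged kernel-context sketch of what such a method often looks like (example_releasepage is an invented name; the shape loosely follows ext4's non-journalled case, which refuses while PageChecked() marks filesystem-private state and otherwise defers to try_to_free_buffers()):

#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/page-flags.h>

static int example_releasepage(struct page *page, gfp_t gfp_mask)
{
	/* Keep the page while filesystem-private state still depends on it. */
	if (PageChecked(page))
		return 0;

	/* Otherwise let the buffer layer decide whether the buffers can go. */
	return try_to_free_buffers(page);
}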