// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/shmem_fs.h>
#include <linux/rmap.h>
#include "internal.h"

static void clear_shadow_entries(struct address_space *mapping,
		unsigned long start, unsigned long max)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct folio *folio;

	/* Handled by shmem itself, or for DAX we do nothing. */
	if (shmem_mapping(mapping) || dax_mapping(mapping))
		return;

	xas_set_update(&xas, workingset_update_node);

	spin_lock(&mapping->host->i_lock);
	xas_lock_irq(&xas);

	/* Clear all shadow entries from start to max */
	xas_for_each(&xas, folio, max) {
		if (xa_is_value(folio))
			xas_store(&xas, NULL);
	}

	xas_unlock_irq(&xas);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);
}

/*
 * Unconditionally remove exceptional entries. Usually called from truncate
 * path. Note that the folio_batch may be altered by this function by removing
 * exceptional entries similar to what folio_batch_remove_exceptionals() does.
 * Please note that indices[] has entries in ascending order as guaranteed by
 * either find_get_entries() or find_lock_entries().
 */
static void truncate_folio_batch_exceptionals(struct address_space *mapping,
				struct folio_batch *fbatch, pgoff_t *indices)
{
	XA_STATE(xas, &mapping->i_pages, indices[0]);
	int nr = folio_batch_count(fbatch);
	struct folio *folio;
	int i, j;

	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	for (j = 0; j < nr; j++)
		if (xa_is_value(fbatch->folios[j]))
			break;

	if (j == nr)
		return;

	if (dax_mapping(mapping)) {
		for (i = j; i < nr; i++) {
			if (xa_is_value(fbatch->folios[i])) {
				/*
				 * File systems should already have called
				 * dax_break_layout_entry() to remove all DAX
				 * entries while holding a lock to prevent
				 * establishing new entries. Therefore we
				 * shouldn't find any here.
				 */
				WARN_ON_ONCE(1);

				/*
				 * Delete the mapping so truncate_pagecache()
				 * doesn't loop forever.
				 */
				dax_delete_mapping_entry(mapping, indices[i]);
			}
		}
		goto out;
	}

	xas_set(&xas, indices[j]);
	xas_set_update(&xas, workingset_update_node);

	spin_lock(&mapping->host->i_lock);
	xas_lock_irq(&xas);

	xas_for_each(&xas, folio, indices[nr-1]) {
		if (xa_is_value(folio))
			xas_store(&xas, NULL);
	}

	xas_unlock_irq(&xas);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);
out:
	folio_batch_remove_exceptionals(fbatch);
}

/**
 * folio_invalidate - Invalidate part or all of a folio.
 * @folio: The folio which is affected.
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * folio_invalidate() is called when all or part of the folio has become
 * invalidated by a truncate operation.
 *
 * folio_invalidate() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void folio_invalidate(struct folio *folio, size_t offset, size_t length)
{
	const struct address_space_operations *aops = folio->mapping->a_ops;

	if (aops->invalidate_folio)
		aops->invalidate_folio(folio, offset, length);
}
EXPORT_SYMBOL_GPL(folio_invalidate);

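/*
 * Usage sketch (illustrative, not part of this file): a block-backed
 * filesystem typically satisfies the contract above by wiring
 * ->invalidate_folio to block_invalidate_folio(), which discards or trims
 * buffer_heads in the invalidated range.  The "foofs" names below are
 * hypothetical; only the aops fields and the block_* helpers are real.
 *
 *	static const struct address_space_operations foofs_aops = {
 *		.read_folio		= foofs_read_folio,
 *		.writepages		= foofs_writepages,
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *		.release_folio		= foofs_release_folio,
 *	};
 */
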
/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void truncate_cleanup_folio(struct folio *folio)
{
	if (folio_mapped(folio))
		unmap_mapping_folio(folio);

	if (folio_needs_release(folio))
		folio_invalidate(folio, 0, folio_size(folio));

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 * Hence dirty accounting check is placed after invalidation.
	 */
	folio_cancel_dirty(folio);
}

int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
{
	if (folio->mapping != mapping)
		return -EIO;

	truncate_cleanup_folio(folio);
	filemap_remove_folio(folio);
	return 0;
}

/*
 * Handle partial folios.  The folio may be entirely within the
 * range if a split has raced with us.  If not, we zero the part of the
 * folio that's within the [start, end] range, and then split the folio if
 * it's large.  split_page_range() will discard pages which now lie beyond
 * i_size, and we rely on the caller to discard pages which lie within a
 * newly created hole.
 *
 * Returns false if splitting failed so the caller can avoid
 * discarding the entire folio which is stubbornly unsplit.
 */
bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
{
	loff_t pos = folio_pos(folio);
	size_t size = folio_size(folio);
	unsigned int offset, length;
	struct page *split_at, *split_at2;

	if (pos < start)
		offset = start - pos;
	else
		offset = 0;
	if (pos + size <= (u64)end)
		length = size - offset;
	else
		length = end + 1 - pos - offset;

	folio_wait_writeback(folio);
	if (length == size) {
		truncate_inode_folio(folio->mapping, folio);
		return true;
	}

	/*
	 * We may be zeroing pages we're about to discard, but it avoids
	 * doing a complex calculation here, and then doing the zeroing
	 * anyway if the page split fails.
	 */
	if (!mapping_inaccessible(folio->mapping))
		folio_zero_range(folio, offset, length);

	if (folio_needs_release(folio))
		folio_invalidate(folio, offset, length);
	if (!folio_test_large(folio))
		return true;

	split_at = folio_page(folio, PAGE_ALIGN_DOWN(offset) / PAGE_SIZE);
	if (!try_folio_split(folio, split_at, NULL)) {
		/*
		 * try to split at offset + length to make sure folios within
		 * the range can be dropped, especially to avoid memory waste
		 * for shmem truncate
		 */
		struct folio *folio2;

		if (offset + length == size)
			goto no_split;

		split_at2 = folio_page(folio,
				PAGE_ALIGN_DOWN(offset + length) / PAGE_SIZE);
		folio2 = page_folio(split_at2);

		if (!folio_try_get(folio2))
			goto no_split;

		if (!folio_test_large(folio2))
			goto out;

		if (!folio_trylock(folio2))
			goto out;

		/*
		 * make sure folio2 is large and does not change its mapping.
		 * Its split result does not matter here.
		 */
		if (folio_test_large(folio2) &&
		    folio2->mapping == folio->mapping)
			try_folio_split(folio2, split_at2, NULL);

		folio_unlock(folio2);
out:
		folio_put(folio2);
no_split:
		return true;
	}
	if (folio_test_dirty(folio))
		return false;
	truncate_inode_folio(folio->mapping, folio);
	return true;
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_folio(struct address_space *mapping,
		struct folio *folio)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_folio(mapping, folio);
}
EXPORT_SYMBOL(generic_error_remove_folio);

/**
 * mapping_evict_folio() - Remove an unused folio from the page-cache.
 * @mapping: The mapping this folio belongs to.
 * @folio: The folio to remove.
 *
 * Safely remove one folio from the page cache.
 * It only drops clean, unused folios.
 *
 * Context: Folio must be locked.
 * Return: The number of pages successfully removed.
 */
long mapping_evict_folio(struct address_space *mapping, struct folio *folio)
{
	/* The page may have been truncated before it was locked */
	if (!mapping)
		return 0;
	if (folio_test_dirty(folio) || folio_test_writeback(folio))
		return 0;
	/* The refcount will be elevated if any page in the folio is mapped */
	if (folio_ref_count(folio) >
			folio_nr_pages(folio) + folio_has_private(folio) + 1)
		return 0;
	if (!filemap_release_folio(folio, 0))
		return 0;

	return remove_mapping(mapping, folio);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidate_folio() accepts range to invalidate
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * page aligned properly.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	struct folio_batch fbatch;
	pgoff_t		indices[PAGEVEC_SIZE];
	pgoff_t		index;
	int		i;
	struct folio	*folio;
	bool		same_folio;

	if (mapping_empty(mapping))
		return;

	/*
	 * 'start' and 'end' always covers the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_SHIFT;

	folio_batch_init(&fbatch);
	index = start;
	while (index < end && find_lock_entries(mapping, &index, end - 1,
			&fbatch, indices)) {
		truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
		for (i = 0; i < folio_batch_count(&fbatch); i++)
			truncate_cleanup_folio(fbatch.folios[i]);
		delete_from_page_cache_batch(mapping, &fbatch);
		for (i = 0; i < folio_batch_count(&fbatch); i++)
			folio_unlock(fbatch.folios[i]);
		folio_batch_release(&fbatch);
		cond_resched();
	}

	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
	folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0);
	if (!IS_ERR(folio)) {
		same_folio = lend < folio_pos(folio) + folio_size(folio);
		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
			start = folio_next_index(folio);
			if (same_folio)
				end = folio->index;
		}
		folio_unlock(folio);
		folio_put(folio);
		folio = NULL;
	}

	if (!same_folio) {
		folio = __filemap_get_folio(mapping, lend >> PAGE_SHIFT,
						FGP_LOCK, 0);
		if (!IS_ERR(folio)) {
			if (!truncate_inode_partial_folio(folio, lstart, lend))
				end = folio->index;
			folio_unlock(folio);
			folio_put(folio);
		}
	}

	index = start;
	while (index < end) {
		cond_resched();
		if (!find_get_entries(mapping, &index, end - 1, &fbatch,
				indices)) {
			/* If all gone from start onwards, we're done */
			if (index == start)
				break;
			/* Otherwise restart to make sure all gone */
			index = start;
			continue;
		}

		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			/* We rely upon deletion not changing folio->index */

			if (xa_is_value(folio))
				continue;

			folio_lock(folio);
			VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
			folio_wait_writeback(folio);
			truncate_inode_folio(mapping, folio);
			folio_unlock(folio);
		}
		truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
		folio_batch_release(&fbatch);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_rwsem and
 * mapping->invalidate_lock.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __filemap_remove_folio()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_rwsem.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
	/*
	 * Page reclaim can not participate in regular inode lifetime
	 * management (can't call iput()) and thus can race with the
	 * inode teardown.  Tell it when the address space is exiting,
	 * so that it does not install eviction information after the
	 * final truncate has begun.
	 */
	mapping_set_exiting(mapping);

	if (!mapping_empty(mapping)) {
		/*
		 * As truncation uses a lockless tree lookup, cycle
		 * the tree lock to make sure any ongoing tree
		 * modification that does not see AS_EXITING is
		 * completed before starting the final truncate.
		 */
		xa_lock_irq(&mapping->i_pages);
		xa_unlock_irq(&mapping->i_pages);
	}

	truncate_inode_pages(mapping, 0);
}
EXPORT_SYMBOL(truncate_inode_pages_final);

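/*
 * Usage sketch (illustrative, hypothetical "foofs"): an ->evict_inode
 * implementation runs the final truncate before the inode is torn down,
 * as required above.
 *
 *	static void foofs_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		if (!inode->i_nlink && !is_bad_inode(inode))
 *			foofs_free_inode_blocks(inode);	// hypothetical
 *		clear_inode(inode);
 *	}
 */
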
/**
 * mapping_try_invalidate - Invalidate all the evictable folios of one inode
 * @mapping: the address_space which holds the folios to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 * @nr_failed: How many folio invalidations failed
 *
 * This function is similar to invalidate_mapping_pages(), except that it
 * returns the number of folios which could not be evicted in @nr_failed.
 */
unsigned long mapping_try_invalidate(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_failed)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct folio_batch fbatch;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	folio_batch_init(&fbatch);
	while (find_lock_entries(mapping, &index, end, &fbatch, indices)) {
		bool xa_has_values = false;
		int nr = folio_batch_count(&fbatch);

		for (i = 0; i < nr; i++) {
			struct folio *folio = fbatch.folios[i];

			/* We rely upon deletion not changing folio->index */

			if (xa_is_value(folio)) {
				xa_has_values = true;
				count++;
				continue;
			}

			ret = mapping_evict_folio(mapping, folio);
			folio_unlock(folio);
			/*
			 * Invalidation is a hint that the folio is no longer
			 * of interest and try to speed up its reclaim.
			 */
			if (!ret) {
				deactivate_file_folio(folio);
				/* Likely in the lru cache of a remote CPU */
				if (nr_failed)
					(*nr_failed)++;
			}
			count += ret;
		}

		if (xa_has_values)
			clear_shadow_entries(mapping, indices[0], indices[nr-1]);

		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}
	return count;
}

/**
 * invalidate_mapping_pages - Invalidate all clean, unlocked cache of one inode
 * @mapping: the address_space which holds the cache to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function removes pages that are clean, unmapped and unlocked,
 * as well as shadow entries. It will not block on IO activity.
 *
 * If you want to remove all the pages of one inode, regardless of
 * their use and writeback state, use truncate_inode_pages().
 *
 * Return: The number of indices that had their contents invalidated
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	return mapping_try_invalidate(mapping, start, end, NULL);
}
EXPORT_SYMBOL(invalidate_mapping_pages);

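/*
 * Usage sketch (illustrative): a caller that wants to drop clean, unused
 * cache for a byte range - similar in spirit to POSIX_FADV_DONTNEED -
 * converts byte offsets to page indices so that only whole pages inside
 * the range are considered.  foofs_drop_clean_cache() is hypothetical;
 * invalidate_mapping_pages() is the real entry point.
 *
 *	static void foofs_drop_clean_cache(struct inode *inode,
 *					   loff_t offset, loff_t len)
 *	{
 *		pgoff_t first = DIV_ROUND_UP(offset, PAGE_SIZE);
 *		pgoff_t last = (offset + len) >> PAGE_SHIFT;	// exclusive
 *
 *		if (last > first)
 *			invalidate_mapping_pages(inode->i_mapping,
 *						 first, last - 1);
 *	}
 */
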
static int folio_launder(struct address_space *mapping, struct folio *folio)
{
	if (!folio_test_dirty(folio))
		return 0;
	if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
		return 0;
	return mapping->a_ops->launder_folio(folio);
}

/*
 * This is like mapping_evict_folio(), except it ignores the folio's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave folios behind because
 * shrink_folio_list() has a temp ref on them, or because they're transiently
 * sitting in the folio_add_lru() caches.
 */
int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
		gfp_t gfp)
{
	int ret;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	if (folio_mapped(folio))
		unmap_mapping_folio(folio);
	BUG_ON(folio_mapped(folio));

	ret = folio_launder(mapping, folio);
	if (ret)
		return ret;
	if (folio->mapping != mapping)
		return -EBUSY;
	if (!filemap_release_folio(folio, gfp))
		return -EBUSY;

	spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
	if (folio_test_dirty(folio))
		goto failed;

	BUG_ON(folio_has_private(folio));
	__filemap_remove_folio(folio, NULL);
	xa_unlock_irq(&mapping->i_pages);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);

	filemap_free_folio(mapping, folio);
	return 1;
failed:
	xa_unlock_irq(&mapping->i_pages);
	spin_unlock(&mapping->host->i_lock);
	return -EBUSY;
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Return: -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct folio_batch fbatch;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	if (mapping_empty(mapping))
		return 0;

	folio_batch_init(&fbatch);
	index = start;
	while (find_get_entries(mapping, &index, end, &fbatch, indices)) {
		bool xa_has_values = false;
		int nr = folio_batch_count(&fbatch);

		for (i = 0; i < nr; i++) {
			struct folio *folio = fbatch.folios[i];

			/* We rely upon deletion not changing folio->index */

			if (xa_is_value(folio)) {
				xa_has_values = true;
				if (dax_mapping(mapping) &&
				    !dax_invalidate_mapping_entry_sync(mapping, indices[i]))
					ret = -EBUSY;
				continue;
			}

			if (!did_range_unmap && folio_mapped(folio)) {
				/*
				 * If folio is mapped, before taking its lock,
				 * zap the rest of the file in one hit.
				 */
				unmap_mapping_pages(mapping, indices[i],
						(1 + end - indices[i]), false);
				did_range_unmap = 1;
			}

			folio_lock(folio);
			if (unlikely(folio->mapping != mapping)) {
				folio_unlock(folio);
				continue;
			}
			VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
			folio_wait_writeback(folio);
			ret2 = folio_unmap_invalidate(mapping, folio, GFP_KERNEL);
			if (ret2 < 0)
				ret = ret2;
			folio_unlock(folio);
		}

		if (xa_has_values)
			clear_shadow_entries(mapping, indices[0], indices[nr-1]);

		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}
	/*
	 * For DAX we invalidate page tables after invalidating page cache.  We
	 * could invalidate page tables while invalidating each entry however
	 * that would be expensive.  And doing range unmapping before doesn't
	 * work as we have no cheap way to find whether page cache entry didn't
	 * get remapped later.
	 */
	if (dax_mapping(mapping)) {
		unmap_mapping_pages(mapping, start, end - start + 1, false);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Return: -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

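/*
 * Usage sketch (illustrative): direct I/O write paths commonly invalidate
 * the written range of the page cache so that later buffered reads do not
 * see stale data.  "pos" is the starting file offset and foofs_dio_write()
 * is hypothetical; the invalidation call is the real API.
 *
 *	written = foofs_dio_write(iocb, from);
 *	if (written > 0)
 *		invalidate_inode_pages2_range(inode->i_mapping,
 *				pos >> PAGE_SHIFT,
 *				(pos + written - 1) >> PAGE_SHIFT);
 */
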
/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_rwsem but e.g. xfs uses a different lock) and before all filesystem
 * specific block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	if (newsize > oldsize)
		pagecache_isize_extended(inode, oldsize, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);

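/*
 * Usage sketch (illustrative, hypothetical "foofs"): a ->setattr
 * implementation shrinking or growing a file updates i_size and the page
 * cache through truncate_setsize() before releasing the now-unused blocks.
 *
 *	static int foofs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
 *				 struct iattr *attr)
 *	{
 *		struct inode *inode = d_inode(dentry);
 *		int error;
 *
 *		error = setattr_prepare(idmap, dentry, attr);
 *		if (error)
 *			return error;
 *
 *		if ((attr->ia_valid & ATTR_SIZE) &&
 *		    attr->ia_size != i_size_read(inode)) {
 *			truncate_setsize(inode, attr->ia_size);
 *			foofs_truncate_blocks(inode, attr->ia_size);	// hypothetical
 *		}
 *
 *		setattr_copy(idmap, inode, attr);
 *		mark_inode_dirty(inode);
 *		return 0;
 *	}
 */
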
/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode:	inode for which i_size was extended
 * @from:	original inode size
 * @to:		new inode size
 *
 * Handle extension of inode size either caused by extending truncate or
 * by write starting after current i_size.  We mark the page straddling
 * current i_size RO so that page_mkwrite() is called on the first
 * write access to the page.  The filesystem will update its per-block
 * information before user writes to the page via mmap after the i_size
 * has been changed.
 *
 * The function must be called after i_size is updated so that page fault
 * coming after we unlock the folio will already see the new i_size.
 * The function must be called while we still hold i_rwsem - this not only
 * makes sure i_size is stable but also that userspace cannot observe new
 * i_size value before we are prepared to store mmap writes at new inode size.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = i_blocksize(inode);
	loff_t rounded_from;
	struct folio *folio;

	WARN_ON(to > inode->i_size);

	if (from >= to || bsize >= PAGE_SIZE)
		return;
	/* Page straddling @from will not have any hole block created? */
	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
		return;

	folio = filemap_lock_folio(inode->i_mapping, from / PAGE_SIZE);
	/* Folio not cached? Nothing to do */
	if (IS_ERR(folio))
		return;
	/*
	 * See folio_clear_dirty_for_io() for details why folio_mark_dirty()
	 * is needed.
	 */
	if (folio_mkclean(folio))
		folio_mark_dirty(folio);

	/*
	 * The post-eof range of the folio must be zeroed before it is exposed
	 * to the file. Writeback normally does this, but since i_size has been
	 * increased we handle it here.
	 */
	if (folio_test_dirty(folio)) {
		unsigned int offset, end;

		offset = from - folio_pos(folio);
		end = min_t(unsigned int, to - folio_pos(folio),
			    folio_size(folio));
		folio_zero_segment(folio, offset, end);
	}

	folio_unlock(folio);
	folio_put(folio);
}
EXPORT_SYMBOL(pagecache_isize_extended);

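/*
 * Usage sketch (illustrative): a buffered-write path that extends the file
 * (after the filesystem has allocated the new blocks) publishes the new
 * size and then lets pagecache_isize_extended() write-protect the folio
 * straddling the old EOF.  "pos" and "len" are the write position/length.
 *
 *	loff_t oldsize = i_size_read(inode);
 *
 *	if (pos + len > oldsize) {
 *		i_size_write(inode, pos + len);
 *		pagecache_isize_extended(inode, oldsize, pos + len);
 *	}
 */
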
/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 */

	/*
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
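
/*
 * Usage sketch (illustrative, hypothetical "foofs"): a FALLOC_FL_PUNCH_HOLE
 * handler typically drops the page cache for the hole with
 * truncate_pagecache_range() while holding the invalidate_lock, and only
 * then frees the underlying blocks.
 *
 *	static int foofs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 *	{
 *		filemap_invalidate_lock(inode->i_mapping);
 *		truncate_pagecache_range(inode, offset, offset + len - 1);
 *		foofs_free_blocks(inode, offset, len);	// hypothetical
 *		filemap_invalidate_unlock(inode->i_mapping);
 *		return 0;
 *	}
 */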