/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include <linux/shmem_fs.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

/*
 * Regular page slots are stabilized by the page lock even without the tree
 * itself locked.  These unlocked entries need verification under the tree
 * lock.
 */
static inline void __clear_shadow_entry(struct address_space *mapping,
				pgoff_t index, void *entry)
{
	XA_STATE(xas, &mapping->i_pages, index);

	xas_set_update(&xas, workingset_update_node);
	if (xas_load(&xas) != entry)
		return;
	xas_store(&xas, NULL);
	mapping->nrexceptional--;
}

static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
			       void *entry)
{
	xa_lock_irq(&mapping->i_pages);
	__clear_shadow_entry(mapping, index, entry);
	xa_unlock_irq(&mapping->i_pages);
}

/*
 * Unconditionally remove exceptional entries. Usually called from truncate
 * path. Note that the pagevec may be altered by this function by removing
 * exceptional entries similar to what pagevec_remove_exceptionals does.
 */
static void truncate_exceptional_pvec_entries(struct address_space *mapping,
				struct pagevec *pvec, pgoff_t *indices,
				pgoff_t end)
{
	int i, j;
	bool dax, lock;

	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	for (j = 0; j < pagevec_count(pvec); j++)
		if (xa_is_value(pvec->pages[j]))
			break;

	if (j == pagevec_count(pvec))
		return;

	dax = dax_mapping(mapping);
	lock = !dax && indices[j] < end;
	if (lock)
		xa_lock_irq(&mapping->i_pages);

	for (i = j; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		pgoff_t index = indices[i];

		if (!xa_is_value(page)) {
			pvec->pages[j++] = page;
			continue;
		}

		if (index >= end)
			continue;

		if (unlikely(dax)) {
			dax_delete_mapping_entry(mapping, index);
			continue;
		}

		__clear_shadow_entry(mapping, index, page);
	}

	if (lock)
		xa_unlock_irq(&mapping->i_pages);
	pvec->nr = j;
}

/*
 * Invalidate exceptional entry if easily possible. This handles exceptional
 * entries for invalidate_inode_pages().
 */
static int invalidate_exceptional_entry(struct address_space *mapping,
					pgoff_t index, void *entry)
{
	/* Handled by shmem itself, or for DAX we do nothing. */
	if (shmem_mapping(mapping) || dax_mapping(mapping))
		return 1;
	clear_shadow_entry(mapping, index, entry);
	return 1;
}

/*
 * Invalidate exceptional entry if clean. This handles exceptional entries for
 * invalidate_inode_pages2() so for DAX it evicts only clean entries.
 */
static int invalidate_exceptional_entry2(struct address_space *mapping,
					 pgoff_t index, void *entry)
{
	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return 1;
	if (dax_mapping(mapping))
		return dax_invalidate_mapping_entry_sync(mapping, index);
	clear_shadow_entry(mapping, index, entry);
	return 1;
}

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
}
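
/*
 * Illustrative sketch (not from this file): a block-backed filesystem can
 * either leave ->invalidatepage NULL, falling back to block_invalidatepage()
 * as above, or wire it up explicitly in its address_space_operations.
 * "example_aops" is a hypothetical name:
 *
 *	static const struct address_space_operations example_aops = {
 *		.invalidatepage	= block_invalidatepage,
 *	};
 */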

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_cleanup_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		pgoff_t nr = PageTransHuge(page) ? HPAGE_PMD_NR : 1;
		unmap_mapping_pages(mapping, page->index, nr, false);
	}

	if (page_has_private(page))
		do_invalidatepage(page, 0, PAGE_SIZE);

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 * Hence dirty accounting check is placed after invalidation.
	 */
	cancel_dirty_page(page);
	ClearPageMappedToDisk(page);
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);

	if (page->mapping != mapping)
		return -EIO;

	truncate_cleanup_page(mapping, page);
	delete_from_page_cache(page);
	return 0;
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);
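
/*
 * Illustrative sketch (not from this file): filesystems typically hook this
 * helper into their address_space_operations so that the memory-failure code
 * can drop poisoned pagecache pages.  "example_aops" is a hypothetical name:
 *
 *	static const struct address_space_operations example_aops = {
 *		.error_remove_page	= generic_error_remove_page,
 *	};
 */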

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages.  The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidatepage() accepts a range to invalidate,
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * properly page aligned.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	unsigned int	partial_start;	/* inclusive */
	unsigned int	partial_end;	/* exclusive */
	struct pagevec	pvec;
	pgoff_t		indices[PAGEVEC_SIZE];
	pgoff_t		index;
	int		i;

	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		goto out;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_SIZE - 1);

	/*
	 * 'start' and 'end' always covers the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_SHIFT;

	pagevec_init(&pvec);
	index = start;
	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			indices)) {
		/*
		 * Pagevec array has exceptional entries and we may also fail
		 * to lock some pages. So we store pages that can be deleted
		 * in a new pagevec.
		 */
		struct pagevec locked_pvec;

		pagevec_init(&locked_pvec);
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end)
				break;

			if (xa_is_value(page))
				continue;

			if (!trylock_page(page))
				continue;
			WARN_ON(page_to_index(page) != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			pagevec_add(&locked_pvec, page);
		}
		for (i = 0; i < pagevec_count(&locked_pvec); i++)
			truncate_cleanup_page(mapping, locked_pvec.pages[i]);
		delete_from_page_cache_batch(mapping, &locked_pvec);
		for (i = 0; i < pagevec_count(&locked_pvec); i++)
			unlock_page(locked_pvec.pages[i]);
		truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				/* Truncation within a single page */
				top = partial_end;
				partial_end = 0;
			}
			wait_on_page_writeback(page);
			zero_user_segment(page, partial_start, top);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, partial_start,
						  top - partial_start);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = find_lock_page(mapping, end);
		if (page) {
			wait_on_page_writeback(page);
			zero_user_segment(page, 0, partial_end);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, 0,
						  partial_end);
			unlock_page(page);
			put_page(page);
		}
	}
	/*
	 * If the truncation happened within a single page no pages
	 * will be released, just zeroed, so we can bail out now.
	 */
	if (start >= end)
		goto out;

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
			/* If all gone from start onwards, we're done */
			if (index == start)
				break;
			/* Otherwise restart to make sure all gone */
			index = start;
			continue;
		}
		if (index == start && indices[0] >= end) {
			/* All gone out of hole to be punched, we're done */
			pagevec_remove_exceptionals(&pvec);
			pagevec_release(&pvec);
			break;
		}

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end) {
				/* Restart punch to make sure all gone */
				index = start - 1;
				break;
			}

			if (xa_is_value(page))
				continue;

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
		pagevec_release(&pvec);
		index++;
	}

out:
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);
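
/*
 * Illustrative sketch (not from this file): a caller punching out the
 * pagecache for byte range [offset, offset + len) must remember that @lend
 * is inclusive, roughly:
 *
 *	loff_t first = offset;
 *	loff_t last = offset + len - 1;
 *
 *	truncate_inode_pages_range(inode->i_mapping, first, last);
 *
 * Partial first/last pages are zeroed rather than removed, as described
 * above.
 */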

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_mutex.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
	unsigned long nrexceptional;
	unsigned long nrpages;

	/*
	 * Page reclaim cannot participate in regular inode lifetime
	 * management (it can't call iput()) and thus can race with the
	 * inode teardown.  Tell it when the address space is exiting,
	 * so that it does not install eviction information after the
	 * final truncate has begun.
	 */
	mapping_set_exiting(mapping);

	/*
	 * When reclaim installs eviction entries, it increases
	 * nrexceptional first, then decreases nrpages.  Make sure we see
	 * this in the right order or we might miss an entry.
	 */
	nrpages = mapping->nrpages;
	smp_rmb();
	nrexceptional = mapping->nrexceptional;

	if (nrpages || nrexceptional) {
		/*
		 * As truncation uses a lockless tree lookup, cycle
		 * the tree lock to make sure any ongoing tree
		 * modification that does not see AS_EXITING is
		 * completed before starting the final truncate.
		 */
		xa_lock_irq(&mapping->i_pages);
		xa_unlock_irq(&mapping->i_pages);
	}

	/*
	 * Cleancache needs notification even if there are no pages or shadow
	 * entries.
	 */
	truncate_inode_pages(mapping, 0);
}
EXPORT_SYMBOL(truncate_inode_pages_final);
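
/*
 * Illustrative sketch (not from this file): the usual call site is the tail
 * of a filesystem's ->evict_inode(), e.g. for a simple in-memory filesystem:
 *
 *	static void example_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		clear_inode(inode);
 *	}
 *
 * "example_evict_inode" is a hypothetical name.
 */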

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec);
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (xa_is_value(page)) {
				invalidate_exceptional_entry(mapping, index,
							     page);
				continue;
			}

			if (!trylock_page(page))
				continue;

			WARN_ON(page_to_index(page) != index);

			/* Middle of THP: skip */
			if (PageTransTail(page)) {
				unlock_page(page);
				continue;
			} else if (PageTransHuge(page)) {
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
				/*
				 * 'end' is in the middle of THP. Don't
				 * invalidate the page as the part outside of
				 * 'end' could be still useful.
				 */
				if (index > end) {
					unlock_page(page);
					continue;
				}
			}

			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest, so try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_file_page(page);
			count += ret;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
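
/*
 * Illustrative sketch (not from this file, details are an assumption): a
 * POSIX_FADV_DONTNEED-style caller converts a byte range to page offsets,
 * rounding the start up and the end down so no partially covered page is
 * dropped:
 *
 *	pgoff_t start = (offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
 *	pgoff_t end = (offset + len - 1) >> PAGE_SHIFT;
 *
 *	if (end >= start)
 *		invalidate_mapping_pages(mapping, start, end);
 *
 * The return value counts only the pages actually invalidated; dirty,
 * locked, writeback and mapped pages are silently skipped.
 */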

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	unsigned long flags;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	xa_lock_irqsave(&mapping->i_pages, flags);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page, NULL);
	xa_unlock_irqrestore(&mapping->i_pages, flags);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	put_page(page);	/* pagecache ref */
	return 1;
failed:
	xa_unlock_irqrestore(&mapping->i_pages, flags);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		goto out;

	pagevec_init(&pvec);
	index = start;
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (xa_is_value(page)) {
				if (!invalidate_exceptional_entry2(mapping,
								   index, page))
					ret = -EBUSY;
				continue;
			}

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_pages(mapping, index,
						(1 + end - index), false);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_pages(mapping, index,
								1, false);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	/*
	 * For DAX we invalidate page tables after invalidating page cache.  We
	 * could invalidate page tables while invalidating each entry, however
	 * that would be expensive.  And doing range unmapping before doesn't
	 * work as we have no cheap way to find whether a page cache entry
	 * didn't get remapped later.
	 */
	if (dax_mapping(mapping)) {
		unmap_mapping_pages(mapping, start, end - start + 1, false);
	}
out:
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
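
/*
 * Illustrative sketch (not from this file, details are an assumption): a
 * direct I/O write path invalidates the cached range it just wrote around,
 * propagating -EBUSY when a page could not be dropped:
 *
 *	err = invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
 *			(pos + count - 1) >> PAGE_SHIFT);
 *	if (err)
 *		return err;
 */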

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks).  This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);
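
/*
 * Illustrative sketch (not from this file): the required ordering is "new
 * i_size first, then trim the cache", with block freeing afterwards.
 * "example_fs_free_blocks" is a hypothetical helper:
 *
 *	i_size_write(inode, newsize);
 *	truncate_pagecache(inode, newsize);
 *	example_fs_free_blocks(inode, newsize);
 */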

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize.  It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_mutex but e.g. xfs uses a different lock) and before all filesystem
 * specific block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	if (newsize > oldsize)
		pagecache_isize_extended(inode, oldsize, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
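
/*
 * Illustrative sketch (not from this file): a minimal ->setattr() handling
 * ATTR_SIZE under inode_lock() could look like:
 *
 *	if (attr->ia_valid & ATTR_SIZE)
 *		truncate_setsize(inode, attr->ia_size);
 *	setattr_copy(inode, attr);
 *	mark_inode_dirty(inode);
 */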

/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode: inode for which i_size was extended
 * @from: original inode size
 * @to: new inode size
 *
 * Handle extension of inode size either caused by extending truncate or by
 * write starting after current i_size.  We mark the page straddling current
 * i_size RO so that page_mkwrite() is called on the nearest write access to
 * the page.  This way the filesystem can be sure that page_mkwrite() is
 * called on the page before user writes to the page via mmap after the
 * i_size has been changed.
 *
 * The function must be called after i_size is updated so that a page fault
 * coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_mutex - this not only
 * makes sure i_size is stable but also that userspace cannot observe the new
 * i_size value before we are prepared to store mmap writes at the new inode
 * size.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = i_blocksize(inode);
	loff_t rounded_from;
	struct page *page;
	pgoff_t index;

	WARN_ON(to > inode->i_size);

	if (from >= to || bsize == PAGE_SIZE)
		return;
	/* Page straddling @from will not have any hole block created? */
	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
		return;

	index = from >> PAGE_SHIFT;
	page = find_lock_page(inode->i_mapping, index);
	/* Page not cached? Nothing to do */
	if (!page)
		return;
	/*
	 * See clear_page_dirty_for_io() for details why set_page_dirty()
	 * is needed.
	 */
	if (page_mkclean(page))
		set_page_dirty(page);
	unlock_page(page);
	put_page(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);
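
/*
 * Illustrative sketch (not from this file, details are an assumption): a
 * filesystem with blocksize < PAGE_SIZE that extends i_size in its write
 * path calls this after publishing the new size:
 *
 *	loff_t oldsize = inode->i_size;
 *
 *	i_size_write(inode, pos + copied);
 *	if (pos + copied > oldsize)
 *		pagecache_isize_extended(inode, oldsize, pos + copied);
 */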

/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks).  This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 */

	/*
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
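
/*
 * Illustrative sketch (not from this file): a typical
 * fallocate(FALLOC_FL_PUNCH_HOLE) implementation trims the cache before
 * freeing blocks.  "example_fs_punch_blocks" is a hypothetical helper;
 * real filesystems round to their own block boundaries first:
 *
 *	truncate_pagecache_range(inode, offset, offset + len - 1);
 *	example_fs_punch_blocks(inode, offset, len);
 */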