/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include "internal.h"


/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);
	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}
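
/*
 * Illustrative sketch (not part of this file): a block-backed filesystem
 * can either leave ->invalidatepage NULL and rely on the CONFIG_BLOCK
 * fallback above, or point it at block_invalidatepage() explicitly.
 * "myfs" below is a made-up name:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.readpage	= myfs_readpage,
 *		.writepage	= myfs_writepage,
 *		.invalidatepage	= block_invalidatepage,
 *	};
 */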

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
	if (page_has_private(page))
		do_invalidatepage(page, partial);
}

/*
 * This cancels just the dirty bit on the kernel page itself; it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around.  It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all.  However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM.  Can you say "ext3 is horribly ugly"?  Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	clear_page_mlock(page);
	remove_from_page_cache(page);
	ClearPageMappedToDisk(page);
	page_cache_release(page);	/* pagecache ref */
	return 0;
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	clear_page_mlock(page);
	ret = remove_mapping(mapping, page);

	return ret;
}

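/*
 * Remove one page from the pagecache: tear down any userspace mappings
 * of the page, then detach it from the radix tree via
 * truncate_complete_page().  The caller must hold the page lock.
 */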
int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				   (loff_t)page->index << PAGE_CACHE_SHIFT,
				   PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between
 * the specified offsets (and zeroing out the partial page
 * if lstart is not page-aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to avoid as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	/*
	 * Pass 1: remove whole pages without blocking on page locks
	 * or writeback.
	 */
	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (!trylock_page(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	/* Zero out the partial page that straddles the truncation point. */
	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	/*
	 * Pass 2: come back for anything pass 1 skipped, this time
	 * waiting on page locks and writeback.
	 */
	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			if (page->index > next)
				next = page->index;
			next++;
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
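
/*
 * Illustrative only (callers live outside this file): the common way a
 * filesystem reaches truncate_inode_pages() is from its inode teardown
 * path, e.g.
 *
 *	truncate_inode_pages(&inode->i_data, 0);
 *
 * when the inode is being deleted, or via vmtruncate() with the new
 * file size when handling truncate(2).
 */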

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 *
 * Returns the number of pages which were successfully invalidated.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				       pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = !trylock_page(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			if (PageDirty(page) || PageWriteback(page))
				goto unlock;
			if (page_mapped(page))
				goto unlock;
			ret += invalidate_complete_page(mapping, page);
unlock:
			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
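
/*
 * Illustrative only: this is the helper behind things like
 * posix_fadvise(POSIX_FADV_DONTNEED), which drops clean pagecache
 * over a byte range roughly as
 *
 *	invalidate_mapping_pages(mapping, start_index, end_index);
 *
 * where the indices are the byte offsets shifted by PAGE_CACHE_SHIFT.
 */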

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	clear_page_mlock(page);
	BUG_ON(page_has_private(page));
	__remove_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

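/*
 * Ask the filesystem to write back a dirty page so that it can be
 * invalidated: returns 0 if the page is already clean or the mapping
 * has no ->launder_page(), otherwise whatever ->launder_page() returns.
 * The page is expected to be locked.
 */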
static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !wrapped &&
	       pagevec_lookup(&pvec, mapping, next,
			      min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
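
/*
 * Illustrative only: the direct-IO write path uses this to shoot down
 * pagecache that would otherwise go stale after the write, along the
 * lines of
 *
 *	invalidate_inode_pages2_range(mapping,
 *			pos >> PAGE_CACHE_SHIFT, end);
 *
 * (see generic_file_direct_write() in mm/filemap.c).
 */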

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
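
/*
 * Illustrative only: network filesystems use invalidate_inode_pages2()
 * when the server tells them their cached data is stale, e.g. NFS does
 * roughly
 *
 *	ret = invalidate_inode_pages2(mapping);
 *
 * from its cache-revalidation path (fs/nfs/inode.c).
 */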