badpage: remove vma from page_remove_rmap
author     Hugh Dickins <hugh@veritas.com>
           Tue, 6 Jan 2009 22:40:11 +0000 (14:40 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Tue, 6 Jan 2009 23:59:07 +0000 (15:59 -0800)
Remove page_remove_rmap()'s vma arg, which was only for the Eeek message.
And remove the BUG_ON(page_mapcount(page) == 0) from CONFIG_DEBUG_VM's
page_dup_rmap(): we're trying to be more resilient about that than BUGs.
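
For illustration only (not part of the patch): a minimal sketch of the caller
pattern after this change, modeled on the zap_pte()/xip_unmap() teardown paths
touched below.  The helper name and exact shape are hypothetical; the point is
that page_remove_rmap() now needs only the page, with the pte lock still held
by the caller.

	#include <linux/mm.h>
	#include <linux/pagemap.h>
	#include <linux/rmap.h>
	#include <asm/cacheflush.h>
	#include <asm/tlbflush.h>

	/*
	 * Hypothetical helper: tear down one file-backed pte.  The caller
	 * holds the pte lock, as the real callers in this patch do.
	 */
	static void example_zap_one_pte(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address, pte_t *pte)
	{
		struct page *page = vm_normal_page(vma, address, *pte);
		pte_t pteval;

		if (!page)
			return;

		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush(vma, address, pte);
		if (pte_dirty(pteval))
			set_page_dirty(page);

		/* vma no longer passed: only the old Eeek message needed it */
		page_remove_rmap(page);
		page_cache_release(page);
		dec_mm_counter(mm, file_rss);
	}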

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/rmap.h
mm/filemap_xip.c
mm/fremap.c
mm/memory.c
mm/rmap.c

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 3593b18a07dde13c8e2f10a181d602b921aeec57..b35bc0e19cd9a69ee1bedc7e2e98253b40a486be 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -69,7 +69,7 @@ void __anon_vma_link(struct vm_area_struct *);
 void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
 void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
 void page_add_file_rmap(struct page *);
-void page_remove_rmap(struct page *, struct vm_area_struct *);
+void page_remove_rmap(struct page *);
 
 #ifdef CONFIG_DEBUG_VM
 void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address);
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index b5167dfb2f2d65366004963558dfa765643423b8..0c04615651b7e8b412e85cdeee26365ddee7515c 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -193,7 +193,7 @@ retry:
                        /* Nuke the page table entry. */
                        flush_cache_page(vma, address, pte_pfn(*pte));
                        pteval = ptep_clear_flush_notify(vma, address, pte);
-                       page_remove_rmap(page, vma);
+                       page_remove_rmap(page);
                        dec_mm_counter(mm, file_rss);
                        BUG_ON(pte_dirty(pteval));
                        pte_unmap_unlock(pte, ptl);
diff --git a/mm/fremap.c b/mm/fremap.c
index 7d12ca70ef7bf22c7e1efcdee186adad87a6c8c8..62d5bbda921aa55f49a03507b36cbff10c5f3b69 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -37,7 +37,7 @@ static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                if (page) {
                        if (pte_dirty(pte))
                                set_page_dirty(page);
-                       page_remove_rmap(page, vma);
+                       page_remove_rmap(page);
                        page_cache_release(page);
                        update_hiwater_rss(mm);
                        dec_mm_counter(mm, file_rss);
diff --git a/mm/memory.c b/mm/memory.c
index b273cc12b15d38ba4b2c80aee9a5f433deb715ab..0f9abbaf618cb8b21ceb7145b53d955a64f462db 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -798,7 +798,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                                        mark_page_accessed(page);
                                file_rss--;
                        }
-                       page_remove_rmap(page, vma);
+                       page_remove_rmap(page);
                        if (unlikely(page_mapcount(page) < 0))
                                print_bad_pte(vma, addr, ptent, page);
                        tlb_remove_page(tlb, page);
@@ -2023,7 +2023,7 @@ gotten:
                         * mapcount is visible. So transitively, TLBs to
                         * old page will be flushed before it can be reused.
                         */
-                       page_remove_rmap(old_page, vma);
+                       page_remove_rmap(old_page);
                }
 
                /* Free the old page.. */
diff --git a/mm/rmap.c b/mm/rmap.c
index 32098255082ebcde9cbfe9ef98a0a78d44cbd955..ac4af8cffbf952757dfd11df4b5a9789d5d95a1f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -707,7 +707,6 @@ void page_add_file_rmap(struct page *page)
  */
 void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
 {
-       BUG_ON(page_mapcount(page) == 0);
        if (PageAnon(page))
                __page_check_anon_rmap(page, vma, address);
        atomic_inc(&page->_mapcount);
@@ -717,11 +716,10 @@ void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long
 /**
  * page_remove_rmap - take down pte mapping from a page
  * @page: page to remove mapping from
- * @vma: the vm area in which the mapping is removed
  *
  * The caller needs to hold the pte lock.
  */
-void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
+void page_remove_rmap(struct page *page)
 {
        if (atomic_add_negative(-1, &page->_mapcount)) {
                /*
@@ -837,7 +835,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                dec_mm_counter(mm, file_rss);
 
 
-       page_remove_rmap(page, vma);
+       page_remove_rmap(page);
        page_cache_release(page);
 
 out_unmap:
@@ -952,7 +950,7 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
                if (pte_dirty(pteval))
                        set_page_dirty(page);
 
-               page_remove_rmap(page, vma);
+               page_remove_rmap(page);
                page_cache_release(page);
                dec_mm_counter(mm, file_rss);
                (*mapcount)--;