arm: define __PAGETABLE_PMD_FOLDED for !LPAE
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 85032de5e20f88bdef36f8fa54b71ae826f53f13..fd28d6ba5e5db3b2d2603ccc31abd3bc894d309b 100644
@@ -35,7 +35,7 @@
 #include <linux/node.h>
 #include "internal.h"
 
-unsigned long hugepages_treat_as_movable;
+int hugepages_treat_as_movable;
 
 int hugetlb_max_hstate __read_mostly;
 unsigned int default_hstate_idx;
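
This type change is more than cosmetic: the knob is exported through sysctl via proc_dointvec(), which reads and writes a plain int, so an unsigned long backing variable gets only part of its bytes updated on 64-bit big-endian machines. A minimal sketch of the wiring (the real entry lives in kernel/sysctl.c; the exact fields here are assumptions):

        #include <linux/sysctl.h>

        extern int hugepages_treat_as_movable;

        /* sketch of the sysctl entry; proc_dointvec() copies sizeof(int)
         * bytes, which is why the backing variable must be an int */
        static struct ctl_table hugetlb_table_sketch[] = {
                {
                        .procname     = "hugepages_treat_as_movable",
                        .data         = &hugepages_treat_as_movable,
                        .maxlen       = sizeof(int),
                        .mode         = 0644,
                        .proc_handler = proc_dointvec,
                },
                { }
        };
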
@@ -2657,9 +2657,10 @@ again:
                        goto unlock;
 
                /*
-                * HWPoisoned hugepage is already unmapped and dropped reference
+                * A migrating or HWPoisoned hugepage is already unmapped and
+                * its refcount has been dropped, so just clear the pte here.
                 */
-               if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
+               if (unlikely(!pte_present(pte))) {
                        huge_pte_clear(mm, address, ptep);
                        goto unlock;
                }
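
The single !pte_present() test covers both non-present cases the removed line checked for individually. For reference, the two helpers defined earlier in mm/hugetlb.c look roughly like this (sketch, return type simplified to bool):

        static bool is_hugetlb_entry_migration(pte_t pte)
        {
                swp_entry_t swp;

                if (huge_pte_none(pte) || pte_present(pte))
                        return false;
                swp = pte_to_swp_entry(pte);
                return non_swap_entry(swp) && is_migration_entry(swp);
        }

        static bool is_hugetlb_entry_hwpoisoned(pte_t pte)
        {
                swp_entry_t swp;

                if (huge_pte_none(pte) || pte_present(pte))
                        return false;
                swp = pte_to_swp_entry(pte);
                return non_swap_entry(swp) && is_hwpoison_entry(swp);
        }
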
@@ -3134,6 +3135,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        struct page *pagecache_page = NULL;
        struct hstate *h = hstate_vma(vma);
        struct address_space *mapping;
+       int need_wait_lock = 0;
 
        address &= huge_page_mask(h);
 
@@ -3171,6 +3173,16 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
        ret = 0;
 
+       /*
+        * entry could be a migration/hwpoison entry at this point, so this
+        * check prevents the kernel from going below, assuming that we have
+        * an active hugepage in pagecache. This goto expects the 2nd page
+        * fault, where the is_hugetlb_entry_(migration|hwpoisoned) checks
+        * will properly handle it.
+        */
+       if (!pte_present(entry))
+               goto out_mutex;
+
        /*
         * If we are going to COW the mapping later, we examine the pending
         * reservations for this page now. This will ensure that any
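
The goto out_mutex path works because the retried fault re-enters hugetlb_fault(), whose earlier, unchanged checks (not visible in this hunk) deal with the non-present entry, roughly:

        entry = huge_ptep_get(ptep);
        if (unlikely(is_hugetlb_entry_migration(entry))) {
                /* block until migration replaces the entry with a real pte */
                migration_entry_wait_huge(vma, mm, ptep);
                return 0;
        } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
                return VM_FAULT_HWPOISON_LARGE |
                        VM_FAULT_SET_HINDEX(hstate_index(h));
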
@@ -3190,30 +3202,31 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                                                vma, address);
        }
 
+       ptl = huge_pte_lock(h, mm, ptep);
+
+       /* Check for a racing update before calling hugetlb_cow */
+       if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
+               goto out_ptl;
+
        /*
         * hugetlb_cow() requires page locks of pte_page(entry) and
         * pagecache_page, so here we need take the former one
         * when page != pagecache_page or !pagecache_page.
-        * Note that locking order is always pagecache_page -> page,
-        * so no worry about deadlock.
         */
        page = pte_page(entry);
-       get_page(page);
        if (page != pagecache_page)
-               lock_page(page);
-
-       ptl = huge_pte_lockptr(h, mm, ptep);
-       spin_lock(ptl);
-       /* Check for a racing update before calling hugetlb_cow */
-       if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
-               goto out_ptl;
+               if (!trylock_page(page)) {
+                       need_wait_lock = 1;
+                       goto out_ptl;
+               }
 
+       get_page(page);
 
        if (flags & FAULT_FLAG_WRITE) {
                if (!huge_pte_write(entry)) {
                        ret = hugetlb_cow(mm, vma, address, ptep, entry,
                                        pagecache_page, ptl);
-                       goto out_ptl;
+                       goto out_put_page;
                }
                entry = huge_pte_mkdirty(entry);
        }
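
Taking ptl and re-checking pte_same() before get_page() (instead of after, as the removed lines did) closes the race this fix targets; a sketch of the assumed bad interleaving under the old ordering:

        /*
         * CPU0: hugetlb_fault()               CPU1: migration/unmap
         *
         * entry = huge_ptep_get(ptep);
         *                                     clears the pte and drops the
         *                                     last refcount on the page
         * page = pte_page(entry);
         * get_page(page);                     <-- refcount 0: use-after-free
         *
         * With the new ordering, the pte_same() check under ptl guarantees
         * the page is still mapped, so its refcount cannot have hit zero.
         */
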
@@ -3221,7 +3234,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (huge_ptep_set_access_flags(vma, address, ptep, entry,
                                                flags & FAULT_FLAG_WRITE))
                update_mmu_cache(vma, address, ptep);
-
+out_put_page:
+       if (page != pagecache_page)
+               unlock_page(page);
+       put_page(page);
 out_ptl:
        spin_unlock(ptl);
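
Since the unwind path is now split across this hunk and the next one, here is the resulting end of hugetlb_fault() pieced together from the diff:

        out_put_page:
                if (page != pagecache_page)
                        unlock_page(page);
                put_page(page);
        out_ptl:
                spin_unlock(ptl);

                if (pagecache_page) {
                        unlock_page(pagecache_page);
                        put_page(pagecache_page);
                }
        out_mutex:
                mutex_unlock(&htlb_fault_mutex_table[hash]);
                if (need_wait_lock)
                        wait_on_page_locked(page);
                return ret;
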
 
@@ -3229,12 +3245,17 @@ out_ptl:
                unlock_page(pagecache_page);
                put_page(pagecache_page);
        }
-       if (page != pagecache_page)
-               unlock_page(page);
-       put_page(page);
-
 out_mutex:
        mutex_unlock(&htlb_fault_mutex_table[hash]);
+       /*
+        * Generally it's safe to hold a refcount while waiting for the page
+        * lock, but here we only wait to defer the next page fault and avoid a
+        * busy loop; the page is not touched after being unlocked before we
+        * return from the current fault. So we are safe from accessing a freed
+        * page, even if we wait here without taking a refcount.
+        */
+       if (need_wait_lock)
+               wait_on_page_locked(page);
        return ret;
 }
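
The comment's safety argument rests on what wait_on_page_locked() actually does: it sleeps until PG_locked clears but never touches the page's contents or refcount. Roughly, from include/linux/pagemap.h of this era:

        static inline void wait_on_page_locked(struct page *page)
        {
                if (PageLocked(page))
                        wait_on_page_bit(page, PG_locked);
        }
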
 
@@ -3364,7 +3385,26 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                        spin_unlock(ptl);
                        continue;
                }
-               if (!huge_pte_none(huge_ptep_get(ptep))) {
+               pte = huge_ptep_get(ptep);
+               if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
+                       spin_unlock(ptl);
+                       continue;
+               }
+               if (unlikely(is_hugetlb_entry_migration(pte))) {
+                       swp_entry_t entry = pte_to_swp_entry(pte);
+
+                       if (is_write_migration_entry(entry)) {
+                               pte_t newpte;
+
+                               make_migration_entry_read(&entry);
+                               newpte = swp_entry_to_pte(entry);
+                               set_huge_pte_at(mm, address, ptep, newpte);
+                               pages++;
+                       }
+                       spin_unlock(ptl);
+                       continue;
+               }
+               if (!huge_pte_none(pte)) {
                        pte = huge_ptep_get_and_clear(mm, address, ptep);
                        pte = pte_mkhuge(huge_pte_modify(pte, newprot));
                        pte = arch_make_huge_pte(pte, vma, NULL, 0);
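
Rewriting a write migration entry as a read one means that when migration completes and the entry is turned back into a real pte, the pte comes out read-only, so the protection change survives the migration. The helpers used above come from include/linux/swapops.h and look roughly like:

        static inline int is_write_migration_entry(swp_entry_t entry)
        {
                return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
        }

        static inline void make_migration_entry_read(swp_entry_t *entry)
        {
                *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
        }
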
@@ -3660,42 +3700,64 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
        return (pte_t *) pmd;
 }
 
-struct page *
-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-               pmd_t *pmd, int write)
-{
-       struct page *page;
+#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
 
-       page = pte_page(*(pte_t *)pmd);
-       if (page)
-               page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
-       return page;
+/*
+ * These functions are overridable if your architecture needs its own
+ * behavior.
+ */
+struct page * __weak
+follow_huge_addr(struct mm_struct *mm, unsigned long address,
+                             int write)
+{
+       return ERR_PTR(-EINVAL);
 }
 
-struct page *
-follow_huge_pud(struct mm_struct *mm, unsigned long address,
-               pud_t *pud, int write)
+struct page * __weak
+follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+               pmd_t *pmd, int flags)
 {
-       struct page *page;
-
-       page = pte_page(*(pte_t *)pud);
-       if (page)
-               page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
+       struct page *page = NULL;
+       spinlock_t *ptl;
+retry:
+       ptl = pmd_lockptr(mm, pmd);
+       spin_lock(ptl);
+       /*
+        * make sure that the address range covered by this pmd is not
+        * unmapped by other threads.
+        */
+       if (!pmd_huge(*pmd))
+               goto out;
+       if (pmd_present(*pmd)) {
+               page = pte_page(*(pte_t *)pmd) +
+                       ((address & ~PMD_MASK) >> PAGE_SHIFT);
+               if (flags & FOLL_GET)
+                       get_page(page);
+       } else {
+               if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
+                       spin_unlock(ptl);
+                       __migration_entry_wait(mm, (pte_t *)pmd, ptl);
+                       goto retry;
+               }
+               /*
+                * hwpoisoned entry is treated as no_page_table in
+                * follow_page_mask().
+                */
+       }
+out:
+       spin_unlock(ptl);
        return page;
 }
 
-#else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */
-
-/* Can be overriden by architectures */
 struct page * __weak
 follow_huge_pud(struct mm_struct *mm, unsigned long address,
-              pud_t *pud, int write)
+               pud_t *pud, int flags)
 {
-       BUG();
-       return NULL;
-}
+       if (flags & FOLL_GET)
+               return NULL;
 
-#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
+       return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
+}
 
 #ifdef CONFIG_MEMORY_FAILURE
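
For context on the new flags-based contract (FOLL_GET instead of the old write argument), the generic caller in mm/gup.c's follow_page_mask() is expected to look roughly like this; a sketch assuming the companion gup change from the same series:

        if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
                page = follow_huge_pmd(mm, address, pmd, flags);
                return page ? page : no_page_table(vma, flags);
        }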