mm: Some arch may want to use HPAGE_PMD related values as variables
author Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tue, 1 Mar 2016 04:15:14 +0000 (09:45 +0530)
committer Michael Ellerman <mpe@ellerman.id.au>
Thu, 3 Mar 2016 10:18:29 +0000 (21:18 +1100)
The next generation Power processor brings a new MMU model [1] that
requires us to maintain a different Linux page table format.

In order to support both current and future ppc64 systems with a single
kernel, we need to make sure the kernel can select between the different
page table formats at runtime. With the new MMU (radix MMU) added, we
will have two different PMD hugepage sizes: 16MB for the hash model and
2MB for the radix model. Hence, make the HPAGE_PMD related values
variables instead of compile-time constants.

The actual conversion of the HPAGE_PMD related values to variables for
ppc64 happens in a follow-up patch.
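
As a rough sketch of what that conversion may look like (the names
below are illustrative only and are not part of this patch), a
boot-time selected page table format means the PMD hugepage shift
becomes a variable rather than a constant expression, e.g.:

	/* illustrative only: shift chosen by early MMU setup code */
	extern unsigned long pmd_huge_shift;

	#define HPAGE_PMD_SHIFT		pmd_huge_shift
	#define HPAGE_PMD_ORDER		(HPAGE_PMD_SHIFT - PAGE_SHIFT)
	#define HPAGE_PMD_NR		(1UL << HPAGE_PMD_ORDER)

Once HPAGE_PMD_ORDER is no longer a compile-time constant, checks such
as "#if HPAGE_PMD_ORDER >= MAX_ORDER" or an unconditional
BUILD_BUG_ON(HPAGE_PMD_ORDER < 2) cannot be evaluated at build time,
which is why the generic checks in mm/huge_memory.c move into
hugepage_init() behind MAYBE_BUILD_BUG_ON() below.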

[1] http://ibm.biz/power-isa3 (Needs registration).

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/mm/pgtable_64.c
include/linux/bug.h
include/linux/huge_mm.h
mm/huge_memory.c

diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index af304e6d5a89af1dd162751682060670891d276d..0eb53128ca2a32ef7bd049060ffd69223f8ebcc1 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -817,6 +817,13 @@ pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 
 int has_transparent_hugepage(void)
 {
+
+       BUILD_BUG_ON_MSG((PMD_SHIFT - PAGE_SHIFT) >= MAX_ORDER,
+               "hugepages can't be allocated by the buddy allocator");
+
+       BUILD_BUG_ON_MSG((PMD_SHIFT - PAGE_SHIFT) < 2,
+                        "We need more than 2 pages to do deferred thp split");
+
        if (!mmu_has_feature(MMU_FTR_16M_PAGE))
                return 0;
        /*
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 7f4818673c41f80d391faee5ea1667435d7bbf84..e51b0709e78dce274a915cf9fbd16c80f70b6b94 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -20,6 +20,7 @@ struct pt_regs;
 #define BUILD_BUG_ON_MSG(cond, msg) (0)
 #define BUILD_BUG_ON(condition) (0)
 #define BUILD_BUG() (0)
+#define MAYBE_BUILD_BUG_ON(cond) (0)
 #else /* __CHECKER__ */
 
 /* Force a compilation error if a constant expression is not a power of 2 */
@@ -83,6 +84,14 @@ struct pt_regs;
  */
 #define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed")
 
+#define MAYBE_BUILD_BUG_ON(cond)                       \
+       do {                                            \
+               if (__builtin_constant_p((cond)))       \
+                       BUILD_BUG_ON(cond);             \
+               else                                    \
+                       BUG_ON(cond);                   \
+       } while (0)
+
 #endif /* __CHECKER__ */
 
 #ifdef CONFIG_GENERIC_BUG
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 459fd25b378e73cfd2e911761ad845076be547de..f12513a20a0623c9931b6587ccce23f81c18991c 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -111,9 +111,6 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        __split_huge_pmd(__vma, __pmd, __address);      \
        }  while (0)
 
-#if HPAGE_PMD_ORDER >= MAX_ORDER
-#error "hugepages can't be allocated by the buddy allocator"
-#endif
 extern int hugepage_madvise(struct vm_area_struct *vma,
                            unsigned long *vm_flags, int advice);
 extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index aea8f7a42df97d7185f626d5bbc445c64f376eb1..36c22a89df610032b50f275a0c92cd3c45e20d49 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -83,7 +83,7 @@ unsigned long transparent_hugepage_flags __read_mostly =
        (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
 
 /* default scan 8*512 pte (or vmas) every 30 second */
-static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
+static unsigned int khugepaged_pages_to_scan __read_mostly;
 static unsigned int khugepaged_pages_collapsed;
 static unsigned int khugepaged_full_scans;
 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
@@ -98,7 +98,7 @@ static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
  * it would have happened if the vma was large enough during page
  * fault.
  */
-static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
+static unsigned int khugepaged_max_ptes_none __read_mostly;
 
 static int khugepaged(void *none);
 static int khugepaged_slab_init(void);
@@ -660,6 +660,18 @@ static int __init hugepage_init(void)
                return -EINVAL;
        }
 
+       khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
+       khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
+       /*
+        * hugepages can't be allocated by the buddy allocator
+        */
+       MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
+       /*
+        * we use page->mapping and page->index in second tail page
+        * as list_head: assuming THP order >= 2
+        */
+       MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);
+
        err = hugepage_init_sysfs(&hugepage_kobj);
        if (err)
                goto err_sysfs;
@@ -764,7 +776,6 @@ void prep_transhuge_page(struct page *page)
         * we use page->mapping and page->indexlru in second tail page
         * as list_head: assuming THP order >= 2
         */
-       BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);
 
        INIT_LIST_HEAD(page_deferred_list(page));
        set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);