for_each_populated_zone(zone)
nr_zones++;
- /* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
+ /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
recommended_min = pageblock_nr_pages * nr_zones * 2;
/*
 * Make sure that on average at least two pageblocks are almost free
 * of another type, one for a migratetype to fall back to and a
 * second to avoid subsequent fallbacks of other types. There is a
 * preference for smaller zones but this is not guaranteed.
 */
recommended_min += pageblock_nr_pages * nr_zones *
		   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
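/*
 * A quick sanity check of the reservation the re-worded comment describes
 * (userspace sketch; the zone count and pageblock size are assumptions for
 * illustration). On x86-64 with 4KiB base pages a pageblock is 512 pages,
 * so 4 populated zones reserve 512 * 4 * 2 = 4096 pages (16 MiB) before
 * the separate MIGRATE_PCPTYPES term above is added.
 */
#include <stdio.h>

int main(void)
{
	unsigned long pageblock_nr_pages = 512;	/* 2MiB / 4KiB, assumed */
	unsigned long nr_zones = 4;		/* assumed populated zones */

	/* two free pageblocks per zone, as in the hunk above */
	unsigned long recommended_min = pageblock_nr_pages * nr_zones * 2;

	printf("recommended_min = %lu pages (%lu KiB)\n",
	       recommended_min, recommended_min * 4);
	return 0;
}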
static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
{
- return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
+ return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_RECLAIM)) | extra_gfp;
}
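/*
 * What the rename means in practice: the old catch-all __GFP_WAIT bit was
 * replaced by two bits, ___GFP_KSWAPD_RECLAIM and ___GFP_DIRECT_RECLAIM,
 * with __GFP_RECLAIM defined as their union. Masking it out when defrag is
 * disabled keeps THP allocations from entering reclaim at all. A minimal
 * userspace sketch of the mask logic (the flag values are stand-ins, not
 * the kernel's real bit layout):
 */
#include <stdio.h>

typedef unsigned int gfp_t;

#define ___GFP_DIRECT_RECLAIM	0x400u		/* stand-in value */
#define ___GFP_KSWAPD_RECLAIM	0x800u		/* stand-in value */
#define __GFP_RECLAIM	(___GFP_DIRECT_RECLAIM | ___GFP_KSWAPD_RECLAIM)
#define GFP_TRANSHUGE	(0x1000u | __GFP_RECLAIM)	/* stand-in composite */

static gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
{
	/* defrag off: strip both reclaim bits from the THP mask */
	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_RECLAIM)) | extra_gfp;
}

int main(void)
{
	printf("defrag=1: %#x\n", alloc_hugepage_gfpmask(1, 0));	/* 0x1c00 */
	printf("defrag=0: %#x\n", alloc_hugepage_gfpmask(0, 0));	/* 0x1000 */
	return 0;
}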
(1L << PG_unevictable)));
page_tail->flags |= (1L << PG_dirty);
- /* clear PageTail before overwriting first_page */
- smp_wmb();
+ clear_compound_head(page_tail);
if (page_is_young(page))
set_page_young(page_tail);
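/*
 * The explicit barrier is gone because the compound_head() rework encodes
 * a tail page's state in a single field: page->compound_head holds the
 * pointer to the head page with bit 0 set as the PageTail marker. The old
 * code had to clear PageTail in page->flags and then overwrite first_page,
 * ordered by smp_wmb(); now one store clears both at once. A sketch of the
 * helper as introduced by that rework:
 */
static inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}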
static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
- struct vm_area_struct *vma, unsigned long address,
- int node)
+ unsigned long address, int node)
{
VM_BUG_ON_PAGE(*hpage, *hpage);
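/*
 * The vma argument was unused here: under CONFIG_NUMA the target node is
 * chosen while scanning (khugepaged_find_target_node()) and arrives via
 * the node parameter, and the allocation itself needs only gfp and node,
 * roughly (reconstructed from that era's source, so treat the exact
 * allocator call as an assumption):
 *
 *	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
 *
 * The next hunk applies the identical signature change to the
 * !CONFIG_NUMA definition of the same function.
 */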
static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
- struct vm_area_struct *vma, unsigned long address,
- int node)
+ unsigned long address, int node)
{
up_read(&mm->mmap_sem);
VM_BUG_ON(!*hpage);
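/*
 * After the change the !CONFIG_NUMA variant is a stub that only drops
 * mmap_sem and hands back the page khugepaged preallocated earlier. A
 * sketch of the resulting function (the trailing return is inferred from
 * context, not shown in the hunk):
 */
static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
		      unsigned long address, int node)
{
	up_read(&mm->mmap_sem);
	VM_BUG_ON(!*hpage);

	return *hpage;
}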
__GFP_THISNODE;
/* release the mmap_sem read lock. */
- new_page = khugepaged_alloc_page(hpage, gfp, mm, vma, address, node);
+ new_page = khugepaged_alloc_page(hpage, gfp, mm, address, node);
if (!new_page)
return;