projects
/
linux-2.6-block.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
mm: loosen MADV_NOHUGEPAGE to enable Qemu postcopy on s390
[linux-2.6-block.git]
/
mm
/
huge_memory.c
diff --git
a/mm/huge_memory.c
b/mm/huge_memory.c
index f5c08b46fef85dd5c6d4247299660a0e1acb0838..62fe06bb7d04bacccc8c2213a6a65134fa274010 100644
(file)
--- a/
mm/huge_memory.c
+++ b/
mm/huge_memory.c
@@ -116,7 +116,7 @@ static void set_recommended_min_free_kbytes(void)
	for_each_populated_zone(zone)
		nr_zones++;

-	/* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
+	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
@@ -151,7 +151,7 @@ static int start_stop_khugepaged(void)
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
-		if (unlikely(IS_ERR(khugepaged_thread))) {
+		if (IS_ERR(khugepaged_thread)) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
@@ -786,7 +786,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,

static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
{
-	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
+	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_RECLAIM)) | extra_gfp;
}

/* Caller must hold page table lock. */
@@ -1755,8 +1755,7 @@ static void __split_huge_page_refcount(struct page *page,
					 (1L << PG_unevictable)));
	page_tail->flags |= (1L << PG_dirty);

-	/* clear PageTail before overwriting first_page */
-	smp_wmb();
+	clear_compound_head(page_tail);

	if (page_is_young(page))
		set_page_young(page_tail);
@@ -2010,7 +2009,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
		/*
		 * Be somewhat over-protective like KSM for now!
		 */
-		if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
+		if (*vm_flags & VM_NO_THP)
			return -EINVAL;
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
@@ -2026,7 +2025,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
		/*
		 * Be somewhat over-protective like KSM for now!
		 */
-		if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
+		if (*vm_flags & VM_NO_THP)
			return -EINVAL;
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
@@ -2413,8 +2412,7 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
-		       struct vm_area_struct *vma, unsigned long address,
-		       int node)
+		       unsigned long address, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);
@@ -2481,8 +2479,7 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
-		       struct vm_area_struct *vma, unsigned long address,
-		       int node)
+		       unsigned long address, int node)
{
	up_read(&mm->mmap_sem);
	VM_BUG_ON(!*hpage);
@@ -2530,7 +2527,7 @@ static void collapse_huge_page(struct mm_struct *mm,
		__GFP_THISNODE;

	/* release the mmap_sem read lock. */
-	new_page = khugepaged_alloc_page(hpage, gfp, mm, vma, address, node);
+	new_page = khugepaged_alloc_page(hpage, gfp, mm, address, node);

	if (!new_page)
		return;