2 * linux/mm/compaction.c
4 * Memory compaction for the reduction of external fragmentation. Note that
5 * this heavily depends upon page migration to do all the real heavy lifting
8 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
10 #include <linux/cpu.h>
11 #include <linux/swap.h>
12 #include <linux/migrate.h>
13 #include <linux/compaction.h>
14 #include <linux/mm_inline.h>
15 #include <linux/backing-dev.h>
16 #include <linux/sysctl.h>
17 #include <linux/sysfs.h>
18 #include <linux/page-isolation.h>
19 #include <linux/kasan.h>
20 #include <linux/kthread.h>
21 #include <linux/freezer.h>
24 #ifdef CONFIG_COMPACTION
25 static inline void count_compact_event(enum vm_event_item item)
30 static inline void count_compact_events(enum vm_event_item item, long delta)
32 count_vm_events(item, delta);
35 #define count_compact_event(item) do { } while (0)
36 #define count_compact_events(item, delta) do { } while (0)
39 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
41 #define CREATE_TRACE_POINTS
42 #include <trace/events/compaction.h>
44 #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order))
45 #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order))
46 #define pageblock_start_pfn(pfn) block_start_pfn(pfn, pageblock_order)
47 #define pageblock_end_pfn(pfn) block_end_pfn(pfn, pageblock_order)
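/*
 * A minimal userspace sketch of the rounding these helpers perform, under
 * the assumption that pageblock_order == 9 (512 pages per pageblock, the
 * common value with 4K pages); pb_start()/pb_end() are illustrative names,
 * not kernel symbols.
 *
 *	#include <stdio.h>
 *
 *	#define PB_NR_PAGES	(1UL << 9)
 *
 *	static unsigned long pb_start(unsigned long pfn)
 *	{
 *		return pfn & ~(PB_NR_PAGES - 1);		// round_down()
 *	}
 *
 *	static unsigned long pb_end(unsigned long pfn)
 *	{
 *		return (pfn + PB_NR_PAGES) & ~(PB_NR_PAGES - 1);	// ALIGN(pfn + 1, ...)
 *	}
 *
 *	int main(void)
 *	{
 *		// pfn 1234 lies in the pageblock [1024, 1536)
 *		printf("[%lu, %lu)\n", pb_start(1234), pb_end(1234));
 *		return 0;
 *	}
 */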
49 static unsigned long release_freepages(struct list_head *freelist)
51 struct page *page, *next;
52 unsigned long high_pfn = 0;
54 list_for_each_entry_safe(page, next, freelist, lru) {
55 unsigned long pfn = page_to_pfn(page);
65 static void map_pages(struct list_head *list)
67 unsigned int i, order, nr_pages;
68 struct page *page, *next;
71 list_for_each_entry_safe(page, next, list, lru) {
74 order = page_private(page);
75 nr_pages = 1 << order;
76 set_page_private(page, 0);
77 set_page_refcounted(page);
79 arch_alloc_page(page, order);
80 kernel_map_pages(page, nr_pages, 1);
81 kasan_alloc_pages(page, order);
83 split_page(page, order);
85 for (i = 0; i < nr_pages; i++) {
86 list_add(&page->lru, &tmp_list);
91 list_splice(&tmp_list, list);
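/*
 * What map_pages() achieves, sketched in userspace: every isolated free page
 * of order n expands into 1 << n order-0 pages on the list, so the migration
 * code can hand them out one at a time. The (pfn, order) pairs below are
 * made-up sample data.
 *
 *	#include <stdio.h>
 *
 *	static void toy_map_pages(const unsigned long *pfn,
 *				  const unsigned int *order, int n)
 *	{
 *		int i;
 *		unsigned long j;
 *
 *		for (i = 0; i < n; i++)
 *			for (j = 0; j < (1UL << order[i]); j++)
 *				printf("order-0 page at pfn %lu\n", pfn[i] + j);
 *	}
 *
 *	int main(void)
 *	{
 *		unsigned long pfn[] = { 1024, 2048 };
 *		unsigned int order[] = { 2, 0 };
 *
 *		toy_map_pages(pfn, order, 2);	// prints 4 + 1 order-0 pages
 *		return 0;
 *	}
 */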
94 static inline bool migrate_async_suitable(int migratetype)
96 return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
99 #ifdef CONFIG_COMPACTION
101 int PageMovable(struct page *page)
103 struct address_space *mapping;
105 VM_BUG_ON_PAGE(!PageLocked(page), page);
106 if (!__PageMovable(page))
109 mapping = page_mapping(page);
110 if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
115 EXPORT_SYMBOL(PageMovable);
117 void __SetPageMovable(struct page *page, struct address_space *mapping)
119 VM_BUG_ON_PAGE(!PageLocked(page), page);
120 VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
121 page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
123 EXPORT_SYMBOL(__SetPageMovable);
125 void __ClearPageMovable(struct page *page)
127 VM_BUG_ON_PAGE(!PageLocked(page), page);
128 VM_BUG_ON_PAGE(!PageMovable(page), page);
130 * Clear the registered address_space value while keeping the PAGE_MAPPING_MOVABLE
131 * flag, so that the VM can catch a page the driver has released after isolation.
132 * With it, VM migration doesn't try to put the page back.
134 page->mapping = (void *)((unsigned long)page->mapping &
135 PAGE_MAPPING_MOVABLE);
137 EXPORT_SYMBOL(__ClearPageMovable);
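/*
 * A hedged sketch of the driver side of this interface: a driver that wants
 * its pages to be migratable registers address_space_operations providing
 * the isolate/migrate/putback hooks and then marks each page movable under
 * the page lock. All my_* names are hypothetical.
 *
 *	static const struct address_space_operations my_aops = {
 *		.isolate_page	= my_isolate_page,
 *		.migratepage	= my_migratepage,
 *		.putback_page	= my_putback_page,
 *	};
 *
 *	// my_mapping->a_ops must point at my_aops
 *	lock_page(page);
 *	__SetPageMovable(page, my_mapping);
 *	unlock_page(page);
 *
 *	// on the driver's own free path, while the page is still isolated:
 *	__ClearPageMovable(page);
 */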
139 /* Do not skip compaction more than 64 times */
140 #define COMPACT_MAX_DEFER_SHIFT 6
143 * Compaction is deferred when compaction fails to result in a page
144 * allocation success. 1 << compact_defer_shift compactions are skipped up
145 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
147 void defer_compaction(struct zone *zone, int order)
149 zone->compact_considered = 0;
150 zone->compact_defer_shift++;
152 if (order < zone->compact_order_failed)
153 zone->compact_order_failed = order;
155 if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
156 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
158 trace_mm_compaction_defer_compaction(zone, order);
161 /* Returns true if compaction should be skipped this time */
162 bool compaction_deferred(struct zone *zone, int order)
164 unsigned long defer_limit = 1UL << zone->compact_defer_shift;
166 if (order < zone->compact_order_failed)
169 /* Avoid possible overflow */
170 if (++zone->compact_considered > defer_limit)
171 zone->compact_considered = defer_limit;
173 if (zone->compact_considered >= defer_limit)
176 trace_mm_compaction_deferred(zone, order);
182 * Update defer tracking counters after successful compaction of given order,
183 * which means an allocation either succeeded (alloc_success == true) or is
184 * expected to succeed.
186 void compaction_defer_reset(struct zone *zone, int order,
190 zone->compact_considered = 0;
191 zone->compact_defer_shift = 0;
193 if (order >= zone->compact_order_failed)
194 zone->compact_order_failed = order + 1;
196 trace_mm_compaction_defer_reset(zone, order);
199 /* Returns true if restarting compaction after many failures */
200 bool compaction_restarting(struct zone *zone, int order)
202 if (order < zone->compact_order_failed)
205 return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
206 zone->compact_considered >= 1UL << zone->compact_defer_shift;
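/*
 * A userspace sketch of the back-off implemented by defer_compaction() and
 * compaction_deferred() above: each failure doubles how long compaction is
 * skipped, capped by COMPACT_MAX_DEFER_SHIFT. The toy_zone struct is
 * illustrative, not the real struct zone.
 *
 *	#include <stdbool.h>
 *	#include <stdio.h>
 *
 *	#define MAX_DEFER_SHIFT	6
 *
 *	struct toy_zone { unsigned int considered, defer_shift; };
 *
 *	static void toy_defer(struct toy_zone *z)		// defer_compaction()
 *	{
 *		z->considered = 0;
 *		if (++z->defer_shift > MAX_DEFER_SHIFT)
 *			z->defer_shift = MAX_DEFER_SHIFT;
 *	}
 *
 *	static bool toy_deferred(struct toy_zone *z)		// compaction_deferred()
 *	{
 *		unsigned int limit = 1U << z->defer_shift;
 *
 *		if (++z->considered > limit)
 *			z->considered = limit;
 *		return z->considered < limit;		// true: skip this attempt
 *	}
 *
 *	int main(void)
 *	{
 *		struct toy_zone z = { 0, 0 };
 *		int i, skipped = 0;
 *
 *		toy_defer(&z);		// 1st failure: defer_shift 1
 *		toy_defer(&z);		// 2nd failure: defer_shift 2
 *		for (i = 0; i < 10; i++)
 *			skipped += toy_deferred(&z);
 *		printf("skipped %d of 10 attempts\n", skipped);	// 3
 *		return 0;
 *	}
 */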
209 /* Returns true if the pageblock should be scanned for pages to isolate. */
210 static inline bool isolation_suitable(struct compact_control *cc,
213 if (cc->ignore_skip_hint)
216 return !get_pageblock_skip(page);
219 static void reset_cached_positions(struct zone *zone)
221 zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
222 zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
223 zone->compact_cached_free_pfn =
224 pageblock_start_pfn(zone_end_pfn(zone) - 1);
228 * This function is called to clear all cached information on pageblocks that
229 * should be skipped for page isolation when the migrate and free page scanners meet.
232 static void __reset_isolation_suitable(struct zone *zone)
234 unsigned long start_pfn = zone->zone_start_pfn;
235 unsigned long end_pfn = zone_end_pfn(zone);
238 zone->compact_blockskip_flush = false;
240 /* Walk the zone and mark every pageblock as suitable for isolation */
241 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
249 page = pfn_to_page(pfn);
250 if (zone != page_zone(page))
253 clear_pageblock_skip(page);
256 reset_cached_positions(zone);
259 void reset_isolation_suitable(pg_data_t *pgdat)
263 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
264 struct zone *zone = &pgdat->node_zones[zoneid];
265 if (!populated_zone(zone))
268 /* Only flush if a full compaction finished recently */
269 if (zone->compact_blockskip_flush)
270 __reset_isolation_suitable(zone);
275 * If no pages were isolated then mark this pageblock to be skipped in the
276 * future. The information is later cleared by __reset_isolation_suitable().
278 static void update_pageblock_skip(struct compact_control *cc,
279 struct page *page, unsigned long nr_isolated,
280 bool migrate_scanner)
282 struct zone *zone = cc->zone;
285 if (cc->ignore_skip_hint)
294 set_pageblock_skip(page);
296 pfn = page_to_pfn(page);
298 /* Update where async and sync compaction should restart */
299 if (migrate_scanner) {
300 if (pfn > zone->compact_cached_migrate_pfn[0])
301 zone->compact_cached_migrate_pfn[0] = pfn;
302 if (cc->mode != MIGRATE_ASYNC &&
303 pfn > zone->compact_cached_migrate_pfn[1])
304 zone->compact_cached_migrate_pfn[1] = pfn;
306 if (pfn < zone->compact_cached_free_pfn)
307 zone->compact_cached_free_pfn = pfn;
311 static inline bool isolation_suitable(struct compact_control *cc,
317 static void update_pageblock_skip(struct compact_control *cc,
318 struct page *page, unsigned long nr_isolated,
319 bool migrate_scanner)
322 #endif /* CONFIG_COMPACTION */
325 * Compaction requires the taking of some coarse locks that are potentially
326 * very heavily contended. For async compaction, back out if the lock cannot
327 * be taken immediately. For sync compaction, spin on the lock if needed.
329 * Returns true if the lock is held
330 * Returns false if the lock is not held and compaction should abort
332 static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
333 struct compact_control *cc)
335 if (cc->mode == MIGRATE_ASYNC) {
336 if (!spin_trylock_irqsave(lock, *flags)) {
337 cc->contended = COMPACT_CONTENDED_LOCK;
341 spin_lock_irqsave(lock, *flags);
348 * Compaction requires the taking of some coarse locks that are potentially
349 * very heavily contended. The lock should be periodically unlocked to avoid
350 * having disabled IRQs for a long time, even when there is nobody waiting on
351 * the lock. It might also be that allowing the IRQs will result in
352 * need_resched() becoming true. If scheduling is needed, async compaction
353 * aborts. Sync compaction schedules.
354 * Either compaction type will also abort if a fatal signal is pending.
355 * In either case if the lock was locked, it is dropped and not regained.
357 * Returns true if compaction should abort due to fatal signal pending, or
358 * async compaction due to need_resched()
359 * Returns false when compaction can continue (sync compaction might have scheduled)
362 static bool compact_unlock_should_abort(spinlock_t *lock,
363 unsigned long flags, bool *locked, struct compact_control *cc)
366 spin_unlock_irqrestore(lock, flags);
370 if (fatal_signal_pending(current)) {
371 cc->contended = COMPACT_CONTENDED_SCHED;
375 if (need_resched()) {
376 if (cc->mode == MIGRATE_ASYNC) {
377 cc->contended = COMPACT_CONTENDED_SCHED;
387 * Aside from avoiding lock contention, compaction also periodically checks
388 * need_resched() and either schedules in sync compaction or aborts async
389 * compaction. This is similar to what compact_unlock_should_abort() does, but
390 * is used where no lock is concerned.
392 * Returns false when no scheduling was needed, or sync compaction scheduled.
393 * Returns true when async compaction should abort.
395 static inline bool compact_should_abort(struct compact_control *cc)
397 /* async compaction aborts if contended */
398 if (need_resched()) {
399 if (cc->mode == MIGRATE_ASYNC) {
400 cc->contended = COMPACT_CONTENDED_SCHED;
411 * Isolate free pages onto a private freelist. If @strict is true, will abort
412 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
413 * (even though it may still end up isolating some pages).
415 static unsigned long isolate_freepages_block(struct compact_control *cc,
416 unsigned long *start_pfn,
417 unsigned long end_pfn,
418 struct list_head *freelist,
421 int nr_scanned = 0, total_isolated = 0;
422 struct page *cursor, *valid_page = NULL;
423 unsigned long flags = 0;
425 unsigned long blockpfn = *start_pfn;
428 cursor = pfn_to_page(blockpfn);
430 /* Isolate free pages. */
431 for (; blockpfn < end_pfn; blockpfn++, cursor++) {
433 struct page *page = cursor;
436 * Periodically drop the lock (if held) regardless of its
437 * contention, to give IRQs a chance to run. Abort if fatal signal
438 * pending or async compaction detects need_resched()
440 if (!(blockpfn % SWAP_CLUSTER_MAX)
441 && compact_unlock_should_abort(&cc->zone->lock, flags,
446 if (!pfn_valid_within(blockpfn))
453 * For compound pages such as THP and hugetlbfs, we can save
454 * potentially a lot of iterations if we skip them at once.
455 * The check is racy, but we can consider only valid values
456 * and the only danger is skipping too much.
458 if (PageCompound(page)) {
459 unsigned int comp_order = compound_order(page);
461 if (likely(comp_order < MAX_ORDER)) {
462 blockpfn += (1UL << comp_order) - 1;
463 cursor += (1UL << comp_order) - 1;
469 if (!PageBuddy(page))
473 * If we already hold the lock, we can skip some rechecking.
474 * Note that if we hold the lock now, checked_pageblock was
475 * already set in some previous iteration (or strict is true),
476 * so it is correct to skip the suitable migration target recheck as well.
481 * The zone lock must be held to isolate freepages.
482 * Unfortunately this is a very coarse lock and can be
483 * heavily contended if there are parallel allocations
484 * or parallel compactions. For async compaction do not
485 * spin on the lock and we acquire the lock as late as possible.
488 locked = compact_trylock_irqsave(&cc->zone->lock,
493 /* Recheck this is a buddy page under lock */
494 if (!PageBuddy(page))
498 /* Found a free page, will break it into order-0 pages */
499 order = page_order(page);
500 isolated = __isolate_free_page(page, order);
503 set_page_private(page, order);
505 total_isolated += isolated;
506 cc->nr_freepages += isolated;
507 list_add_tail(&page->lru, freelist);
509 if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
510 blockpfn += isolated;
513 /* Advance to the end of split page */
514 blockpfn += isolated - 1;
515 cursor += isolated - 1;
527 spin_unlock_irqrestore(&cc->zone->lock, flags);
530 * There is a tiny chance that we have read bogus compound_order(),
531 * so be careful to not go outside of the pageblock.
533 if (unlikely(blockpfn > end_pfn))
536 trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
537 nr_scanned, total_isolated);
539 /* Record how far we have got within the block */
540 *start_pfn = blockpfn;
543 * If strict isolation is requested by CMA then check that all the
544 * pages requested were isolated. If there were any failures, 0 is
545 * returned and CMA will fail.
547 if (strict && blockpfn < end_pfn)
550 /* Update the pageblock-skip if the whole pageblock was scanned */
551 if (blockpfn == end_pfn)
552 update_pageblock_skip(cc, valid_page, total_isolated, false);
554 count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
556 count_compact_events(COMPACTISOLATED, total_isolated);
557 return total_isolated;
561 * isolate_freepages_range() - isolate free pages.
562 * @start_pfn: The first PFN to start isolating.
563 * @end_pfn: The one-past-last PFN.
565 * Non-free pages, invalid PFNs, or zone boundaries within the
566 * [start_pfn, end_pfn) range are considered errors and cause the function to
567 * undo its actions and return zero.
569 * Otherwise, function returns one-past-the-last PFN of isolated page
570 * (which may be greater than end_pfn if end fell in the middle of an isolated page).
574 isolate_freepages_range(struct compact_control *cc,
575 unsigned long start_pfn, unsigned long end_pfn)
577 unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
581 block_start_pfn = pageblock_start_pfn(pfn);
582 if (block_start_pfn < cc->zone->zone_start_pfn)
583 block_start_pfn = cc->zone->zone_start_pfn;
584 block_end_pfn = pageblock_end_pfn(pfn);
586 for (; pfn < end_pfn; pfn += isolated,
587 block_start_pfn = block_end_pfn,
588 block_end_pfn += pageblock_nr_pages) {
589 /* Protect pfn from changing by isolate_freepages_block */
590 unsigned long isolate_start_pfn = pfn;
592 block_end_pfn = min(block_end_pfn, end_pfn);
595 * pfn could pass the block_end_pfn if isolated freepage
596 * is more than pageblock order. In this case, we adjust
597 * the scanning range to the right one.
599 if (pfn >= block_end_pfn) {
600 block_start_pfn = pageblock_start_pfn(pfn);
601 block_end_pfn = pageblock_end_pfn(pfn);
602 block_end_pfn = min(block_end_pfn, end_pfn);
605 if (!pageblock_pfn_to_page(block_start_pfn,
606 block_end_pfn, cc->zone))
609 isolated = isolate_freepages_block(cc, &isolate_start_pfn,
610 block_end_pfn, &freelist, true);
613 * In strict mode, isolate_freepages_block() returns 0 if
614 * there are any holes in the block (i.e. invalid PFNs or non-free pages).
621 * If we managed to isolate pages, it is always (1 << n) *
622 * pageblock_nr_pages for some non-negative n. (Max order
623 * page may span two pageblocks).
627 /* __isolate_free_page() does not map the pages */
628 map_pages(&freelist);
631 /* Loop terminated early, cleanup. */
632 release_freepages(&freelist);
636 /* We don't use freelists for anything. */
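/*
 * A hedged sketch of a CMA-style caller of the function above (compare
 * alloc_contig_range()): the range is first emptied of movable pages, then
 * the now-free pages are taken off the buddy lists in one go. The
 * compact_control initialisation below is illustrative, not a verbatim copy
 * of any in-tree caller.
 *
 *	struct compact_control cc = {
 *		.nr_migratepages = 0,
 *		.order = -1,			// whole range, not order-driven
 *		.zone = page_zone(pfn_to_page(start)),
 *		.mode = MIGRATE_SYNC,
 *		.ignore_skip_hint = true,
 *	};
 *	INIT_LIST_HEAD(&cc.migratepages);
 *
 *	// ... migrate everything out of [start, end) first ...
 *
 *	if (!isolate_freepages_range(&cc, start, end))
 *		return -EBUSY;		// hit a hole or a busy page, undo
 */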
640 /* Update the number of anon and file isolated pages in the zone */
641 static void acct_isolated(struct zone *zone, struct compact_control *cc)
644 unsigned int count[2] = { 0, };
646 if (list_empty(&cc->migratepages))
649 list_for_each_entry(page, &cc->migratepages, lru)
650 count[!!page_is_file_cache(page)]++;
652 mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
653 mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
656 /* Similar to reclaim, but different enough that they don't share logic */
657 static bool too_many_isolated(struct zone *zone)
659 unsigned long active, inactive, isolated;
661 inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
662 zone_page_state(zone, NR_INACTIVE_ANON);
663 active = zone_page_state(zone, NR_ACTIVE_FILE) +
664 zone_page_state(zone, NR_ACTIVE_ANON);
665 isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
666 zone_page_state(zone, NR_ISOLATED_ANON);
668 return isolated > (inactive + active) / 2;
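/*
 * The throttling rule above with made-up numbers: with 6000 inactive and
 * 2000 active LRU pages, isolation is considered excessive once more than
 * 4000 pages are off the LRU lists.
 *
 *	#include <stdbool.h>
 *
 *	static bool toy_too_many_isolated(unsigned long active,
 *					  unsigned long inactive,
 *					  unsigned long isolated)
 *	{
 *		return isolated > (inactive + active) / 2;
 *	}
 *	// toy_too_many_isolated(2000, 6000, 4001) == true
 */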
672 * isolate_migratepages_block() - isolate all migrate-able pages within a single pageblock
674 * @cc: Compaction control structure.
675 * @low_pfn: The first PFN to isolate
676 * @end_pfn: The one-past-the-last PFN to isolate, within same pageblock
677 * @isolate_mode: Isolation mode to be used.
679 * Isolate all pages that can be migrated from the range specified by
680 * [low_pfn, end_pfn). The range is expected to be within same pageblock.
681 * Returns zero if there is a fatal signal pending, otherwise PFN of the
682 * first page that was not scanned (which may be less than, equal to, or greater than end_pfn).
685 * The pages are isolated on cc->migratepages list (not required to be empty),
686 * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
687 * is neither read nor updated.
690 isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
691 unsigned long end_pfn, isolate_mode_t isolate_mode)
693 struct zone *zone = cc->zone;
694 unsigned long nr_scanned = 0, nr_isolated = 0;
695 struct lruvec *lruvec;
696 unsigned long flags = 0;
698 struct page *page = NULL, *valid_page = NULL;
699 unsigned long start_pfn = low_pfn;
700 bool skip_on_failure = false;
701 unsigned long next_skip_pfn = 0;
704 * Ensure that there are not too many pages isolated from the LRU
705 * list by either parallel reclaimers or compaction. If there are,
706 * delay for some time until fewer pages are isolated
708 while (unlikely(too_many_isolated(zone))) {
709 /* async migration should just abort */
710 if (cc->mode == MIGRATE_ASYNC)
713 congestion_wait(BLK_RW_ASYNC, HZ/10);
715 if (fatal_signal_pending(current))
719 if (compact_should_abort(cc))
722 if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
723 skip_on_failure = true;
724 next_skip_pfn = block_end_pfn(low_pfn, cc->order);
727 /* Time to isolate some pages for migration */
728 for (; low_pfn < end_pfn; low_pfn++) {
730 if (skip_on_failure && low_pfn >= next_skip_pfn) {
732 * We have isolated all migration candidates in the
733 * previous order-aligned block, and did not skip it due
734 * to failure. We should migrate the pages now and
735 * hopefully succeed compaction.
741 * We failed to isolate in the previous order-aligned
742 * block. Set the new boundary to the end of the
743 * current block. Note we can't simply increase
744 * next_skip_pfn by 1 << order, as low_pfn might have
745 * been incremented by a higher number due to skipping
746 * a compound or a high-order buddy page in the
747 * previous loop iteration.
749 next_skip_pfn = block_end_pfn(low_pfn, cc->order);
753 * Periodically drop the lock (if held) regardless of its
754 * contention, to give IRQs a chance to run. Abort async compaction
757 if (!(low_pfn % SWAP_CLUSTER_MAX)
758 && compact_unlock_should_abort(&zone->lru_lock, flags,
762 if (!pfn_valid_within(low_pfn))
766 page = pfn_to_page(low_pfn);
772 * Skip if free. We read page order here without zone lock
773 * which is generally unsafe, but the race window is small and
774 * the worst thing that can happen is that we skip some
775 * potential isolation targets.
777 if (PageBuddy(page)) {
778 unsigned long freepage_order = page_order_unsafe(page);
781 * Without lock, we cannot be sure that what we got is
782 * a valid page order. Consider only values in the
783 * valid order range to prevent low_pfn overflow.
785 if (freepage_order > 0 && freepage_order < MAX_ORDER)
786 low_pfn += (1UL << freepage_order) - 1;
791 * Regardless of being on LRU, compound pages such as THP and
792 * hugetlbfs are not to be compacted. We can potentially save
793 * a lot of iterations if we skip them at once. The check is
794 * racy, but we can consider only valid values and the only
795 * danger is skipping too much.
797 if (PageCompound(page)) {
798 unsigned int comp_order = compound_order(page);
800 if (likely(comp_order < MAX_ORDER))
801 low_pfn += (1UL << comp_order) - 1;
807 * Check may be lockless but that's ok as we recheck later.
808 * It's possible to migrate LRU and non-lru movable pages.
809 * Skip any other type of page
811 if (!PageLRU(page)) {
813 * __PageMovable can return false positive so we need
814 * to verify it under page_lock.
816 if (unlikely(__PageMovable(page)) &&
817 !PageIsolated(page)) {
819 spin_unlock_irqrestore(&zone->lru_lock,
824 if (isolate_movable_page(page, isolate_mode))
825 goto isolate_success;
832 * Migration will fail if an anonymous page is pinned in memory,
833 * so avoid taking lru_lock and isolating it unnecessarily in an
834 * admittedly racy check.
836 if (!page_mapping(page) &&
837 page_count(page) > page_mapcount(page))
840 /* If we already hold the lock, we can skip some rechecking */
842 locked = compact_trylock_irqsave(&zone->lru_lock,
847 /* Recheck PageLRU and PageCompound under lock */
852 * Page became compound since the non-locked check,
853 * and it's on LRU. It can only be a THP so the order
854 * is safe to read and it's 0 for tail pages.
856 if (unlikely(PageCompound(page))) {
857 low_pfn += (1UL << compound_order(page)) - 1;
862 lruvec = mem_cgroup_page_lruvec(page, zone);
864 /* Try isolate the page */
865 if (__isolate_lru_page(page, isolate_mode) != 0)
868 VM_BUG_ON_PAGE(PageCompound(page), page);
870 /* Successfully isolated */
871 del_page_from_lru_list(page, lruvec, page_lru(page));
874 list_add(&page->lru, &cc->migratepages);
875 cc->nr_migratepages++;
879 * Record where we could have freed pages by migration and not
880 * yet flushed them to buddy allocator.
881 * - this is the lowest page that was isolated and is likely to be
882 * freed by migration afterwards.
884 if (!cc->last_migrated_pfn)
885 cc->last_migrated_pfn = low_pfn;
887 /* Avoid isolating too much */
888 if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
895 if (!skip_on_failure)
899 * We have isolated some pages, but then failed. Release them
900 * instead of migrating, as we cannot form the cc->order buddy page anyway.
905 spin_unlock_irqrestore(&zone->lru_lock, flags);
908 acct_isolated(zone, cc);
909 putback_movable_pages(&cc->migratepages);
910 cc->nr_migratepages = 0;
911 cc->last_migrated_pfn = 0;
915 if (low_pfn < next_skip_pfn) {
916 low_pfn = next_skip_pfn - 1;
918 * The check near the loop beginning would have updated
919 * next_skip_pfn too, but this is a bit simpler.
921 next_skip_pfn += 1UL << cc->order;
926 * The PageBuddy() check could have potentially brought us outside
927 * the range to be scanned.
929 if (unlikely(low_pfn > end_pfn))
933 spin_unlock_irqrestore(&zone->lru_lock, flags);
936 * Update the pageblock-skip information and cached scanner pfn,
937 * if the whole pageblock was scanned without isolating any page.
939 if (low_pfn == end_pfn)
940 update_pageblock_skip(cc, valid_page, nr_isolated, true);
942 trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
943 nr_scanned, nr_isolated);
945 count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
947 count_compact_events(COMPACTISOLATED, nr_isolated);
953 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
954 * @cc: Compaction control structure.
955 * @start_pfn: The first PFN to start isolating.
956 * @end_pfn: The one-past-last PFN.
958 * Returns zero if isolation fails fatally due to e.g. pending signal.
959 * Otherwise, function returns one-past-the-last PFN of isolated page
960 * (which may be greater than end_pfn if end fell in a middle of a THP page).
963 isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
964 unsigned long end_pfn)
966 unsigned long pfn, block_start_pfn, block_end_pfn;
968 /* Scan block by block. First and last block may be incomplete */
970 block_start_pfn = pageblock_start_pfn(pfn);
971 if (block_start_pfn < cc->zone->zone_start_pfn)
972 block_start_pfn = cc->zone->zone_start_pfn;
973 block_end_pfn = pageblock_end_pfn(pfn);
975 for (; pfn < end_pfn; pfn = block_end_pfn,
976 block_start_pfn = block_end_pfn,
977 block_end_pfn += pageblock_nr_pages) {
979 block_end_pfn = min(block_end_pfn, end_pfn);
981 if (!pageblock_pfn_to_page(block_start_pfn,
982 block_end_pfn, cc->zone))
985 pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
986 ISOLATE_UNEVICTABLE);
991 if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
994 acct_isolated(cc->zone, cc);
999 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
1000 #ifdef CONFIG_COMPACTION
1002 /* Returns true if the page is within a block suitable for migration to */
1003 static bool suitable_migration_target(struct page *page)
1005 /* If the page is a large free page, then disallow migration */
1006 if (PageBuddy(page)) {
1008 * We are checking page_order without zone->lock taken. But
1009 * the only small danger is that we skip a potentially suitable
1010 * pageblock, so it's not worth checking the order for a valid range.
1012 if (page_order_unsafe(page) >= pageblock_order)
1016 /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
1017 if (migrate_async_suitable(get_pageblock_migratetype(page)))
1020 /* Otherwise skip the block */
1025 * Test whether the free scanner has reached the same or lower pageblock than
1026 * the migration scanner, and compaction should thus terminate.
1028 static inline bool compact_scanners_met(struct compact_control *cc)
1030 return (cc->free_pfn >> pageblock_order)
1031 <= (cc->migrate_pfn >> pageblock_order);
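/*
 * The comparison above is made at pageblock granularity. A small sketch,
 * assuming pageblock_order == 9: free_pfn 1600 and migrate_pfn 1537 both
 * fall in pageblock 3, so the scanners have met even though the raw pfns
 * still differ.
 *
 *	#include <stdbool.h>
 *
 *	static bool toy_scanners_met(unsigned long free_pfn,
 *				     unsigned long migrate_pfn)
 *	{
 *		return (free_pfn >> 9) <= (migrate_pfn >> 9);
 *	}
 *	// toy_scanners_met(1600, 1537) == true	(both shift down to 3)
 */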
1035 * Based on information in the current compact_control, find blocks
1036 * suitable for isolating free pages from and then isolate them.
1038 static void isolate_freepages(struct compact_control *cc)
1040 struct zone *zone = cc->zone;
1042 unsigned long block_start_pfn; /* start of current pageblock */
1043 unsigned long isolate_start_pfn; /* exact pfn we start at */
1044 unsigned long block_end_pfn; /* end of current pageblock */
1045 unsigned long low_pfn; /* lowest pfn scanner is able to scan */
1046 struct list_head *freelist = &cc->freepages;
1049 * Initialise the free scanner. The starting point is where we last
1050 * successfully isolated from, zone-cached value, or the end of the
1051 * zone when isolating for the first time. For looping we also need
1052 * this pfn aligned down to the pageblock boundary, because we do
1053 * block_start_pfn -= pageblock_nr_pages in the for loop.
1054 * For ending point, take care when isolating in the last pageblock of a
1055 * zone which ends in the middle of a pageblock.
1056 * The low boundary is the end of the pageblock the migration scanner is using.
1059 isolate_start_pfn = cc->free_pfn;
1060 block_start_pfn = pageblock_start_pfn(cc->free_pfn);
1061 block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
1062 zone_end_pfn(zone));
1063 low_pfn = pageblock_end_pfn(cc->migrate_pfn);
1066 * Isolate free pages until enough are available to migrate the
1067 * pages on cc->migratepages. We stop searching if the migrate
1068 * and free page scanners meet or enough free pages are isolated.
1070 for (; block_start_pfn >= low_pfn;
1071 block_end_pfn = block_start_pfn,
1072 block_start_pfn -= pageblock_nr_pages,
1073 isolate_start_pfn = block_start_pfn) {
1075 * This can iterate a massively long zone without finding any
1076 * suitable migration targets, so periodically check if we need
1077 * to schedule, or even abort async compaction.
1079 if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
1080 && compact_should_abort(cc))
1083 page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1088 /* Check the block is suitable for migration */
1089 if (!suitable_migration_target(page))
1092 /* If isolation recently failed, do not retry */
1093 if (!isolation_suitable(cc, page))
1096 /* Found a block suitable for isolating free pages from. */
1097 isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
1101 * If we isolated enough freepages, or aborted due to lock
1102 * contention, terminate.
1104 if ((cc->nr_freepages >= cc->nr_migratepages)
1106 if (isolate_start_pfn >= block_end_pfn) {
1108 * Restart at previous pageblock if more
1109 * freepages can be isolated next time.
1112 isolate_start_pfn = block_start_pfn - pageblock_nr_pages;
1115 } else if (isolate_start_pfn < block_end_pfn) {
1117 * If isolation failed early, do not continue needlessly.
1124 /* __isolate_free_page() does not map the pages */
1125 map_pages(freelist);
1128 * Record where the free scanner will restart next time. Either we
1129 * broke from the loop and set isolate_start_pfn based on the last
1130 * call to isolate_freepages_block(), or we met the migration scanner
1131 * and the loop terminated due to isolate_start_pfn < low_pfn
1133 cc->free_pfn = isolate_start_pfn;
1137 * This is a migrate-callback that "allocates" freepages by taking pages
1138 * from the isolated freelists in the block we are migrating to.
1140 static struct page *compaction_alloc(struct page *migratepage,
1144 struct compact_control *cc = (struct compact_control *)data;
1145 struct page *freepage;
1148 * Isolate free pages if necessary, and if we are not aborting due to contention.
1151 if (list_empty(&cc->freepages)) {
1153 isolate_freepages(cc);
1155 if (list_empty(&cc->freepages))
1159 freepage = list_entry(cc->freepages.next, struct page, lru);
1160 list_del(&freepage->lru);
1167 * This is a migrate-callback that "frees" freepages back to the isolated
1168 * freelist. All pages on the freelist are from the same zone, so there is no
1169 * special handling needed for NUMA.
1171 static void compaction_free(struct page *page, unsigned long data)
1173 struct compact_control *cc = (struct compact_control *)data;
1175 list_add(&page->lru, &cc->freepages);
1179 /* possible outcome of isolate_migratepages */
1181 ISOLATE_ABORT, /* Abort compaction now */
1182 ISOLATE_NONE, /* No pages isolated, continue scanning */
1183 ISOLATE_SUCCESS, /* Pages isolated, migrate */
1184 } isolate_migrate_t;
1187 * Allow userspace to control policy on scanning the unevictable LRU for
1188 * compactable pages.
1190 int sysctl_compact_unevictable_allowed __read_mostly = 1;
1193 * Isolate all pages that can be migrated from the first suitable block,
1194 * starting at the block pointed to by the migrate scanner pfn within compact_control.
1197 static isolate_migrate_t isolate_migratepages(struct zone *zone,
1198 struct compact_control *cc)
1200 unsigned long block_start_pfn;
1201 unsigned long block_end_pfn;
1202 unsigned long low_pfn;
1204 const isolate_mode_t isolate_mode =
1205 (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
1206 (cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0);
1209 * Start at where we last stopped, or beginning of the zone as
1210 * initialized by compact_zone()
1212 low_pfn = cc->migrate_pfn;
1213 block_start_pfn = pageblock_start_pfn(low_pfn);
1214 if (block_start_pfn < zone->zone_start_pfn)
1215 block_start_pfn = zone->zone_start_pfn;
1217 /* Only scan within a pageblock boundary */
1218 block_end_pfn = pageblock_end_pfn(low_pfn);
1221 * Iterate over whole pageblocks until we find the first suitable.
1222 * Do not cross the free scanner.
1224 for (; block_end_pfn <= cc->free_pfn;
1225 low_pfn = block_end_pfn,
1226 block_start_pfn = block_end_pfn,
1227 block_end_pfn += pageblock_nr_pages) {
1230 * This can potentially iterate a massively long zone with
1231 * many pageblocks unsuitable, so periodically check if we
1232 * need to schedule, or even abort async compaction.
1234 if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
1235 && compact_should_abort(cc))
1238 page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1243 /* If isolation recently failed, do not retry */
1244 if (!isolation_suitable(cc, page))
1248 * For async compaction, also only scan in MOVABLE blocks.
1249 * Async compaction is optimistic to see if the minimum amount
1250 * of work satisfies the allocation.
1252 if (cc->mode == MIGRATE_ASYNC &&
1253 !migrate_async_suitable(get_pageblock_migratetype(page)))
1256 /* Perform the isolation */
1257 low_pfn = isolate_migratepages_block(cc, low_pfn,
1258 block_end_pfn, isolate_mode);
1260 if (!low_pfn || cc->contended) {
1261 acct_isolated(zone, cc);
1262 return ISOLATE_ABORT;
1266 * Either we isolated something and proceed with migration. Or
1267 * we failed and compact_zone should decide if we should continue or not.
1273 acct_isolated(zone, cc);
1274 /* Record where migration scanner will be restarted. */
1275 cc->migrate_pfn = low_pfn;
1277 return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
1281 * order == -1 is expected when compacting via
1282 * /proc/sys/vm/compact_memory
1284 static inline bool is_via_compact_memory(int order)
1289 static enum compact_result __compact_finished(struct zone *zone, struct compact_control *cc,
1290 const int migratetype)
1293 unsigned long watermark;
1295 if (cc->contended || fatal_signal_pending(current))
1296 return COMPACT_CONTENDED;
1298 /* Compaction run completes if the migrate and free scanner meet */
1299 if (compact_scanners_met(cc)) {
1300 /* Let the next compaction start anew. */
1301 reset_cached_positions(zone);
1304 * Mark that the PG_migrate_skip information should be cleared
1305 * by kswapd when it goes to sleep. kcompactd does not set the
1306 * flag itself as the decision to be clear should be directly
1307 * based on an allocation request.
1309 if (cc->direct_compaction)
1310 zone->compact_blockskip_flush = true;
1313 return COMPACT_COMPLETE;
1315 return COMPACT_PARTIAL_SKIPPED;
1318 if (is_via_compact_memory(cc->order))
1319 return COMPACT_CONTINUE;
1321 /* Compaction run is not finished if the watermark is not met */
1322 watermark = low_wmark_pages(zone);
1324 if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx,
1326 return COMPACT_CONTINUE;
1328 /* Direct compactor: Is a suitable page free? */
1329 for (order = cc->order; order < MAX_ORDER; order++) {
1330 struct free_area *area = &zone->free_area[order];
1333 /* Job done if page is free of the right migratetype */
1334 if (!list_empty(&area->free_list[migratetype]))
1335 return COMPACT_PARTIAL;
1338 /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
1339 if (migratetype == MIGRATE_MOVABLE &&
1340 !list_empty(&area->free_list[MIGRATE_CMA]))
1341 return COMPACT_PARTIAL;
1344 * Job done if allocation would steal freepages from
1345 * other migratetype buddy lists.
1347 if (find_suitable_fallback(area, order, migratetype,
1348 true, &can_steal) != -1)
1349 return COMPACT_PARTIAL;
1352 return COMPACT_NO_SUITABLE_PAGE;
1355 static enum compact_result compact_finished(struct zone *zone,
1356 struct compact_control *cc,
1357 const int migratetype)
1361 ret = __compact_finished(zone, cc, migratetype);
1362 trace_mm_compaction_finished(zone, cc->order, ret);
1363 if (ret == COMPACT_NO_SUITABLE_PAGE)
1364 ret = COMPACT_CONTINUE;
1370 * compaction_suitable: Is this suitable to run compaction on this zone now?
1372 * COMPACT_SKIPPED - If there are too few free pages for compaction
1373 * COMPACT_PARTIAL - If the allocation would succeed without compaction
1374 * COMPACT_CONTINUE - If compaction should run now
1376 static enum compact_result __compaction_suitable(struct zone *zone, int order,
1377 unsigned int alloc_flags,
1379 unsigned long wmark_target)
1382 unsigned long watermark;
1384 if (is_via_compact_memory(order))
1385 return COMPACT_CONTINUE;
1387 watermark = low_wmark_pages(zone);
1389 * If watermarks for high-order allocation are already met, there
1390 * should be no need for compaction at all.
1392 if (zone_watermark_ok(zone, order, watermark, classzone_idx,
1394 return COMPACT_PARTIAL;
1397 * Watermarks for order-0 must be met for compaction. Note the 2UL.
1398 * This is because during migration, copies of pages need to be
1399 * allocated and for a short time, the footprint is higher
1401 watermark += (2UL << order);
1402 if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
1403 alloc_flags, wmark_target))
1404 return COMPACT_SKIPPED;
1407 * fragmentation index determines if allocation failures are due to
1408 * low memory or external fragmentation
1410 * index of -1000 would imply allocations might succeed depending on
1411 * watermarks, but we already failed the high-order watermark check
1412 * index towards 0 implies failure is due to lack of memory
1413 * index towards 1000 implies failure is due to fragmentation
1415 * Only compact if a failure would be due to fragmentation.
1417 fragindex = fragmentation_index(zone, order);
1418 if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
1419 return COMPACT_NOT_SUITABLE_ZONE;
1421 return COMPACT_CONTINUE;
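/*
 * A userspace sketch of the decision structure of __compaction_suitable()
 * with toy inputs. The high-order watermark test is simplified to a flag
 * (the real zone_watermark_ok() also walks the free lists); the numbers in
 * main() are made up, and 500 is the default extfrag threshold set below.
 *
 *	#include <stdbool.h>
 *	#include <stdio.h>
 *
 *	enum toy_result { SKIPPED, PARTIAL, CONTINUE, NOT_SUITABLE };
 *
 *	static enum toy_result toy_suitable(unsigned long free, int order,
 *					    unsigned long low_wmark,
 *					    bool have_highorder_page,
 *					    int fragindex, int threshold)
 *	{
 *		if (free >= low_wmark && have_highorder_page)
 *			return PARTIAL;		// allocation would already succeed
 *		if (free < low_wmark + (2UL << order))
 *			return SKIPPED;		// no headroom for migration copies
 *		if (fragindex >= 0 && fragindex <= threshold)
 *			return NOT_SUITABLE;	// low memory, compaction won't help
 *		return CONTINUE;
 *	}
 *
 *	int main(void)
 *	{
 *		// order-3 request, low watermark 128 pages
 *		printf("%d\n", toy_suitable(4096, 3, 128, false, 800, 500)); // CONTINUE
 *		printf("%d\n", toy_suitable(4096, 3, 128, false, 200, 500)); // NOT_SUITABLE
 *		return 0;
 *	}
 */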
1424 enum compact_result compaction_suitable(struct zone *zone, int order,
1425 unsigned int alloc_flags,
1428 enum compact_result ret;
1430 ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx,
1431 zone_page_state(zone, NR_FREE_PAGES));
1432 trace_mm_compaction_suitable(zone, order, ret);
1433 if (ret == COMPACT_NOT_SUITABLE_ZONE)
1434 ret = COMPACT_SKIPPED;
1439 bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
1446 * Make sure at least one zone would pass __compaction_suitable if we continue
1447 * retrying the reclaim.
1449 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
1451 unsigned long available;
1452 enum compact_result compact_result;
1455 * Do not consider all the reclaimable memory because we do not
1456 * want to thrash just for a single high-order allocation which
1457 * is not even guaranteed to appear even if __compaction_suitable
1458 * is happy about the watermark check.
1460 available = zone_reclaimable_pages(zone) / order;
1461 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
1462 compact_result = __compaction_suitable(zone, order, alloc_flags,
1463 ac_classzone_idx(ac), available);
1464 if (compact_result != COMPACT_SKIPPED &&
1465 compact_result != COMPACT_NOT_SUITABLE_ZONE)
1472 static enum compact_result compact_zone(struct zone *zone, struct compact_control *cc)
1474 enum compact_result ret;
1475 unsigned long start_pfn = zone->zone_start_pfn;
1476 unsigned long end_pfn = zone_end_pfn(zone);
1477 const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
1478 const bool sync = cc->mode != MIGRATE_ASYNC;
1480 ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
1482 /* Compaction is likely to fail */
1483 if (ret == COMPACT_PARTIAL || ret == COMPACT_SKIPPED)
1486 /* huh, compaction_suitable is returning something unexpected */
1487 VM_BUG_ON(ret != COMPACT_CONTINUE);
1490 * Clear pageblock skip if there were failures recently and compaction
1491 * is about to be retried after being deferred.
1493 if (compaction_restarting(zone, cc->order))
1494 __reset_isolation_suitable(zone);
1497 * Setup to move all movable pages to the end of the zone. Use cached
1498 * information on where the scanners should start but check that it
1499 * is initialised by ensuring the values are within zone boundaries.
1501 cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
1502 cc->free_pfn = zone->compact_cached_free_pfn;
1503 if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
1504 cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
1505 zone->compact_cached_free_pfn = cc->free_pfn;
1507 if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
1508 cc->migrate_pfn = start_pfn;
1509 zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
1510 zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
1513 if (cc->migrate_pfn == start_pfn)
1514 cc->whole_zone = true;
1516 cc->last_migrated_pfn = 0;
1518 trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
1519 cc->free_pfn, end_pfn, sync);
1521 migrate_prep_local();
1523 while ((ret = compact_finished(zone, cc, migratetype)) ==
1527 switch (isolate_migratepages(zone, cc)) {
1529 ret = COMPACT_CONTENDED;
1530 putback_movable_pages(&cc->migratepages);
1531 cc->nr_migratepages = 0;
1535 * We haven't isolated and migrated anything, but
1536 * there might still be unflushed migrations from
1537 * previous cc->order aligned block.
1540 case ISOLATE_SUCCESS:
1544 err = migrate_pages(&cc->migratepages, compaction_alloc,
1545 compaction_free, (unsigned long)cc, cc->mode,
1548 trace_mm_compaction_migratepages(cc->nr_migratepages, err,
1551 /* All pages were either migrated or will be released */
1552 cc->nr_migratepages = 0;
1554 putback_movable_pages(&cc->migratepages);
1556 * migrate_pages() may return -ENOMEM when scanners meet
1557 * and we want compact_finished() to detect it
1559 if (err == -ENOMEM && !compact_scanners_met(cc)) {
1560 ret = COMPACT_CONTENDED;
1564 * We failed to migrate at least one page in the current
1565 * order-aligned block, so skip the rest of it.
1567 if (cc->direct_compaction &&
1568 (cc->mode == MIGRATE_ASYNC)) {
1569 cc->migrate_pfn = block_end_pfn(
1570 cc->migrate_pfn - 1, cc->order);
1571 /* Draining pcplists is useless in this case */
1572 cc->last_migrated_pfn = 0;
1579 * Has the migration scanner moved away from the previous
1580 * cc->order aligned block where we migrated from? If yes,
1581 * flush the pages that were freed, so that they can merge and
1582 * compact_finished() can detect immediately if allocation would succeed.
1585 if (cc->order > 0 && cc->last_migrated_pfn) {
1587 unsigned long current_block_start =
1588 block_start_pfn(cc->migrate_pfn, cc->order);
1590 if (cc->last_migrated_pfn < current_block_start) {
1592 lru_add_drain_cpu(cpu);
1593 drain_local_pages(zone);
1595 /* No more flushing until we migrate again */
1596 cc->last_migrated_pfn = 0;
1604 * Release free pages and update where the free scanner should restart,
1605 * so we don't leave any returned pages behind in the next attempt.
1607 if (cc->nr_freepages > 0) {
1608 unsigned long free_pfn = release_freepages(&cc->freepages);
1610 cc->nr_freepages = 0;
1611 VM_BUG_ON(free_pfn == 0);
1612 /* The cached pfn is always the first in a pageblock */
1613 free_pfn = pageblock_start_pfn(free_pfn);
1615 * Only go back, not forward. The cached pfn might have been
1616 * already reset to zone end in compact_finished()
1618 if (free_pfn > zone->compact_cached_free_pfn)
1619 zone->compact_cached_free_pfn = free_pfn;
1622 trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
1623 cc->free_pfn, end_pfn, sync, ret);
1625 if (ret == COMPACT_CONTENDED)
1626 ret = COMPACT_PARTIAL;
1631 static enum compact_result compact_zone_order(struct zone *zone, int order,
1632 gfp_t gfp_mask, enum migrate_mode mode, int *contended,
1633 unsigned int alloc_flags, int classzone_idx)
1635 enum compact_result ret;
1636 struct compact_control cc = {
1638 .nr_migratepages = 0,
1640 .gfp_mask = gfp_mask,
1643 .alloc_flags = alloc_flags,
1644 .classzone_idx = classzone_idx,
1645 .direct_compaction = true,
1647 INIT_LIST_HEAD(&cc.freepages);
1648 INIT_LIST_HEAD(&cc.migratepages);
1650 ret = compact_zone(zone, &cc);
1652 VM_BUG_ON(!list_empty(&cc.freepages));
1653 VM_BUG_ON(!list_empty(&cc.migratepages));
1655 *contended = cc.contended;
1659 int sysctl_extfrag_threshold = 500;
1662 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
1663 * @gfp_mask: The GFP mask of the current allocation
1664 * @order: The order of the current allocation
1665 * @alloc_flags: The allocation flags of the current allocation
1666 * @ac: The context of current allocation
1667 * @mode: The migration mode for async, sync light, or sync migration
1668 * @contended: Return value that determines if compaction was aborted due to
1669 * need_resched() or lock contention
1671 * This is the main entry point for direct page compaction.
1673 enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
1674 unsigned int alloc_flags, const struct alloc_context *ac,
1675 enum migrate_mode mode, int *contended)
1677 int may_enter_fs = gfp_mask & __GFP_FS;
1678 int may_perform_io = gfp_mask & __GFP_IO;
1681 enum compact_result rc = COMPACT_SKIPPED;
1682 int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */
1684 *contended = COMPACT_CONTENDED_NONE;
1686 /* Check if the GFP flags allow compaction */
1687 if (!order || !may_enter_fs || !may_perform_io)
1688 return COMPACT_SKIPPED;
1690 trace_mm_compaction_try_to_compact_pages(order, gfp_mask, mode);
1692 /* Compact each zone in the list */
1693 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
1695 enum compact_result status;
1698 if (compaction_deferred(zone, order)) {
1699 rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
1703 status = compact_zone_order(zone, order, gfp_mask, mode,
1704 &zone_contended, alloc_flags,
1705 ac_classzone_idx(ac));
1706 rc = max(status, rc);
1708 * It takes at least one zone that wasn't lock contended
1709 * to clear all_zones_contended.
1711 all_zones_contended &= zone_contended;
1713 /* If a normal allocation would succeed, stop compacting */
1714 if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
1715 ac_classzone_idx(ac), alloc_flags)) {
1717 * We think the allocation will succeed in this zone,
1718 * but it is not certain, hence the false. The caller
1719 * will repeat this with true if allocation indeed
1720 * succeeds in this zone.
1722 compaction_defer_reset(zone, order, false);
1724 * It is possible that async compaction aborted due to
1725 * need_resched() and the watermarks were ok thanks to
1726 * somebody else freeing memory. The allocation can
1727 * however still fail so we better signal the
1728 * need_resched() contention anyway (this will not
1729 * prevent the allocation attempt).
1731 if (zone_contended == COMPACT_CONTENDED_SCHED)
1732 *contended = COMPACT_CONTENDED_SCHED;
1737 if (mode != MIGRATE_ASYNC && (status == COMPACT_COMPLETE ||
1738 status == COMPACT_PARTIAL_SKIPPED)) {
1740 * We think that allocation won't succeed in this zone
1741 * so we defer compaction there. If it ends up
1742 * succeeding after all, it will be reset.
1744 defer_compaction(zone, order);
1748 * We might have stopped compacting due to need_resched() in
1749 * async compaction, or due to a fatal signal detected. In that
1750 * case do not try further zones and signal need_resched() contention.
1753 if ((zone_contended == COMPACT_CONTENDED_SCHED)
1754 || fatal_signal_pending(current)) {
1755 *contended = COMPACT_CONTENDED_SCHED;
1762 * We might not have tried all the zones, so be conservative
1763 * and assume they are not all lock contended.
1765 all_zones_contended = 0;
1770 * If at least one zone wasn't deferred or skipped, we report if all
1771 * zones that were tried were lock contended.
1773 if (rc > COMPACT_INACTIVE && all_zones_contended)
1774 *contended = COMPACT_CONTENDED_LOCK;
1780 /* Compact all zones within a node */
1781 static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
1786 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
1788 zone = &pgdat->node_zones[zoneid];
1789 if (!populated_zone(zone))
1792 cc->nr_freepages = 0;
1793 cc->nr_migratepages = 0;
1795 INIT_LIST_HEAD(&cc->freepages);
1796 INIT_LIST_HEAD(&cc->migratepages);
1799 * When called via /proc/sys/vm/compact_memory
1800 * this makes sure we compact the whole zone regardless of
1801 * cached scanner positions.
1803 if (is_via_compact_memory(cc->order))
1804 __reset_isolation_suitable(zone);
1806 if (is_via_compact_memory(cc->order) ||
1807 !compaction_deferred(zone, cc->order))
1808 compact_zone(zone, cc);
1810 VM_BUG_ON(!list_empty(&cc->freepages));
1811 VM_BUG_ON(!list_empty(&cc->migratepages));
1813 if (is_via_compact_memory(cc->order))
1816 if (zone_watermark_ok(zone, cc->order,
1817 low_wmark_pages(zone), 0, 0))
1818 compaction_defer_reset(zone, cc->order, false);
1822 void compact_pgdat(pg_data_t *pgdat, int order)
1824 struct compact_control cc = {
1826 .mode = MIGRATE_ASYNC,
1832 __compact_pgdat(pgdat, &cc);
1835 static void compact_node(int nid)
1837 struct compact_control cc = {
1839 .mode = MIGRATE_SYNC,
1840 .ignore_skip_hint = true,
1843 __compact_pgdat(NODE_DATA(nid), &cc);
1846 /* Compact all nodes in the system */
1847 static void compact_nodes(void)
1851 /* Flush pending updates to the LRU lists */
1852 lru_add_drain_all();
1854 for_each_online_node(nid)
1858 /* The written value is actually unused, all memory is compacted */
1859 int sysctl_compact_memory;
1862 * This is the entry point for compacting all nodes via
1863 * /proc/sys/vm/compact_memory
1865 int sysctl_compaction_handler(struct ctl_table *table, int write,
1866 void __user *buffer, size_t *length, loff_t *ppos)
1874 int sysctl_extfrag_handler(struct ctl_table *table, int write,
1875 void __user *buffer, size_t *length, loff_t *ppos)
1877 proc_dointvec_minmax(table, write, buffer, length, ppos);
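/*
 * Userspace sketch of the knobs above: writing any value to
 * /proc/sys/vm/compact_memory compacts all nodes, and
 * /proc/sys/vm/extfrag_threshold tunes the fragindex cut-off used by
 * __compaction_suitable(). Needs root.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/sys/vm/compact_memory", "w");
 *
 *		if (!f)
 *			return 1;
 *		fputs("1\n", f);	// the written value itself is ignored
 *		fclose(f);
 *		return 0;
 *	}
 */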
1882 #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
1883 static ssize_t sysfs_compact_node(struct device *dev,
1884 struct device_attribute *attr,
1885 const char *buf, size_t count)
1889 if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
1890 /* Flush pending updates to the LRU lists */
1891 lru_add_drain_all();
1898 static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
1900 int compaction_register_node(struct node *node)
1902 return device_create_file(&node->dev, &dev_attr_compact);
1905 void compaction_unregister_node(struct node *node)
1907 return device_remove_file(&node->dev, &dev_attr_compact);
1909 #endif /* CONFIG_SYSFS && CONFIG_NUMA */
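/*
 * With CONFIG_NUMA, the device attribute above also allows compacting a
 * single node from userspace; a minimal sketch for node 0 (the standard
 * sysfs node device path is assumed):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/devices/system/node/node0/compact", O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		write(fd, "1", 1);	// any write triggers compaction of node 0
 *		close(fd);
 *		return 0;
 *	}
 */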
1911 static inline bool kcompactd_work_requested(pg_data_t *pgdat)
1913 return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
1916 static bool kcompactd_node_suitable(pg_data_t *pgdat)
1920 enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx;
1922 for (zoneid = 0; zoneid <= classzone_idx; zoneid++) {
1923 zone = &pgdat->node_zones[zoneid];
1925 if (!populated_zone(zone))
1928 if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
1929 classzone_idx) == COMPACT_CONTINUE)
1936 static void kcompactd_do_work(pg_data_t *pgdat)
1939 * With no special task, compact all zones so that a page of requested
1940 * order is allocatable.
1944 struct compact_control cc = {
1945 .order = pgdat->kcompactd_max_order,
1946 .classzone_idx = pgdat->kcompactd_classzone_idx,
1947 .mode = MIGRATE_SYNC_LIGHT,
1948 .ignore_skip_hint = true,
1951 bool success = false;
1953 trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
1955 count_vm_event(KCOMPACTD_WAKE);
1957 for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) {
1960 zone = &pgdat->node_zones[zoneid];
1961 if (!populated_zone(zone))
1964 if (compaction_deferred(zone, cc.order))
1967 if (compaction_suitable(zone, cc.order, 0, zoneid) !=
1971 cc.nr_freepages = 0;
1972 cc.nr_migratepages = 0;
1974 INIT_LIST_HEAD(&cc.freepages);
1975 INIT_LIST_HEAD(&cc.migratepages);
1977 if (kthread_should_stop())
1979 status = compact_zone(zone, &cc);
1981 if (zone_watermark_ok(zone, cc.order, low_wmark_pages(zone),
1982 cc.classzone_idx, 0)) {
1984 compaction_defer_reset(zone, cc.order, false);
1985 } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
1987 * We use sync migration mode here, so we defer like
1988 * sync direct compaction does.
1990 defer_compaction(zone, cc.order);
1993 VM_BUG_ON(!list_empty(&cc.freepages));
1994 VM_BUG_ON(!list_empty(&cc.migratepages));
1998 * Regardless of success, we are done until woken up next. But remember
1999 * the requested order/classzone_idx in case it was higher/tighter than
2002 if (pgdat->kcompactd_max_order <= cc.order)
2003 pgdat->kcompactd_max_order = 0;
2004 if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx)
2005 pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
2008 void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
2013 if (pgdat->kcompactd_max_order < order)
2014 pgdat->kcompactd_max_order = order;
2016 if (pgdat->kcompactd_classzone_idx > classzone_idx)
2017 pgdat->kcompactd_classzone_idx = classzone_idx;
2019 if (!waitqueue_active(&pgdat->kcompactd_wait))
2022 if (!kcompactd_node_suitable(pgdat))
2025 trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
2027 wake_up_interruptible(&pgdat->kcompactd_wait);
2031 * The background compaction daemon, started as a kernel thread
2032 * from the init process.
2034 static int kcompactd(void *p)
2036 pg_data_t *pgdat = (pg_data_t*)p;
2037 struct task_struct *tsk = current;
2039 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2041 if (!cpumask_empty(cpumask))
2042 set_cpus_allowed_ptr(tsk, cpumask);
2046 pgdat->kcompactd_max_order = 0;
2047 pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
2049 while (!kthread_should_stop()) {
2050 trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
2051 wait_event_freezable(pgdat->kcompactd_wait,
2052 kcompactd_work_requested(pgdat));
2054 kcompactd_do_work(pgdat);
2061 * This kcompactd start function will be called by init and node-hot-add.
2062 * On node-hot-add, kcompactd will be moved to the proper cpus if cpus are hot-added.
2064 int kcompactd_run(int nid)
2066 pg_data_t *pgdat = NODE_DATA(nid);
2069 if (pgdat->kcompactd)
2072 pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
2073 if (IS_ERR(pgdat->kcompactd)) {
2074 pr_err("Failed to start kcompactd on node %d\n", nid);
2075 ret = PTR_ERR(pgdat->kcompactd);
2076 pgdat->kcompactd = NULL;
2082 * Called by memory hotplug when all memory in a node is offlined. Caller must
2083 * hold mem_hotplug_begin/end().
2085 void kcompactd_stop(int nid)
2087 struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;
2090 kthread_stop(kcompactd);
2091 NODE_DATA(nid)->kcompactd = NULL;
2096 * It's optimal to keep kcompactd on the same CPUs as their memory, but
2097 * not required for correctness. So if the last cpu in a node goes
2098 * away, we get changed to run anywhere: as the first one comes back,
2099 * restore their cpu bindings.
2101 static int cpu_callback(struct notifier_block *nfb, unsigned long action,
2106 if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
2107 for_each_node_state(nid, N_MEMORY) {
2108 pg_data_t *pgdat = NODE_DATA(nid);
2109 const struct cpumask *mask;
2111 mask = cpumask_of_node(pgdat->node_id);
2113 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
2114 /* One of our CPUs online: restore mask */
2115 set_cpus_allowed_ptr(pgdat->kcompactd, mask);
2121 static int __init kcompactd_init(void)
2125 for_each_node_state(nid, N_MEMORY)
2127 hotcpu_notifier(cpu_callback, 0);
2130 subsys_initcall(kcompactd_init)
2132 #endif /* CONFIG_COMPACTION */