From f98a497e1f16ee411df72629e32e31cba4cfa9cf Mon Sep 17 00:00:00 2001
From: Johannes Weiner
Date: Fri, 19 May 2023 14:39:58 +0200
Subject: [PATCH] mm: compaction: remove unnecessary is_via_compact_memory() checks

Remove from all paths not reachable via /proc/sys/vm/compact_memory.

Link: https://lkml.kernel.org/r/20230519123959.77335-5-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner
Acked-by: Vlastimil Babka
Cc: Mel Gorman
Cc: Michal Hocko
Cc: Baolin Wang
Signed-off-by: Andrew Morton
---
 mm/compaction.c | 11 +----------
 mm/vmscan.c     |  8 +-------
 2 files changed, 2 insertions(+), 17 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index bb9b76244a5d..bc1f389ed378 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2280,9 +2280,6 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
 		unsigned long available;
 		unsigned long watermark;
 
-		if (is_via_compact_memory(order))
-			return true;
-
 		/* Allocation can already succeed, nothing to do */
 		watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
 		if (zone_watermark_ok(zone, order, watermark,
@@ -2848,9 +2845,6 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat)
 		if (!populated_zone(zone))
 			continue;
 
-		if (is_via_compact_memory(pgdat->kcompactd_max_order))
-			return true;
-
 		/* Allocation can already succeed, check other zones */
 		if (zone_watermark_ok(zone, pgdat->kcompactd_max_order,
 				      min_wmark_pages(zone),
@@ -2895,9 +2889,6 @@ static void kcompactd_do_work(pg_data_t *pgdat)
 		if (compaction_deferred(zone, cc.order))
 			continue;
 
-		if (is_via_compact_memory(cc.order))
-			goto compact;
-
 		/* Allocation can already succeed, nothing to do */
 		if (zone_watermark_ok(zone, cc.order,
 				      min_wmark_pages(zone), zoneid, 0))
@@ -2906,7 +2897,7 @@ static void kcompactd_do_work(pg_data_t *pgdat)
 		if (compaction_suitable(zone, cc.order,
 					zoneid) != COMPACT_CONTINUE)
 			continue;
-compact:
+
 		if (kthread_should_stop())
 			return;
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9f8bfd1fcf58..99e4ae44850d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -6399,9 +6399,6 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
 		if (!managed_zone(zone))
 			continue;
 
-		if (sc->order == -1) /* is_via_compact_memory() */
-			return false;
-
 		/* Allocation can already succeed, nothing to do */
 		if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
 				      sc->reclaim_idx, 0))
@@ -6598,9 +6595,6 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
 {
 	unsigned long watermark;
 
-	if (sc->order == -1) /* is_via_compact_memory() */
-		goto suitable;
-
 	/* Allocation can already succeed, nothing to do */
 	if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
 			      sc->reclaim_idx, 0))
@@ -6610,7 +6604,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
 	if (compaction_suitable(zone, sc->order,
 				sc->reclaim_idx) == COMPACT_SKIPPED)
 		return false;
-suitable:
+
 	/*
 	 * Compaction is already possible, but it takes time to run and there
 	 * are potentially other callers using the pages just freed. So proceed
-- 
2.25.1
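
For context on why the removed checks are dead on these paths: the
sc->order == -1 comments in the vmscan.c hunks are shorthand for
is_via_compact_memory(), which is just a trivial order test in
mm/compaction.c; order == -1 only ever enters the tree through the
/proc/sys/vm/compact_memory handler, while kcompactd and the reclaim
paths touched here always carry a real allocation order. A sketch of
the helper, assuming its definition is unchanged by this series:

	/*
	 * order == -1 is the value passed down when full compaction of a
	 * node is requested via /proc/sys/vm/compact_memory; normal
	 * allocation/compaction paths pass a non-negative order.
	 */
	static inline bool is_via_compact_memory(int order)
	{
		return order == -1;
	}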