1 #ifndef _LINUX_COMPACTION_H
2 #define _LINUX_COMPACTION_H
5 * Determines how hard direct compaction should try to succeed.
6 * Lower value means higher priority, analogous to reclaim priority.
8 enum compact_priority {
9 COMPACT_PRIO_SYNC_LIGHT,
10 MIN_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
11 DEF_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
13 INIT_COMPACT_PRIORITY = COMPACT_PRIO_ASYNC
16 /* Return values for compact_zone() and try_to_compact_pages() */
17 /* When adding new states, please adjust include/trace/events/compaction.h */
19 /* For more detailed tracepoint output - internal to compaction */
20 COMPACT_NOT_SUITABLE_ZONE,
22 * compaction didn't start as it was not possible or direct reclaim
26 /* compaction didn't start as it was deferred due to past failures */
29 /* compaction not active last round */
30 COMPACT_INACTIVE = COMPACT_DEFERRED,
32 /* For more detailed tracepoint output - internal to compaction */
33 COMPACT_NO_SUITABLE_PAGE,
34 /* compaction should continue to another pageblock */
38 * The full zone was scanned, but compaction wasn't successful enough to compact
43 * direct compaction has scanned part of the zone but wasn't successful
44 * in compacting suitable pages.
46 COMPACT_PARTIAL_SKIPPED,
48 /* compaction terminated prematurely due to lock contention */
52 * direct compaction partially compacted a zone and there might be
58 /* Used to signal whether compaction detected need_sched() or lock contention */
59 /* No contention detected */
60 #define COMPACT_CONTENDED_NONE 0
61 /* Either need_sched() was true or fatal signal pending */
62 #define COMPACT_CONTENDED_SCHED 1
63 /* Zone lock or lru_lock was contended in async compaction */
64 #define COMPACT_CONTENDED_LOCK 2
/* NOTE(review): these appear to be the values reported through the *contended
 * out-parameter of try_to_compact_pages() below — confirm against callers. */
66 struct alloc_context; /* in mm/internal.h */
68 #ifdef CONFIG_COMPACTION
/*
 * Compaction tunables exposed through sysctl, plus their proc handlers.
 * The handler prototypes follow the standard ctl_table proc-handler
 * signature (table, write flag, user buffer, length, file position).
 */
69 extern int sysctl_compact_memory;
/* Write handler for the compact_memory knob — presumably triggers a
 * system-wide compaction pass; confirm in the handler's definition. */
70 extern int sysctl_compaction_handler(struct ctl_table *table, int write,
71 void __user *buffer, size_t *length, loff_t *ppos);
/* Threshold read/written via sysctl; consumed together with
 * fragmentation_index() below — exact semantics live in mm/compaction.c. */
72 extern int sysctl_extfrag_threshold;
73 extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
74 void __user *buffer, size_t *length, loff_t *ppos);
/* Whether compaction may examine unevictable pages (boolean-style int). */
75 extern int sysctl_compact_unevictable_allowed;
/* External-fragmentation index of @zone for allocations of @order. */
77 extern int fragmentation_index(struct zone *zone, unsigned int order);
78 extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
80 unsigned int alloc_flags, const struct alloc_context *ac,
81 enum compact_priority prio, int *contended);
/* Compact all zones of the given node at the given allocation order. */
82 extern void compact_pgdat(pg_data_t *pgdat, int order);
/* NOTE(review): name suggests this clears cached pageblock-skip state for
 * the node's zones — confirm against the definition in mm/compaction.c. */
83 extern void reset_isolation_suitable(pg_data_t *pgdat);
/* Whether @zone is a suitable candidate for compaction at @order. */
84 extern enum compact_result compaction_suitable(struct zone *zone, int order,
85 unsigned int alloc_flags, int classzone_idx);
/* Defer/query/reset compaction back-off state tracked per zone+order. */
87 extern void defer_compaction(struct zone *zone, int order);
88 extern bool compaction_deferred(struct zone *zone, int order);
89 extern void compaction_defer_reset(struct zone *zone, int order,
/* NOTE(review): the declaration above is truncated in this chunk — its
 * trailing parameter line is not visible here. */
91 extern bool compaction_restarting(struct zone *zone, int order);
93 /* Compaction has made some progress and retrying makes sense */
94 static inline bool compaction_made_progress(enum compact_result result)
97 * Even though this might sound confusing this in fact tells us
98 * that the compaction successfully isolated and migrated some
101 if (result == COMPACT_PARTIAL)
107 /* Compaction has failed and it doesn't make much sense to keep retrying. */
108 static inline bool compaction_failed(enum compact_result result)
110 /* All zones were scanned completely and still not result. */
111 if (result == COMPACT_COMPLETE)
118 * Compaction has backed off for some reason. It might be throttling or
119 * lock contention. Retrying is still worthwhile.
121 static inline bool compaction_withdrawn(enum compact_result result)
124 * Compaction backed off due to watermark checks for order-0
125 * so the regular reclaim has to try harder and reclaim something.
127 if (result == COMPACT_SKIPPED)
131 * If compaction is deferred for high-order allocations, it is
132 * because sync compaction recently failed. If this is the case
133 * and the caller requested a THP allocation, we do not want
134 * to heavily disrupt the system, so we fail the allocation
135 * instead of entering direct reclaim.
137 if (result == COMPACT_DEFERRED)
141 * If compaction in async mode encounters contention or blocks higher
142 * priority task we back off early rather than cause stalls.
144 if (result == COMPACT_CONTENDED)
148 * Page scanners have met but we haven't scanned full zones so this
149 * is a back off in fact.
151 if (result == COMPACT_PARTIAL_SKIPPED)
158 bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
/* Per-node kcompactd kernel-thread lifecycle (start/stop) and wakeup. */
161 extern int kcompactd_run(int nid);
162 extern void kcompactd_stop(int nid);
163 extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx);
166 static inline void compact_pgdat(pg_data_t *pgdat, int order)
170 static inline void reset_isolation_suitable(pg_data_t *pgdat)
174 static inline enum compact_result compaction_suitable(struct zone *zone, int order,
175 int alloc_flags, int classzone_idx)
177 return COMPACT_SKIPPED;
180 static inline void defer_compaction(struct zone *zone, int order)
184 static inline bool compaction_deferred(struct zone *zone, int order)
189 static inline bool compaction_made_progress(enum compact_result result)
194 static inline bool compaction_failed(enum compact_result result)
199 static inline bool compaction_withdrawn(enum compact_result result)
204 static inline int kcompactd_run(int nid)
208 static inline void kcompactd_stop(int nid)
212 static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
216 #endif /* CONFIG_COMPACTION */
218 #if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
/* Per-node registration hooks, available only when CONFIG_COMPACTION,
 * CONFIG_SYSFS and CONFIG_NUMA are all enabled (see surrounding #if). */
220 extern int compaction_register_node(struct node *node);
221 extern void compaction_unregister_node(struct node *node);
225 static inline int compaction_register_node(struct node *node)
230 static inline void compaction_unregister_node(struct node *node)
233 #endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */
235 #endif /* _LINUX_COMPACTION_H */