mm, memory_hotplug: make arch_alloc_nodedata independent of CONFIG_MEMORY_HOTPLUG
[linux-2.6-block.git] include/linux/memory_hotplug.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_block;
struct memory_group;
struct resource;
struct vmem_altmap;

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add, a new pgdat has to be allocated.
 *
 * If an architecture uses the generic style of NODE_DATA(),
 * node_data[nid] = kzalloc() works well, but this is architecture dependent.
 *
 * In general, generic_alloc_nodedata() is used.
 * arch_free_nodedata() is currently only used on the error path of node hot-add.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * XXX: node-aware allocation can't be used to get the new node's memory at
 * this point, because the pgdat for the new node is not yet allocated or
 * initialized. Using the new node's memory will need more consideration.
 */
#define generic_alloc_nodedata(nid)				\
({								\
	kzalloc(sizeof(pg_data_t), GFP_KERNEL);			\
})
/*
 * This definition is only used on the error path of node hot-add.
 * For node hot-remove, it has to be replaced.
 */
#define generic_free_nodedata(pgdat)	kfree(pgdat)

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

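/*
 * Illustrative sketch only (not part of the declarations in this header):
 * generic node hot-add code is expected to pair the helpers above roughly
 * like this, where "nid" is the node being brought up:
 *
 *	pg_data_t *pgdat = arch_alloc_nodedata(nid);
 *
 *	if (!pgdat)
 *		return NULL;
 *	arch_refresh_nodedata(nid, pgdat);
 *	...
 *	on the node hot-add error path:
 *	arch_free_nodedata(pgdat);
 *	arch_refresh_nodedata(nid, NULL);
 */
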
#ifdef CONFIG_MEMORY_HOTPLUG
struct page *pfn_to_online_page(unsigned long pfn);

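/*
 * Illustrative sketch only: inside a PFN loop, a walker would typically use
 * pfn_to_online_page() instead of a bare pfn_valid()/pfn_to_page() pair so
 * that offline sections are skipped:
 *
 *	struct page *page = pfn_to_online_page(pfn);
 *
 *	if (!page)
 *		continue;
 *	...use "page", which belongs to an online memory section...
 */
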
/* Types for controlling the zone type of onlined and offlined memory */
enum {
	/* Offline the memory. */
	MMOP_OFFLINE = 0,
	/* Online the memory. Zone depends, see default_zone_for_pfn(). */
	MMOP_ONLINE,
	/* Online the memory to ZONE_NORMAL. */
	MMOP_ONLINE_KERNEL,
	/* Online the memory to ZONE_MOVABLE. */
	MMOP_ONLINE_MOVABLE,
};

/* Flags for add_memory() and friends to specify memory hotplug details. */
typedef int __bitwise mhp_t;

/* No special request */
#define MHP_NONE		((__force mhp_t)0)
/*
 * Allow merging of the added System RAM resource with adjacent,
 * mergeable resources. After a successful call to add_memory_resource()
 * with this flag set, the resource pointer must no longer be used as it
 * might be stale, or the resource might have changed.
 */
#define MHP_MERGE_RESOURCE	((__force mhp_t)BIT(0))

/*
 * We want the memmap (struct page array) to be self-contained.
 * To do so, we will use the beginning of the hot-added range to build
 * the page tables for the memmap array that describes the entire range.
 * Only selected architectures support it with SPARSE_VMEMMAP.
 */
#define MHP_MEMMAP_ON_MEMORY	((__force mhp_t)BIT(1))
/*
 * The nid field specifies a memory group id (mgid) instead. The memory group
 * implies the node id (nid).
 */
#define MHP_NID_IS_MGID		((__force mhp_t)BIT(2))

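/*
 * Illustrative sketch only (nid/start/size are assumed to come from the
 * caller): a hot-add driver might combine these flags with add_memory(),
 * declared further down in this header, e.g.:
 *
 *	mhp_t flags = MHP_MERGE_RESOURCE;
 *
 *	if (mhp_supports_memmap_on_memory(size))
 *		flags |= MHP_MEMMAP_ON_MEMORY;
 *	rc = add_memory(nid, start, size, flags);
 */
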
/*
 * Extended parameters for memory hotplug:
 * altmap: alternative allocator for memmap array (optional)
 * pgprot: page protection flags to apply to newly created page tables
 *	   (required)
 */
struct mhp_params {
	struct vmem_altmap *altmap;
	pgprot_t pgprot;
};

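/*
 * Illustrative sketch only: callers typically fill this on the stack before
 * handing it to arch_add_memory()/add_pages(), e.g.:
 *
 *	struct mhp_params params = {
 *		.altmap = altmap,	(may be NULL)
 *		.pgprot = PAGE_KERNEL,
 *	};
 *
 *	rc = arch_add_memory(nid, start, size, &params);
 */
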
bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
struct range mhp_get_pluggable_range(bool need_mapping);

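/*
 * Illustrative sketch only: a caller that needs a linear mapping could
 * validate a candidate range before trying to add it; the range returned by
 * mhp_get_pluggable_range() spans [r.start, r.end]:
 *
 *	struct range r = mhp_get_pluggable_range(true);
 *
 *	if (!mhp_range_allowed(start, size, true))
 *		return -ERANGE;
 */
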
/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone should have pgdat_resize_lock() and
 * zone_span_writelock() both held. This ensures the size of a zone can't
 * be changed while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
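/*
 * Illustrative read-side sketch (assuming the caller only needs a consistent
 * snapshot of the zone span, not to block concurrent resizers):
 *
 *	unsigned int seq;
 *	unsigned long start_pfn, nr_pages;
 *
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		start_pfn = zone->zone_start_pfn;
 *		nr_pages = zone->spanned_pages;
 *	} while (zone_span_seqretry(zone, seq));
 */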
extern void adjust_present_page_count(struct page *page,
				      struct memory_group *group,
				      long nr_pages);
/* VM interface that may be used by firmware interface */
extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
				     struct zone *zone);
extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
			struct zone *zone, struct memory_group *group);
extern struct zone *test_pages_in_a_zone(unsigned long start_pfn,
					 unsigned long end_pfn);
extern void __offline_isolated_pages(unsigned long start_pfn,
				     unsigned long end_pfn);

typedef void (*online_page_callback_t)(struct page *page, unsigned int order);

extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);

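/*
 * Illustrative sketch only (balloon-driver style; my_online_page() and the
 * helpers it calls are hypothetical): a driver can intercept onlining of
 * pages it added and either keep them or hand them to the page allocator
 * via generic_online_page():
 *
 *	static void my_online_page(struct page *page, unsigned int order)
 *	{
 *		if (driver_wants_page(page))
 *			driver_keep_page(page, order);
 *		else
 *			generic_online_page(page, order);
 *	}
 *
 *	rc = set_online_page_callback(&my_online_page);
 *	...
 *	restore_online_page_callback(&my_online_page);
 */
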
extern int try_online_node(int nid);

extern int arch_add_memory(int nid, u64 start, u64 size,
			   struct mhp_params *params);
extern u64 max_mem_size;

extern int mhp_online_type_from_str(const char *str);

/* Default online_type (MMOP_*) when new memory blocks are added. */
extern int mhp_default_online_type;
/* If movable_node boot option specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}

extern void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap);
extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
			   struct vmem_altmap *altmap);

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		       struct mhp_params *params);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct mhp_params *params)
{
	return __add_pages(nid, start_pfn, nr_pages, params);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_params *params);
#endif /* ARCH_HAS_ADD_PAGES */

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);

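/*
 * Illustrative sketch only: readers that iterate online memory take the
 * hotplug reader lock, while the hotplug paths themselves bracket changes
 * with mem_hotplug_begin()/mem_hotplug_done():
 *
 *	get_online_mems();
 *	...walk online sections / memory blocks...
 *	put_online_mems();
 */
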
#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
})

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int try_online_node(int nid)
{
	return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
	return false;
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */

/*
 * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some
 * platforms might override and use arch_get_mappable_range()
 * for internal non memory hotplug purposes.
 */
struct range arch_get_mappable_range(void);

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
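/*
 * Illustrative sketch only (new_start_pfn/new_nr_pages are caller-provided
 * placeholders): node_size_lock is an IRQ-safe spinlock, so the saved flags
 * must be passed back on unlock:
 *
 *	unsigned long flags;
 *
 *	pgdat_resize_lock(pgdat, &flags);
 *	pgdat->node_start_pfn = new_start_pfn;
 *	pgdat->node_spanned_pages = new_nr_pages;
 *	pgdat_resize_unlock(pgdat, &flags);
 */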
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
			 struct memory_group *group);
extern int remove_memory(u64 start, u64 size);
extern void __remove_memory(u64 start, u64 size);
extern int offline_and_remove_memory(u64 start, u64 size);

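/*
 * Illustrative sketch only: a driver that owns a previously hot-added range
 * can try to take it back in one step; this can fail (e.g. -EBUSY) if the
 * pages cannot be offlined:
 *
 *	rc = offline_and_remove_memory(start, size);
 *	if (rc)
 *		pr_warn("failed to offline and remove memory: %d\n", rc);
 */
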
#else
static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
				struct memory_group *group)
{
	return -EINVAL;
}

static inline int remove_memory(u64 start, u64 size)
{
	return -EBUSY;
}

static inline void __remove_memory(u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */

extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);

#ifdef CONFIG_MEMORY_HOTPLUG
extern void __ref free_area_init_core_hotplug(int nid);
extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory_resource(int nid, struct resource *resource,
			       mhp_t mhp_flags);
extern int add_memory_driver_managed(int nid, u64 start, u64 size,
				     const char *resource_name,
				     mhp_t mhp_flags);
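/*
 * Illustrative sketch only ("example_driver" is a placeholder): driver-managed
 * memory is conventionally added with a resource name of the form
 * "System RAM (driver)" so it can be told apart from firmware-provided RAM:
 *
 *	rc = add_memory_driver_managed(nid, start, size,
 *				       "System RAM (example_driver)",
 *				       MHP_NONE);
 */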
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
				   unsigned long nr_pages,
				   struct vmem_altmap *altmap, int migratetype);
extern void remove_pfn_range_from_zone(struct zone *zone,
				       unsigned long start_pfn,
				       unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_section(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap);
extern void sparse_remove_section(struct mem_section *ms,
		unsigned long pfn, unsigned long nr_pages,
		unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern struct zone *zone_for_pfn_range(int online_type, int nid,
		struct memory_group *group, unsigned long start_pfn,
		unsigned long nr_pages);
extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
				      struct mhp_params *params);
void arch_remove_linear_mapping(u64 start, u64 size);
extern bool mhp_supports_memmap_on_memory(unsigned long size);
#endif /* CONFIG_MEMORY_HOTPLUG */

#endif /* __LINUX_MEMORY_HOTPLUG_H */