// SPDX-License-Identifier: GPL-2.0
/*
 * Memory subsystem support
 *
 * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
 *            Dave Hansen <haveblue@us.ibm.com>
 *
 * This file provides the necessary infrastructure to represent
 * a SPARSEMEM-memory-model system's physical memory in /sysfs.
 * All arch-independent code that assumes MEMORY_HOTPLUG requires
 * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/topology.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#include <linux/atomic.h>
#include <linux/uaccess.h>

#define MEMORY_CLASS_NAME	"memory"

static const char *const online_type_to_str[] = {
	[MMOP_OFFLINE] = "offline",
	[MMOP_ONLINE] = "online",
	[MMOP_ONLINE_KERNEL] = "online_kernel",
	[MMOP_ONLINE_MOVABLE] = "online_movable",
};

int mhp_online_type_from_str(const char *str)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(online_type_to_str); i++) {
		if (sysfs_streq(str, online_type_to_str[i]))
			return i;
	}
	return -EINVAL;
}

#define to_memory_block(dev) container_of(dev, struct memory_block, dev)

static int sections_per_block;

static inline unsigned long memory_block_id(unsigned long section_nr)
{
	return section_nr / sections_per_block;
}

static inline unsigned long pfn_to_block_id(unsigned long pfn)
{
	return memory_block_id(pfn_to_section_nr(pfn));
}

static inline unsigned long phys_to_block_id(unsigned long phys)
{
	return pfn_to_block_id(PFN_DOWN(phys));
}
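
/*
 * Worked example (illustrative figures, not asserting any particular
 * configuration): on x86-64 a SPARSEMEM section is 128 MiB. With a
 * 2 GiB block size, sections_per_block == 16, so the page at physical
 * address 0x100000000 (PFN 0x100000, section 32) maps to block
 * 32 / 16 == 2.
 */
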
static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);

static struct bus_type memory_subsys = {
	.name = MEMORY_CLASS_NAME,
	.dev_name = MEMORY_CLASS_NAME,
	.online = memory_subsys_online,
	.offline = memory_subsys_offline,
};

/*
 * Memory blocks are cached in a local radix tree to avoid
 * a costly linear search for the corresponding device on
 * the subsystem bus.
 */
static DEFINE_XARRAY(memory_blocks);

static BLOCKING_NOTIFIER_HEAD(memory_chain);

int register_memory_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&memory_chain, nb);
}
EXPORT_SYMBOL(register_memory_notifier);

void unregister_memory_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&memory_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_notifier);
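
/*
 * Sketch of a typical consumer (hypothetical driver, not part of this
 * file): a callback on this chain receives MEM_GOING_ONLINE/MEM_ONLINE/
 * MEM_GOING_OFFLINE/MEM_OFFLINE/MEM_CANCEL_* actions together with a
 * struct memory_notify describing the affected range:
 *
 *	static int example_memory_cb(struct notifier_block *nb,
 *				     unsigned long action, void *arg)
 *	{
 *		struct memory_notify *mn = arg;
 *
 *		if (action == MEM_GOING_OFFLINE)
 *			pr_info("offlining %lx+%lx\n",
 *				mn->start_pfn, mn->nr_pages);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_memory_nb = {
 *		.notifier_call = example_memory_cb,
 *	};
 *
 *	register_memory_notifier(&example_memory_nb);
 */
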
static void memory_block_release(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	kfree(mem);
}

unsigned long __weak memory_block_size_bytes(void)
{
	return MIN_MEMORY_BLOCK_SIZE;
}
EXPORT_SYMBOL_GPL(memory_block_size_bytes);

/*
 * Show the first physical section index (number) of this memory block.
 */
static ssize_t phys_index_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long phys_index;

	phys_index = mem->start_section_nr / sections_per_block;

	return sysfs_emit(buf, "%08lx\n", phys_index);
}
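
/*
 * Example read (block number illustrative):
 *	$ cat /sys/devices/system/memory/memory32/phys_index
 *	00000020
 * i.e. the block id in zero-padded hex.
 */
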
/*
 * Legacy interface that we cannot remove. Always indicate "removable"
 * with CONFIG_MEMORY_HOTREMOVE - bad heuristic.
 */
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return sysfs_emit(buf, "%d\n", (int)IS_ENABLED(CONFIG_MEMORY_HOTREMOVE));
}

/*
 * online, offline, going offline, etc.
 */
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	const char *output;

	/*
	 * We can probably put these states in a nice little array
	 * so that they're not open-coded.
	 */
	switch (mem->state) {
	case MEM_ONLINE:
		output = "online";
		break;
	case MEM_OFFLINE:
		output = "offline";
		break;
	case MEM_GOING_OFFLINE:
		output = "going-offline";
		break;
	default:
		WARN_ON(1);
		return sysfs_emit(buf, "ERROR-UNKNOWN-%ld\n", mem->state);
	}

	return sysfs_emit(buf, "%s\n", output);
}

int memory_notify(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(&memory_chain, val, v);
}

static int memory_block_online(struct memory_block *mem)
{
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages;
	struct zone *zone;
	int ret;

	zone = zone_for_pfn_range(mem->online_type, mem->nid, start_pfn, nr_pages);

	/*
	 * Although vmemmap pages have a different lifecycle than the pages
	 * they describe (they remain until the memory is unplugged), doing
	 * their initialization and accounting at memory onlining/offlining
	 * stage helps to keep accounting easier to follow - e.g. vmemmap
	 * pages belong to the same zone as the memory they describe.
	 */
	if (nr_vmemmap_pages) {
		ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
		if (ret)
			return ret;
	}

	ret = online_pages(start_pfn + nr_vmemmap_pages,
			   nr_pages - nr_vmemmap_pages, zone);
	if (ret) {
		if (nr_vmemmap_pages)
			mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
		return ret;
	}

	/*
	 * Account once onlining succeeded. If the zone was unpopulated, it is
	 * now already properly populated.
	 */
	if (nr_vmemmap_pages)
		adjust_present_page_count(zone, nr_vmemmap_pages);

	return ret;
}
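
/*
 * Sizing sketch (x86-64 figures, illustrative): a 128 MiB block is
 * 32768 4-KiB pages; at 64 bytes per struct page its memmap needs
 * 32768 * 64 bytes = 2 MiB, i.e. nr_vmemmap_pages == 512 when the
 * memmap lives on the hot-added range itself, leaving 32256 pages for
 * online_pages() above.
 */
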
static int memory_block_offline(struct memory_block *mem)
{
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));

	/*
	 * Unaccount before offlining, such that unpopulated zone and kthreads
	 * can properly be torn down in offline_pages().
	 */
	if (nr_vmemmap_pages)
		adjust_present_page_count(zone, -nr_vmemmap_pages);

	ret = offline_pages(start_pfn + nr_vmemmap_pages,
			    nr_pages - nr_vmemmap_pages);
	if (ret) {
		/* offline_pages() failed. Account back. */
		if (nr_vmemmap_pages)
			adjust_present_page_count(zone, nr_vmemmap_pages);
		return ret;
	}

	if (nr_vmemmap_pages)
		mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);

	return ret;
}

/*
 * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
 * OK to have direct references to sparsemem variables in here.
 */
static int
memory_block_action(struct memory_block *mem, unsigned long action)
{
	int ret;

	switch (action) {
	case MEM_ONLINE:
		ret = memory_block_online(mem);
		break;
	case MEM_OFFLINE:
		ret = memory_block_offline(mem);
		break;
	default:
		WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
		     "%ld\n", __func__, mem->start_section_nr, action, action);
		ret = -EINVAL;
	}

	return ret;
}

static int memory_block_change_state(struct memory_block *mem,
		unsigned long to_state, unsigned long from_state_req)
{
	int ret;

	if (mem->state != from_state_req)
		return -EINVAL;

	if (to_state == MEM_OFFLINE)
		mem->state = MEM_GOING_OFFLINE;

	ret = memory_block_action(mem, to_state);
	mem->state = ret ? from_state_req : to_state;

	return ret;
}

/* The device lock serializes operations on memory_subsys_[online|offline] */
static int memory_subsys_online(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (mem->state == MEM_ONLINE)
		return 0;

	/*
	 * When called via device_online() without configuring the online_type,
	 * we want to default to MMOP_ONLINE.
	 */
	if (mem->online_type == MMOP_OFFLINE)
		mem->online_type = MMOP_ONLINE;

	ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
	mem->online_type = MMOP_OFFLINE;

	return ret;
}

static int memory_subsys_offline(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	if (mem->state == MEM_OFFLINE)
		return 0;

	return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}

static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	const int online_type = mhp_online_type_from_str(buf);
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (online_type < 0)
		return -EINVAL;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	switch (online_type) {
	case MMOP_ONLINE_KERNEL:
	case MMOP_ONLINE_MOVABLE:
	case MMOP_ONLINE:
		/* mem->online_type is protected by device_hotplug_lock */
		mem->online_type = online_type;
		ret = device_online(&mem->dev);
		break;
	case MMOP_OFFLINE:
		ret = device_offline(&mem->dev);
		break;
	default:
		ret = -EINVAL; /* should never happen */
	}

	unlock_device_hotplug();

	if (ret < 0)
		return ret;
	if (ret)
		return -EINVAL;

	return count;
}
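
/*
 * Typical usage from userspace (block number illustrative):
 *	# echo online_movable > /sys/devices/system/memory/memory32/state
 *	# echo offline > /sys/devices/system/memory/memory32/state
 */
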
/*
 * Legacy interface that we cannot remove: s390x exposes the storage increment
 * covered by a memory block, allowing for identifying which memory blocks
 * comprise a storage increment. Since a memory block spans complete
 * storage increments nowadays, this interface is basically unused. Other
 * archs never exposed != 0.
 */
static ssize_t phys_device_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);

	return sysfs_emit(buf, "%d\n",
			  arch_get_memory_phys_device(start_pfn));
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static int print_allowed_zone(char *buf, int len, int nid,
			      unsigned long start_pfn, unsigned long nr_pages,
			      int online_type, struct zone *default_zone)
{
	struct zone *zone;

	zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
	if (zone == default_zone)
		return 0;

	return sysfs_emit_at(buf, len, " %s", zone->name);
}

static ssize_t valid_zones_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	struct zone *default_zone;
	int len = 0;
	int nid;

	/*
	 * Check the existing zone. Make sure that we do that only on
	 * online nodes, otherwise page_zone() is not reliable.
	 */
	if (mem->state == MEM_ONLINE) {
		/*
		 * A block containing more than one zone cannot be offlined.
		 * This can happen e.g. for ZONE_DMA and ZONE_DMA32.
		 */
		default_zone = test_pages_in_a_zone(start_pfn,
						    start_pfn + nr_pages);
		if (!default_zone)
			return sysfs_emit(buf, "%s\n", "none");
		len += sysfs_emit_at(buf, len, "%s", default_zone->name);
		goto out;
	}

	nid = mem->nid;
	default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, start_pfn,
					  nr_pages);

	len += sysfs_emit_at(buf, len, "%s", default_zone->name);
	len += print_allowed_zone(buf, len, nid, start_pfn, nr_pages,
				  MMOP_ONLINE_KERNEL, default_zone);
	len += print_allowed_zone(buf, len, nid, start_pfn, nr_pages,
				  MMOP_ONLINE_MOVABLE, default_zone);
out:
	len += sysfs_emit_at(buf, len, "\n");
	return len;
}
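
/*
 * Example outputs (configuration dependent, illustrative only): an
 * offline block might report "Normal Movable", while an online block
 * reports the single zone it sits in, e.g. "Movable", or "none" if it
 * spans multiple zones and thus cannot be offlined.
 */
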
static DEVICE_ATTR_RO(valid_zones);
#endif

static DEVICE_ATTR_RO(phys_index);
static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RO(phys_device);
static DEVICE_ATTR_RO(removable);

/*
 * Show the memory block size (shared by all memory blocks).
 */
static ssize_t block_size_bytes_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", memory_block_size_bytes());
}

static DEVICE_ATTR_RO(block_size_bytes);
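
/*
 * For example, with 128 MiB blocks (illustrative):
 *	$ cat /sys/devices/system/memory/block_size_bytes
 *	8000000
 */
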
/*
 * Memory auto online policy.
 */
static ssize_t auto_online_blocks_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  online_type_to_str[mhp_default_online_type]);
}

static ssize_t auto_online_blocks_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	const int online_type = mhp_online_type_from_str(buf);

	if (online_type < 0)
		return -EINVAL;

	mhp_default_online_type = online_type;
	return count;
}

static DEVICE_ATTR_RW(auto_online_blocks);
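
/*
 * This accepts the same strings as a block's "state" file, e.g.:
 *	# echo online_movable > /sys/devices/system/memory/auto_online_blocks
 * after which newly hot-added blocks are onlined to ZONE_MOVABLE
 * automatically instead of staying offline until userspace acts.
 */
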
/*
 * Some architectures will have custom drivers to do this, and
 * will not need to do it from userspace. The fake hot-add code
 * as well as ppc64 will do all of their discovery in userspace
 * and will require this interface.
 */
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u64 phys_addr;
	int nid, ret;
	unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;

	ret = kstrtoull(buf, 0, &phys_addr);
	if (ret)
		return ret;

	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
		return -EINVAL;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	nid = memory_add_physaddr_to_nid(phys_addr);
	ret = __add_memory(nid, phys_addr,
			   MIN_MEMORY_BLOCK_SIZE * sections_per_block,
			   MHP_NONE);

	if (ret)
		goto out;

	ret = count;
out:
	unlock_device_hotplug();
	return ret;
}

static DEVICE_ATTR_WO(probe);
#endif
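
/*
 * Usage sketch (address illustrative; it must be aligned to the memory
 * block size):
 *	# echo 0x100000000 > /sys/devices/system/memory/probe
 * announces a memory block starting at that physical address.
 */
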
#ifdef CONFIG_MEMORY_FAILURE
/*
 * Support for offlining pages of memory
 */

/* Soft offline a page */
static ssize_t soft_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = soft_offline_page(pfn, 0);
	return ret == 0 ? count : ret;
}

/* Forcibly offline a page, including killing processes. */
static ssize_t hard_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = memory_failure(pfn, 0);
	return ret ? ret : count;
}

static DEVICE_ATTR_WO(soft_offline_page);
static DEVICE_ATTR_WO(hard_offline_page);
#endif
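
/*
 * Note that both stores parse a *physical address* and shift it down to
 * a pfn, so usage looks like (address illustrative):
 *	# echo 0x7f0000000 > /sys/devices/system/memory/hard_offline_page
 * which poisons the page backing that address as if hardware had
 * reported an uncorrected error there.
 */
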
/* See phys_device_show(). */
int __weak arch_get_memory_phys_device(unsigned long start_pfn)
{
	return 0;
}

/*
 * A reference for the returned memory block device is acquired.
 *
 * Called under device_hotplug_lock.
 */
static struct memory_block *find_memory_block_by_id(unsigned long block_id)
{
	struct memory_block *mem;

	mem = xa_load(&memory_blocks, block_id);
	if (mem)
		get_device(&mem->dev);
	return mem;
}

/*
 * Called under device_hotplug_lock.
 */
struct memory_block *find_memory_block(struct mem_section *section)
{
	unsigned long block_id = memory_block_id(__section_nr(section));

	return find_memory_block_by_id(block_id);
}

static struct attribute *memory_memblk_attrs[] = {
	&dev_attr_phys_index.attr,
	&dev_attr_state.attr,
	&dev_attr_phys_device.attr,
	&dev_attr_removable.attr,
#ifdef CONFIG_MEMORY_HOTREMOVE
	&dev_attr_valid_zones.attr,
#endif
	NULL
};

static struct attribute_group memory_memblk_attr_group = {
	.attrs = memory_memblk_attrs,
};

static const struct attribute_group *memory_memblk_attr_groups[] = {
	&memory_memblk_attr_group,
	NULL,
};

/*
 * register_memory - Setup a sysfs device for a memory block
 */
static int register_memory(struct memory_block *memory)
{
	int ret;

	memory->dev.bus = &memory_subsys;
	memory->dev.id = memory->start_section_nr / sections_per_block;
	memory->dev.release = memory_block_release;
	memory->dev.groups = memory_memblk_attr_groups;
	memory->dev.offline = memory->state == MEM_OFFLINE;

	ret = device_register(&memory->dev);
	if (ret) {
		put_device(&memory->dev);
		return ret;
	}
	ret = xa_err(xa_store(&memory_blocks, memory->dev.id, memory,
			      GFP_KERNEL));
	if (ret) {
		put_device(&memory->dev);
		device_unregister(&memory->dev);
	}
	return ret;
}

static int init_memory_block(unsigned long block_id, unsigned long state,
			     unsigned long nr_vmemmap_pages)
{
	struct memory_block *mem;
	int ret = 0;

	mem = find_memory_block_by_id(block_id);
	if (mem) {
		put_device(&mem->dev);
		return -EEXIST;
	}
	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->start_section_nr = block_id * sections_per_block;
	mem->state = state;
	mem->nid = NUMA_NO_NODE;
	mem->nr_vmemmap_pages = nr_vmemmap_pages;

	ret = register_memory(mem);

	return ret;
}

static int add_memory_block(unsigned long base_section_nr)
{
	int section_count = 0;
	unsigned long nr;

	for (nr = base_section_nr; nr < base_section_nr + sections_per_block;
	     nr++)
		if (present_section_nr(nr))
			section_count++;

	if (section_count == 0)
		return 0;
	return init_memory_block(memory_block_id(base_section_nr),
				 MEM_ONLINE, 0);
}

static void unregister_memory(struct memory_block *memory)
{
	if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
		return;

	WARN_ON(xa_erase(&memory_blocks, memory->dev.id) == NULL);

	/* drop the ref. we got via find_memory_block() */
	put_device(&memory->dev);
	device_unregister(&memory->dev);
}

/*
 * Create memory block devices for the given memory area. Start and size
 * have to be aligned to memory block granularity. Memory block devices
 * will be initialized as offline.
 *
 * Called under device_hotplug_lock.
 */
int create_memory_block_devices(unsigned long start, unsigned long size,
				unsigned long vmemmap_pages)
{
	const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
	unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
	struct memory_block *mem;
	unsigned long block_id;
	int ret = 0;

	if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
			 !IS_ALIGNED(size, memory_block_size_bytes())))
		return -EINVAL;

	for (block_id = start_block_id; block_id != end_block_id; block_id++) {
		ret = init_memory_block(block_id, MEM_OFFLINE, vmemmap_pages);
		if (ret)
			break;
	}
	if (ret) {
		end_block_id = block_id;
		for (block_id = start_block_id; block_id != end_block_id;
		     block_id++) {
			mem = find_memory_block_by_id(block_id);
			if (WARN_ON_ONCE(!mem))
				continue;
			unregister_memory(mem);
		}
	}
	return ret;
}

/*
 * Remove memory block devices for the given memory area. Start and size
 * have to be aligned to memory block granularity. Memory block devices
 * have to be offline.
 *
 * Called under device_hotplug_lock.
 */
void remove_memory_block_devices(unsigned long start, unsigned long size)
{
	const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
	const unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
	struct memory_block *mem;
	unsigned long block_id;

	if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
			 !IS_ALIGNED(size, memory_block_size_bytes())))
		return;

	for (block_id = start_block_id; block_id != end_block_id; block_id++) {
		mem = find_memory_block_by_id(block_id);
		if (WARN_ON_ONCE(!mem))
			continue;
		unregister_memory_block_under_nodes(mem);
		unregister_memory(mem);
	}
}

/* return true if the memory block is offlined, otherwise, return false */
bool is_memblock_offlined(struct memory_block *mem)
{
	return mem->state == MEM_OFFLINE;
}

static struct attribute *memory_root_attrs[] = {
#ifdef CONFIG_ARCH_MEMORY_PROBE
	&dev_attr_probe.attr,
#endif

#ifdef CONFIG_MEMORY_FAILURE
	&dev_attr_soft_offline_page.attr,
	&dev_attr_hard_offline_page.attr,
#endif

	&dev_attr_block_size_bytes.attr,
	&dev_attr_auto_online_blocks.attr,
	NULL
};

static struct attribute_group memory_root_attr_group = {
	.attrs = memory_root_attrs,
};

static const struct attribute_group *memory_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};

/*
 * Initialize the sysfs support for memory devices. At the time this function
 * is called, we cannot have concurrent creation/deletion of memory block
 * devices, so the device_hotplug_lock is not needed.
 */
void __init memory_dev_init(void)
{
	int ret;
	unsigned long block_sz, nr;

	/* Validate the configured memory block size */
	block_sz = memory_block_size_bytes();
	if (!is_power_of_2(block_sz) || block_sz < MIN_MEMORY_BLOCK_SIZE)
		panic("Memory block size not suitable: 0x%lx\n", block_sz);
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;

	ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
	if (ret)
		panic("%s() failed to register subsystem: %d\n", __func__, ret);

	/*
	 * Create entries for memory sections that were found
	 * during boot and have been initialized.
	 */
	for (nr = 0; nr <= __highest_present_section_nr;
	     nr += sections_per_block) {
		ret = add_memory_block(nr);
		if (ret)
			panic("%s() failed to add memory block: %d\n", __func__,
			      ret);
	}
}

/**
 * walk_memory_blocks - walk through all present memory blocks overlapped
 *			by the range [start, start + size)
 *
 * @start: start address of the memory range
 * @size: size of the memory range
 * @arg: argument passed to func
 * @func: callback for each memory block walked
 *
 * This function walks through all present memory blocks overlapped by the
 * range [start, start + size), calling func on each memory block.
 *
 * In case func() returns an error, walking is aborted and the error is
 * returned.
 *
 * Called under device_hotplug_lock.
 */
int walk_memory_blocks(unsigned long start, unsigned long size,
		       void *arg, walk_memory_blocks_func_t func)
{
	const unsigned long start_block_id = phys_to_block_id(start);
	const unsigned long end_block_id = phys_to_block_id(start + size - 1);
	struct memory_block *mem;
	unsigned long block_id;
	int ret = 0;

	if (!size)
		return 0;

	for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
		mem = find_memory_block_by_id(block_id);
		if (!mem)
			continue;

		ret = func(mem, arg);
		put_device(&mem->dev);
		if (ret)
			break;
	}
	return ret;
}
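
/*
 * Usage sketch (hypothetical caller, device_hotplug_lock already held):
 * count the memory blocks overlapping a 1 GiB range.
 *
 *	static int count_block(struct memory_block *mem, void *arg)
 *	{
 *		(*(unsigned int *)arg)++;
 *		return 0;
 *	}
 *
 *	unsigned int nr = 0;
 *
 *	walk_memory_blocks(start, SZ_1G, &nr, count_block);
 */
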
struct for_each_memory_block_cb_data {
	walk_memory_blocks_func_t func;
	void *arg;
};

static int for_each_memory_block_cb(struct device *dev, void *data)
{
	struct memory_block *mem = to_memory_block(dev);
	struct for_each_memory_block_cb_data *cb_data = data;

	return cb_data->func(mem, cb_data->arg);
}

/**
 * for_each_memory_block - walk through all present memory blocks
 *
 * @arg: argument passed to func
 * @func: callback for each memory block walked
 *
 * This function walks through all present memory blocks, calling func on
 * each memory block.
 *
 * In case func() returns an error, walking is aborted and the error is
 * returned.
 */
int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
{
	struct for_each_memory_block_cb_data cb_data = {
		.func = func,
		.arg = arg,
	};

	return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
				for_each_memory_block_cb);
}
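
/*
 * Design note: walk_memory_blocks() looks blocks up by id in the local
 * xarray for one physical range, whereas for_each_memory_block() simply
 * visits every device registered on the memory bus - useful when a
 * caller wants to inspect all blocks rather than a specific range.
 */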