// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/lockdep.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/memory.h>
#include <linux/memory-tiers.h>
#include <linux/notifier.h>

#include "internal.h"

struct memory_tier {
	/* hierarchy of memory tiers */
	struct list_head list;
	/* list of all memory types part of this tier */
	struct list_head memory_types;
	/*
	 * start value of abstract distance. memory tier maps
	 * an abstract distance range,
	 * adistance_start .. adistance_start + MEMTIER_CHUNK_SIZE
	 */
	int adistance_start;
	struct device dev;
	/* All the nodes that are part of all the lower memory tiers. */
	nodemask_t lower_tier_mask;
};
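
/*
 * Illustrative note (a sketch based on the MEMTIER_CHUNK_BITS constant in
 * <linux/memory-tiers.h>, not on anything defined in this file): each tier
 * covers an abstract distance chunk of 1 << 7 == 128, so the default DRAM
 * abstract distance falls in the chunk starting at 4 * 128 == 512 and the
 * corresponding tier device gets id 512 >> 7 == 4, i.e. it shows up as
 * "memory_tier4" on the memory_tiering bus.
 */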

struct demotion_nodes {
	nodemask_t preferred;
};

struct node_memory_type_map {
	struct memory_dev_type *memtype;
	int map_count;
};

static DEFINE_MUTEX(memory_tier_lock);
static LIST_HEAD(memory_tiers);
static struct node_memory_type_map node_memory_types[MAX_NUMNODES];
struct memory_dev_type *default_dram_type;

static struct bus_type memory_tier_subsys = {
	.name = "memory_tiering",
	.dev_name = "memory_tier",
};

#ifdef CONFIG_MIGRATION
static int top_tier_adistance;

/*
 * node_demotion[] examples:
 *
 * Example 1: node 0 & 1 are CPU + DRAM nodes, node 2 & 3 are PMEM nodes.
 *
 * memory_tiers0 = 0-1
 * memory_tiers1 = 2-3
 *
 * node_demotion[0].preferred = 2
 * node_demotion[1].preferred = 3
 * node_demotion[2].preferred = <empty>
 * node_demotion[3].preferred = <empty>
 *
 * Example 2: node 0 & 1 are CPU + DRAM nodes, node 2 is a memory-only DRAM node.
 *
 * memory_tiers0 = 0-2
 *
 * node_demotion[0].preferred = <empty>
 * node_demotion[1].preferred = <empty>
 * node_demotion[2].preferred = <empty>
 *
 * Example 3: node 0 is a CPU + DRAM node, node 1 is an HBM node, node 2 is a PMEM node.
 *
 * memory_tiers0 = 1
 * memory_tiers1 = 0
 * memory_tiers2 = 2
 *
 * node_demotion[0].preferred = 2
 * node_demotion[1].preferred = 0
 * node_demotion[2].preferred = <empty>
 */
static struct demotion_nodes *node_demotion __read_mostly;
#endif /* CONFIG_MIGRATION */

static BLOCKING_NOTIFIER_HEAD(mt_adistance_algorithms);

static bool default_dram_perf_error;
static struct node_hmem_attrs default_dram_perf;
static int default_dram_perf_ref_nid = NUMA_NO_NODE;
static const char *default_dram_perf_ref_source;

static inline struct memory_tier *to_memory_tier(struct device *device)
{
	return container_of(device, struct memory_tier, dev);
}

static __always_inline nodemask_t get_memtier_nodemask(struct memory_tier *memtier)
{
	nodemask_t nodes = NODE_MASK_NONE;
	struct memory_dev_type *memtype;

	list_for_each_entry(memtype, &memtier->memory_types, tier_sibling)
		nodes_or(nodes, nodes, memtype->nodes);

	return nodes;
}

static void memory_tier_device_release(struct device *dev)
{
	struct memory_tier *tier = to_memory_tier(dev);

	/*
	 * synchronize_rcu in clear_node_memory_tier makes sure
	 * we don't have rcu access to this memory tier.
	 */
	kfree(tier);
}

static ssize_t nodelist_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	int ret;
	nodemask_t nmask;

	mutex_lock(&memory_tier_lock);
	nmask = get_memtier_nodemask(to_memory_tier(dev));
	ret = sysfs_emit(buf, "%*pbl\n", nodemask_pr_args(&nmask));
	mutex_unlock(&memory_tier_lock);
	return ret;
}
static DEVICE_ATTR_RO(nodelist);
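
/*
 * Illustrative only (the paths below follow from the "memory_tiering"
 * virtual subsystem registered in memory_tier_init(), they are not defined
 * in this file): each tier is visible to userspace as
 * /sys/devices/virtual/memory_tiering/memory_tierN/nodelist. On a DRAM-only
 * machine, reading memory_tier4/nodelist typically prints the DRAM node
 * ids, e.g. "0-1".
 */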

static struct attribute *memtier_dev_attrs[] = {
	&dev_attr_nodelist.attr,
	NULL
};

static const struct attribute_group memtier_dev_group = {
	.attrs = memtier_dev_attrs,
};

static const struct attribute_group *memtier_dev_groups[] = {
	&memtier_dev_group,
	NULL
};

static struct memory_tier *find_create_memory_tier(struct memory_dev_type *memtype)
{
	int ret;
	bool found_slot = false;
	struct memory_tier *memtier, *new_memtier;
	int adistance = memtype->adistance;
	unsigned int memtier_adistance_chunk_size = MEMTIER_CHUNK_SIZE;

	lockdep_assert_held_once(&memory_tier_lock);

	adistance = round_down(adistance, memtier_adistance_chunk_size);
	/*
	 * If the memtype is already part of a memory tier,
	 * just return that.
	 */
	if (!list_empty(&memtype->tier_sibling)) {
		list_for_each_entry(memtier, &memory_tiers, list) {
			if (adistance == memtier->adistance_start)
				return memtier;
		}
		WARN_ON(1);
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry(memtier, &memory_tiers, list) {
		if (adistance == memtier->adistance_start) {
			goto link_memtype;
		} else if (adistance < memtier->adistance_start) {
			found_slot = true;
			break;
		}
	}

	new_memtier = kzalloc(sizeof(struct memory_tier), GFP_KERNEL);
	if (!new_memtier)
		return ERR_PTR(-ENOMEM);

	new_memtier->adistance_start = adistance;
	INIT_LIST_HEAD(&new_memtier->list);
	INIT_LIST_HEAD(&new_memtier->memory_types);
	if (found_slot)
		list_add_tail(&new_memtier->list, &memtier->list);
	else
		list_add_tail(&new_memtier->list, &memory_tiers);

	new_memtier->dev.id = adistance >> MEMTIER_CHUNK_BITS;
	new_memtier->dev.bus = &memory_tier_subsys;
	new_memtier->dev.release = memory_tier_device_release;
	new_memtier->dev.groups = memtier_dev_groups;

	ret = device_register(&new_memtier->dev);
	if (ret) {
		list_del(&new_memtier->list);
		put_device(&new_memtier->dev);
		return ERR_PTR(ret);
	}
	memtier = new_memtier;

link_memtype:
	list_add(&memtype->tier_sibling, &memtier->memory_types);
	return memtier;
}

static struct memory_tier *__node_get_memory_tier(int node)
{
	pg_data_t *pgdat;

	pgdat = NODE_DATA(node);
	if (!pgdat)
		return NULL;
	/*
	 * Since we hold memory_tier_lock, we can avoid
	 * RCU read locks when accessing the details. No
	 * parallel updates are possible here.
	 */
	return rcu_dereference_check(pgdat->memtier,
				     lockdep_is_held(&memory_tier_lock));
}

#ifdef CONFIG_MIGRATION
bool node_is_toptier(int node)
{
	bool toptier = true;
	struct memory_tier *memtier;
	pg_data_t *pgdat = NODE_DATA(node);

	if (!pgdat)
		return false;

	rcu_read_lock();
	memtier = rcu_dereference(pgdat->memtier);
	if (memtier && memtier->adistance_start > top_tier_adistance)
		toptier = false;
	rcu_read_unlock();

	return toptier;
}

void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
{
	struct memory_tier *memtier;

	/*
	 * pg_data_t.memtier updates include a synchronize_rcu()
	 * which ensures that we either find NULL or a valid memtier
	 * in NODE_DATA. Protect the access via rcu_read_lock().
	 */
	rcu_read_lock();
	memtier = rcu_dereference(pgdat->memtier);
	if (memtier)
		*targets = memtier->lower_tier_mask;
	else
		*targets = NODE_MASK_NONE;
	rcu_read_unlock();
}

/**
 * next_demotion_node() - Get the next node in the demotion path
 * @node: The starting node to lookup the next node
 *
 * Return: node id for next memory node in the demotion path hierarchy
 * from @node; NUMA_NO_NODE if @node is terminal. This does not keep
 * @node online or guarantee that it *continues* to be the next demotion
 * target.
 */
int next_demotion_node(int node)
{
	struct demotion_nodes *nd;
	int target;

	if (!node_demotion)
		return NUMA_NO_NODE;

	nd = &node_demotion[node];

	rcu_read_lock();
	/*
	 * node_demotion[] is updated without excluding this
	 * function from running.
	 *
	 * Make sure to use RCU over entire code blocks if
	 * node_demotion[] reads need to be consistent.
	 */

	/*
	 * If there are multiple target nodes, just select one
	 * target node randomly.
	 *
	 * We could also use round-robin to select the target node, but
	 * that would need another field in node_demotion[] to record
	 * the last selected target node, which may cause cache
	 * ping-pong as it changes. Per-CPU data could avoid that
	 * caching issue but seems more complicated. So picking a
	 * target node randomly is the simplest choice for now.
	 */
	target = node_random(&nd->preferred);
	rcu_read_unlock();

	return target;
}
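
/*
 * Usage sketch (hedged; this mirrors how the reclaim-based demotion path is
 * expected to consume this API, it is not code from this file):
 *
 *	int target_nid = next_demotion_node(pgdat->node_id);
 *
 *	if (target_nid == NUMA_NO_NODE)
 *		return 0;	// bottom tier: nowhere to demote to
 */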

static void disable_all_demotion_targets(void)
{
	struct memory_tier *memtier;
	int node;

	for_each_node_state(node, N_MEMORY) {
		node_demotion[node].preferred = NODE_MASK_NONE;
		/*
		 * We are holding memory_tier_lock, it is safe
		 * to access pgdat->memtier.
		 */
		memtier = __node_get_memory_tier(node);
		if (memtier)
			memtier->lower_tier_mask = NODE_MASK_NONE;
	}
	/*
	 * Ensure that the "disable" is visible across the system.
	 * Readers will see either a combination of before+disable
	 * state or disable+after. They will never see before and
	 * after state together.
	 */
	synchronize_rcu();
}

/*
 * Find an automatic demotion target for all memory
 * nodes. Failing here is OK. It might just indicate
 * being at the end of a chain.
 */
static void establish_demotion_targets(void)
{
	struct memory_tier *memtier;
	struct demotion_nodes *nd;
	int target = NUMA_NO_NODE, node;
	int distance, best_distance;
	nodemask_t tier_nodes, lower_tier;

	lockdep_assert_held_once(&memory_tier_lock);

	if (!node_demotion)
		return;

	disable_all_demotion_targets();

	for_each_node_state(node, N_MEMORY) {
		best_distance = -1;
		nd = &node_demotion[node];

		memtier = __node_get_memory_tier(node);
		if (!memtier || list_is_last(&memtier->list, &memory_tiers))
			continue;
		/*
		 * Get the lower memtier to find the demotion node list.
		 */
		memtier = list_next_entry(memtier, list);
		tier_nodes = get_memtier_nodemask(memtier);
		/*
		 * find_next_best_node() uses the 'used' nodemask as a skip
		 * list. Add all memory nodes except the selected memory tier
		 * nodelist to the skip list so that we find the best node
		 * from the memtier nodelist.
		 */
		nodes_andnot(tier_nodes, node_states[N_MEMORY], tier_nodes);

		/*
		 * Find all the nodes in the memory tier node list of the same
		 * best distance and add them to the preferred mask. We
		 * randomly select between nodes in the preferred mask when
		 * allocating pages during demotion.
		 */
		do {
			target = find_next_best_node(node, &tier_nodes);
			if (target == NUMA_NO_NODE)
				break;

			distance = node_distance(node, target);
			if (distance == best_distance || best_distance == -1) {
				best_distance = distance;
				node_set(target, nd->preferred);
			} else {
				break;
			}
		} while (1);
	}
	/*
	 * Promotion is allowed from a memory tier to a higher
	 * memory tier only if the memory tier doesn't include
	 * compute. We want to skip promotion from a memory tier
	 * if any node that is part of the memory tier has CPUs.
	 * Once we detect such a memory tier, we consider that tier
	 * as the top tier from which promotion is not allowed.
	 */
	list_for_each_entry_reverse(memtier, &memory_tiers, list) {
		tier_nodes = get_memtier_nodemask(memtier);
		nodes_and(tier_nodes, node_states[N_CPU], tier_nodes);
		if (!nodes_empty(tier_nodes)) {
			/*
			 * abstract distance below the max value of this
			 * memtier is considered toptier.
			 */
			top_tier_adistance = memtier->adistance_start +
						MEMTIER_CHUNK_SIZE - 1;
			break;
		}
	}
	/*
	 * Now build the lower_tier mask for each node, collecting the node
	 * mask from all memory tiers below it. This allows us to fall back
	 * demotion page allocation to a set of nodes that is closer to the
	 * above selected preferred node.
	 */
	lower_tier = node_states[N_MEMORY];
	list_for_each_entry(memtier, &memory_tiers, list) {
		/*
		 * Keep removing the current tier from lower_tier nodes.
		 * This removes all nodes in the current and above memory
		 * tiers from the lower_tier mask.
		 */
		tier_nodes = get_memtier_nodemask(memtier);
		nodes_andnot(lower_tier, lower_tier, tier_nodes);
		memtier->lower_tier_mask = lower_tier;
	}
}
#else
static inline void disable_all_demotion_targets(void) {}
static inline void establish_demotion_targets(void) {}
#endif /* CONFIG_MIGRATION */

static inline void __init_node_memory_type(int node, struct memory_dev_type *memtype)
{
	if (!node_memory_types[node].memtype)
		node_memory_types[node].memtype = memtype;
	/*
	 * For each device getting added in the same NUMA node
	 * with this specific memtype, bump the map count. We
	 * only take a memtype device reference once, so that
	 * changing a node memtype can be done by dropping the
	 * only reference count taken here.
	 */
	if (node_memory_types[node].memtype == memtype) {
		if (!node_memory_types[node].map_count++)
			kref_get(&memtype->kref);
	}
}

static struct memory_tier *set_node_memory_tier(int node)
{
	struct memory_tier *memtier;
	struct memory_dev_type *memtype;
	pg_data_t *pgdat = NODE_DATA(node);

	lockdep_assert_held_once(&memory_tier_lock);

	if (!node_state(node, N_MEMORY))
		return ERR_PTR(-EINVAL);

	__init_node_memory_type(node, default_dram_type);

	memtype = node_memory_types[node].memtype;
	node_set(node, memtype->nodes);
	memtier = find_create_memory_tier(memtype);
	if (!IS_ERR(memtier))
		rcu_assign_pointer(pgdat->memtier, memtier);
	return memtier;
}

static void destroy_memory_tier(struct memory_tier *memtier)
{
	list_del(&memtier->list);
	device_unregister(&memtier->dev);
}

static bool clear_node_memory_tier(int node)
{
	bool cleared = false;
	pg_data_t *pgdat;
	struct memory_tier *memtier;

	pgdat = NODE_DATA(node);
	if (!pgdat)
		return false;

	/*
	 * Make sure that anybody looking at NODE_DATA who finds
	 * a valid memtier finds memory_dev_types with nodes still
	 * linked to the memtier. We achieve this by waiting for
	 * rcu read section to finish using synchronize_rcu.
	 * This also enables us to free the destroyed memory tier
	 * with kfree instead of kfree_rcu.
	 */
	memtier = __node_get_memory_tier(node);
	if (memtier) {
		struct memory_dev_type *memtype;

		rcu_assign_pointer(pgdat->memtier, NULL);
		synchronize_rcu();
		memtype = node_memory_types[node].memtype;
		node_clear(node, memtype->nodes);
		if (nodes_empty(memtype->nodes)) {
			list_del_init(&memtype->tier_sibling);
			if (list_empty(&memtier->memory_types))
				destroy_memory_tier(memtier);
		}
		cleared = true;
	}
	return cleared;
}

static void release_memtype(struct kref *kref)
{
	struct memory_dev_type *memtype;

	memtype = container_of(kref, struct memory_dev_type, kref);
	kfree(memtype);
}

struct memory_dev_type *alloc_memory_type(int adistance)
{
	struct memory_dev_type *memtype;

	memtype = kmalloc(sizeof(*memtype), GFP_KERNEL);
	if (!memtype)
		return ERR_PTR(-ENOMEM);

	memtype->adistance = adistance;
	INIT_LIST_HEAD(&memtype->tier_sibling);
	memtype->nodes = NODE_MASK_NONE;
	kref_init(&memtype->kref);
	return memtype;
}
EXPORT_SYMBOL_GPL(alloc_memory_type);

void put_memory_type(struct memory_dev_type *memtype)
{
	kref_put(&memtype->kref, release_memtype);
}
EXPORT_SYMBOL_GPL(put_memory_type);

void init_node_memory_type(int node, struct memory_dev_type *memtype)
{
	mutex_lock(&memory_tier_lock);
	__init_node_memory_type(node, memtype);
	mutex_unlock(&memory_tier_lock);
}
EXPORT_SYMBOL_GPL(init_node_memory_type);
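
/*
 * Lifecycle sketch (hedged; "nid" and "my_adist" are placeholders, not
 * values defined in this file): a memory device driver that onlines memory
 * on a NUMA node is expected to pair these helpers roughly as follows.
 *
 *	struct memory_dev_type *mtype = alloc_memory_type(my_adist);
 *
 *	if (!IS_ERR(mtype))
 *		init_node_memory_type(nid, mtype);
 *	...
 *	// teardown: drop the per-node mapping, then the type reference
 *	clear_node_memory_type(nid, mtype);
 *	put_memory_type(mtype);
 */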

void clear_node_memory_type(int node, struct memory_dev_type *memtype)
{
	mutex_lock(&memory_tier_lock);
	if (node_memory_types[node].memtype == memtype || !memtype)
		node_memory_types[node].map_count--;
	/*
	 * If we unmapped all the attached devices to this node,
	 * clear the node memory type.
	 */
	if (!node_memory_types[node].map_count) {
		memtype = node_memory_types[node].memtype;
		node_memory_types[node].memtype = NULL;
		put_memory_type(memtype);
	}
	mutex_unlock(&memory_tier_lock);
}
EXPORT_SYMBOL_GPL(clear_node_memory_type);

static void dump_hmem_attrs(struct node_hmem_attrs *attrs, const char *prefix)
{
	pr_info(
"%sread_latency: %u, write_latency: %u, read_bandwidth: %u, write_bandwidth: %u\n",
		prefix, attrs->read_latency, attrs->write_latency,
		attrs->read_bandwidth, attrs->write_bandwidth);
}

int mt_set_default_dram_perf(int nid, struct node_hmem_attrs *perf,
			     const char *source)
{
	int rc = 0;

	mutex_lock(&memory_tier_lock);
	if (default_dram_perf_error) {
		rc = -EIO;
		goto out;
	}

	if (perf->read_latency + perf->write_latency == 0 ||
	    perf->read_bandwidth + perf->write_bandwidth == 0) {
		rc = -EINVAL;
		goto out;
	}

	if (default_dram_perf_ref_nid == NUMA_NO_NODE) {
		default_dram_perf = *perf;
		default_dram_perf_ref_nid = nid;
		default_dram_perf_ref_source = kstrdup(source, GFP_KERNEL);
		goto out;
	}

	/*
	 * The performance of all default DRAM nodes is expected to be the
	 * same (that is, the variation is less than 10%). It will be used
	 * as the base to calculate the abstract distance of other memory
	 * nodes.
	 */
	if (abs(perf->read_latency - default_dram_perf.read_latency) * 10 >
	    default_dram_perf.read_latency ||
	    abs(perf->write_latency - default_dram_perf.write_latency) * 10 >
	    default_dram_perf.write_latency ||
	    abs(perf->read_bandwidth - default_dram_perf.read_bandwidth) * 10 >
	    default_dram_perf.read_bandwidth ||
	    abs(perf->write_bandwidth - default_dram_perf.write_bandwidth) * 10 >
	    default_dram_perf.write_bandwidth) {
		pr_info(
"memory-tiers: the performance of DRAM node %d mismatches that of the reference\n"
"DRAM node %d.\n", nid, default_dram_perf_ref_nid);
		pr_info("  performance of reference DRAM node %d:\n",
			default_dram_perf_ref_nid);
		dump_hmem_attrs(&default_dram_perf, "    ");
		pr_info("  performance of DRAM node %d:\n", nid);
		dump_hmem_attrs(perf, "    ");
		pr_info(
"  disable default DRAM node performance based abstract distance algorithm.\n");
		default_dram_perf_error = true;
		rc = -EINVAL;
	}

out:
	mutex_unlock(&memory_tier_lock);
	return rc;
}

int mt_perf_to_adistance(struct node_hmem_attrs *perf, int *adist)
{
	if (default_dram_perf_error)
		return -EIO;

	if (default_dram_perf_ref_nid == NUMA_NO_NODE)
		return -ENOENT;

	if (perf->read_latency + perf->write_latency == 0 ||
	    perf->read_bandwidth + perf->write_bandwidth == 0)
		return -EINVAL;

	mutex_lock(&memory_tier_lock);
	/*
	 * The abstract distance of a memory node is in direct proportion to
	 * its memory latency (read + write) and inversely proportional to its
	 * memory bandwidth (read + write). The abstract distance, memory
	 * latency, and memory bandwidth of the default DRAM nodes are used as
	 * the base.
	 */
	*adist = MEMTIER_ADISTANCE_DRAM *
		(perf->read_latency + perf->write_latency) /
		(default_dram_perf.read_latency + default_dram_perf.write_latency) *
		(default_dram_perf.read_bandwidth + default_dram_perf.write_bandwidth) /
		(perf->read_bandwidth + perf->write_bandwidth);
	mutex_unlock(&memory_tier_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(mt_perf_to_adistance);
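
/*
 * Worked example (the numbers are illustrative, nothing here is measured):
 * if the default DRAM reference has read+write latency of 200 ns and
 * read+write bandwidth of 40000 MB/s, and a slower memory node reports
 * 600 ns and 20000 MB/s, the formula above gives
 *
 *	adist = MEMTIER_ADISTANCE_DRAM * (600 / 200) * (40000 / 20000)
 *	      = MEMTIER_ADISTANCE_DRAM * 6
 *
 * i.e. the node lands several chunks further out than DRAM and is placed
 * in a lower (slower) memory tier.
 */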

/**
 * register_mt_adistance_algorithm() - Register memory tiering abstract distance algorithm
 * @nb: The notifier block which describes the algorithm
 *
 * Return: 0 on success, errno on error.
 *
 * Every memory tiering abstract distance algorithm provider needs to
 * register the algorithm with register_mt_adistance_algorithm(). To
 * calculate the abstract distance for a specified memory node, the
 * notifier function will be called unless some higher priority
 * algorithm has already provided a result. The prototype of the notifier
 * function is as follows,
 *
 *   int (*algorithm_notifier)(struct notifier_block *nb,
 *                             unsigned long nid, void *data);
 *
 * Where "nid" specifies the memory node, "data" is the pointer to the
 * returned abstract distance (that is, "int *adist"). If the
 * algorithm provides the result, NOTIFY_STOP should be returned.
 * Otherwise, return_value & %NOTIFY_STOP_MASK == 0 to allow the next
 * algorithm in the chain to provide the result.
 */
int register_mt_adistance_algorithm(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&mt_adistance_algorithms, nb);
}
EXPORT_SYMBOL_GPL(register_mt_adistance_algorithm);
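
/*
 * Hypothetical provider sketch (the function names and flow below are made
 * up for illustration; only the notifier prototype documented above is
 * dictated by this API):
 *
 *	static int my_adistance_notifier(struct notifier_block *nb,
 *					 unsigned long nid, void *data)
 *	{
 *		int *adist = data;
 *
 *		if (!my_driver_owns_node(nid))
 *			return NOTIFY_OK;	// let other algorithms try
 *
 *		*adist = my_compute_adistance(nid);
 *		return NOTIFY_STOP;		// result provided, stop the chain
 *	}
 *
 *	static struct notifier_block my_adist_nb = {
 *		.notifier_call = my_adistance_notifier,
 *	};
 *
 *	register_mt_adistance_algorithm(&my_adist_nb);
 */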

/**
 * unregister_mt_adistance_algorithm() - Unregister memory tiering abstract distance algorithm
 * @nb: the notifier block which describes the algorithm
 *
 * Return: 0 on success, errno on error.
 */
int unregister_mt_adistance_algorithm(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&mt_adistance_algorithms, nb);
}
EXPORT_SYMBOL_GPL(unregister_mt_adistance_algorithm);

/**
 * mt_calc_adistance() - Calculate abstract distance with registered algorithms
 * @node: the node to calculate abstract distance for
 * @adist: the returned abstract distance
 *
 * Return: if return_value & %NOTIFY_STOP_MASK != 0, then some
 * abstract distance algorithm provides the result, and return it via
 * @adist. Otherwise, no algorithm can provide the result and @adist
 * will be kept as it is.
 */
int mt_calc_adistance(int node, int *adist)
{
	return blocking_notifier_call_chain(&mt_adistance_algorithms, node, adist);
}
EXPORT_SYMBOL_GPL(mt_calc_adistance);
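
/*
 * Caller sketch (hedged; "nid" and the flow are illustrative, not code from
 * this file): a memory device driver typically starts from a conservative
 * default abstract distance and lets registered algorithms refine it before
 * allocating a memory type for that distance.
 *
 *	int adist = MEMTIER_DEFAULT_DAX_ADISTANCE;
 *	struct memory_dev_type *mtype;
 *
 *	mt_calc_adistance(nid, &adist);
 *	mtype = alloc_memory_type(adist);
 */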

static int __meminit memtier_hotplug_callback(struct notifier_block *self,
					      unsigned long action, void *_arg)
{
	struct memory_tier *memtier;
	struct memory_notify *arg = _arg;

	/*
	 * Only update the node migration order when a node is
	 * changing status, like online->offline.
	 */
	if (arg->status_change_nid < 0)
		return notifier_from_errno(0);

	switch (action) {
	case MEM_OFFLINE:
		mutex_lock(&memory_tier_lock);
		if (clear_node_memory_tier(arg->status_change_nid))
			establish_demotion_targets();
		mutex_unlock(&memory_tier_lock);
		break;
	case MEM_ONLINE:
		mutex_lock(&memory_tier_lock);
		memtier = set_node_memory_tier(arg->status_change_nid);
		if (!IS_ERR(memtier))
			establish_demotion_targets();
		mutex_unlock(&memory_tier_lock);
		break;
	}

	return notifier_from_errno(0);
}

static int __init memory_tier_init(void)
{
	int ret, node;
	struct memory_tier *memtier;

	ret = subsys_virtual_register(&memory_tier_subsys, NULL);
	if (ret)
		panic("%s() failed to register memory tier subsystem\n", __func__);

#ifdef CONFIG_MIGRATION
	node_demotion = kcalloc(nr_node_ids, sizeof(struct demotion_nodes),
				GFP_KERNEL);
	WARN_ON(!node_demotion);
#endif
	mutex_lock(&memory_tier_lock);
	/*
	 * For now we can have 4 faster memory tiers with smaller adistance
	 * than default DRAM tier.
	 */
	default_dram_type = alloc_memory_type(MEMTIER_ADISTANCE_DRAM);
	if (IS_ERR(default_dram_type))
		panic("%s() failed to allocate default DRAM tier\n", __func__);

	/*
	 * Look at all the existing N_MEMORY nodes and add them to
	 * default memory tier or to a tier if we already have memory
	 * types assigned to them.
	 */
	for_each_node_state(node, N_MEMORY) {
		memtier = set_node_memory_tier(node);
		if (IS_ERR(memtier))
			/*
			 * Continue with memtiers we are able to set up.
			 */
			break;
	}

	establish_demotion_targets();
	mutex_unlock(&memory_tier_lock);

	hotplug_memory_notifier(memtier_hotplug_callback, MEMTIER_HOTPLUG_PRI);
	return 0;
}
subsys_initcall(memory_tier_init);

bool numa_demotion_enabled = false;

#ifdef CONFIG_MIGRATION
#ifdef CONFIG_SYSFS
static ssize_t demotion_enabled_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  numa_demotion_enabled ? "true" : "false");
}

static ssize_t demotion_enabled_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	ssize_t ret;

	ret = kstrtobool(buf, &numa_demotion_enabled);
	if (ret)
		return ret;

	return count;
}

static struct kobj_attribute numa_demotion_enabled_attr =
	__ATTR_RW(demotion_enabled);
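
/*
 * Userspace knob (the path follows from the "numa" kobject created under
 * /sys/kernel/mm in numa_init_sysfs() below; shown as a usage hint, not
 * defined here):
 *
 *	echo true > /sys/kernel/mm/numa/demotion_enabled
 *
 * enables demoting cold pages to a lower memory tier during reclaim instead
 * of discarding them.
 */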

static struct attribute *numa_attrs[] = {
	&numa_demotion_enabled_attr.attr,
	NULL,
};

static const struct attribute_group numa_attr_group = {
	.attrs = numa_attrs,
};

static int __init numa_init_sysfs(void)
{
	int err;
	struct kobject *numa_kobj;

	numa_kobj = kobject_create_and_add("numa", mm_kobj);
	if (!numa_kobj) {
		pr_err("failed to create numa kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(numa_kobj, &numa_attr_group);
	if (err) {
		pr_err("failed to register numa group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(numa_kobj);
	return err;
}
subsys_initcall(numa_init_sysfs);
#endif /* CONFIG_SYSFS */
#endif /* CONFIG_MIGRATION */