mm/demotion: add support for explicit memory tiers
[linux-block.git] / mm/memory-tiers.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/nodemask.h>
#include <linux/slab.h>
#include <linux/lockdep.h>
#include <linux/memory-tiers.h>

struct memory_tier {
        /* hierarchy of memory tiers */
        struct list_head list;
        /* list of all memory types that are part of this tier */
        struct list_head memory_types;
        /*
         * Start value of the abstract distance range covered by this
         * tier: adistance_start .. adistance_start + MEMTIER_CHUNK_SIZE
         */
        int adistance_start;
};

struct memory_dev_type {
        /* list of memory types that are part of the same tier as this type */
        struct list_head tier_sibiling;
        /* abstract distance for this specific memory type */
        int adistance;
        /* nodes of the same abstract distance */
        nodemask_t nodes;
        struct memory_tier *memtier;
};

static DEFINE_MUTEX(memory_tier_lock);
static LIST_HEAD(memory_tiers);
static struct memory_dev_type *node_memory_types[MAX_NUMNODES];
/*
 * For now we can have 4 faster memory tiers with smaller adistance
 * than the default DRAM tier.
 */
static struct memory_dev_type default_dram_type = {
        .adistance = MEMTIER_ADISTANCE_DRAM,
        .tier_sibiling = LIST_HEAD_INIT(default_dram_type.tier_sibiling),
};

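/*
 * Worked example (illustrative; assumes MEMTIER_CHUNK_SIZE == 128 and
 * MEMTIER_ADISTANCE_DRAM == 576, since the header is not part of this
 * file): the abstract distance space is carved into 128-wide chunks,
 * and the default DRAM type lands in the chunk that starts at
 * round_down(576, 128) == 512.  That leaves the four chunks starting at
 * 0, 128, 256 and 384 for faster-than-DRAM tiers, which is where the
 * "4 faster memory tiers" in the comment above comes from.
 */
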
static struct memory_tier *find_create_memory_tier(struct memory_dev_type *memtype)
{
        bool found_slot = false;
        struct memory_tier *memtier, *new_memtier;
        int adistance = memtype->adistance;
        unsigned int memtier_adistance_chunk_size = MEMTIER_CHUNK_SIZE;

        lockdep_assert_held_once(&memory_tier_lock);

        /*
         * If the memtype is already part of a memory tier,
         * just return that.
         */
        if (memtype->memtier)
                return memtype->memtier;

        adistance = round_down(adistance, memtier_adistance_chunk_size);
        list_for_each_entry(memtier, &memory_tiers, list) {
                if (adistance == memtier->adistance_start) {
                        memtype->memtier = memtier;
                        list_add(&memtype->tier_sibiling, &memtier->memory_types);
                        return memtier;
                } else if (adistance < memtier->adistance_start) {
                        found_slot = true;
                        break;
                }
        }

        new_memtier = kmalloc(sizeof(struct memory_tier), GFP_KERNEL);
        if (!new_memtier)
                return ERR_PTR(-ENOMEM);

        new_memtier->adistance_start = adistance;
        INIT_LIST_HEAD(&new_memtier->list);
        INIT_LIST_HEAD(&new_memtier->memory_types);
        if (found_slot)
                list_add_tail(&new_memtier->list, &memtier->list);
        else
                list_add_tail(&new_memtier->list, &memory_tiers);
        memtype->memtier = new_memtier;
        list_add(&memtype->tier_sibiling, &new_memtier->memory_types);
        return new_memtier;
}

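/*
 * Worked example (illustrative; again assuming MEMTIER_CHUNK_SIZE == 128
 * and a single existing DRAM tier with adistance_start == 512):
 *
 *  - a memtype with adistance 576 rounds down to 512, matches the DRAM
 *    tier and is simply linked onto its memory_types list;
 *  - a memtype with adistance 700 rounds down to 640; no tier starts at
 *    640 and 640 is not below 512, so found_slot stays false and a new
 *    (slower) tier is appended at the tail of memory_tiers;
 *  - a memtype with adistance 300 rounds down to 256, which is below
 *    512, so found_slot is set and the new (faster) tier is inserted
 *    just before the DRAM tier, keeping memory_tiers sorted by
 *    adistance_start.
 */
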
static struct memory_tier *set_node_memory_tier(int node)
{
        struct memory_tier *memtier;
        struct memory_dev_type *memtype;

        lockdep_assert_held_once(&memory_tier_lock);

        if (!node_state(node, N_MEMORY))
                return ERR_PTR(-EINVAL);

        if (!node_memory_types[node])
                node_memory_types[node] = &default_dram_type;

        memtype = node_memory_types[node];
        node_set(node, memtype->nodes);
        memtier = find_create_memory_tier(memtype);
        return memtier;
}

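/*
 * Note: nothing in this file fills node_memory_types[] with anything
 * other than &default_dram_type, so at this point every N_MEMORY node
 * ends up in the default DRAM tier.  If a slot held a different
 * memory_dev_type (hypothetical here), the node would instead be routed
 * to the tier derived from that type's adistance by
 * find_create_memory_tier().
 */
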
static int __init memory_tier_init(void)
{
        int node;
        struct memory_tier *memtier;

        mutex_lock(&memory_tier_lock);
        /*
         * Look at all the existing N_MEMORY nodes and add them to the
         * default memory tier, or to another tier if a memory type has
         * already been assigned.
         */
        for_each_node_state(node, N_MEMORY) {
                memtier = set_node_memory_tier(node);
                if (IS_ERR(memtier))
                        /*
                         * Stop here and continue with the memtiers we
                         * were able to set up.
                         */
                        break;
        }
        mutex_unlock(&memory_tier_lock);

        return 0;
}
subsys_initcall(memory_tier_init);
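
The tier-placement policy above is compact enough to model outside the kernel. The stand-alone sketch below (user-space C, illustrative only: the chunk size of 128 and every name in it are assumptions made for this example, not kernel API) reimplements the same idea as find_create_memory_tier(): round each abstract distance down to its chunk start, reuse a tier whose adistance_start already matches, and otherwise insert a new tier while keeping the list sorted.

#include <stdio.h>
#include <stddef.h>

#define CHUNK_SIZE 128  /* assumed chunk width, standing in for MEMTIER_CHUNK_SIZE */

int main(void)
{
        int tiers[16];          /* sorted adistance_start values */
        int ntiers = 0;
        /* sample abstract distances: DRAM, a slower device, a faster device, DRAM again */
        int adistances[] = { 576, 700, 300, 576 };

        for (size_t i = 0; i < sizeof(adistances) / sizeof(adistances[0]); i++) {
                int start = adistances[i] - (adistances[i] % CHUNK_SIZE); /* round_down() */
                int pos = 0;

                while (pos < ntiers && tiers[pos] < start)
                        pos++;
                if (pos < ntiers && tiers[pos] == start)
                        continue;       /* reuse the existing tier, like the early match above */
                for (int j = ntiers; j > pos; j--)      /* shift to keep the array sorted */
                        tiers[j] = tiers[j - 1];
                tiers[pos] = start;
                ntiers++;
        }

        for (int i = 0; i < ntiers; i++)
                printf("tier %d: adistance_start %d\n", i, tiers[i]);
        return 0;
}

For the sample inputs this prints tiers at 256, 512 and 640 in ascending order, mirroring the ordering that find_create_memory_tier() maintains in memory_tiers.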