/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "numa: " fmt

#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>
#include <asm/drmem.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const __be32 *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
        unsigned int node;

        /* setup nr_node_ids if not done yet */
        if (nr_node_ids == MAX_NUMNODES)
                setup_nr_node_ids();

        /* allocate the map */
        for_each_node(node)
                alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

        /* cpumask_of_node() will now work */
        dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}

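/*
 * Carve memory into fake NUMA nodes at the boundaries passed via
 * "numa=fake=" on the command line. Illustrative example: with
 * numa=fake=1G,4G, memory below 1GB keeps its original node id,
 * memory from 1GB up to 4GB lands in fake node 1, and the rest in
 * fake node 2.
 */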
static int __init fake_numa_create_new_node(unsigned long end_pfn,
                                                unsigned int *nid)
{
        unsigned long long mem;
        char *p = cmdline;
        static unsigned int fake_nid;
        static unsigned long long curr_boundary;

        /*
         * Modify node id, iff we started creating NUMA nodes
         * We want to continue from where we left off the last time
         */
        if (fake_nid)
                *nid = fake_nid;
        /*
         * In case there are no more arguments to parse, the
         * node_id should be the same as the last fake node id
         * (we've handled this above).
         */
        if (!p)
                return 0;

        mem = memparse(p, &p);
        if (!mem)
                return 0;

        if (mem < curr_boundary)
                return 0;

        curr_boundary = mem;

        if ((end_pfn << PAGE_SHIFT) > mem) {
                /*
                 * Skip commas and spaces
                 */
                while (*p == ',' || *p == ' ' || *p == '\t')
                        p++;

                cmdline = p;
                fake_nid++;
                *nid = fake_nid;
                dbg("created new fake_node with id %d\n", fake_nid);
                return 1;
        }
        return 0;
}

static void reset_numa_cpu_lookup_table(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu)
                numa_cpu_lookup_table[cpu] = -1;
}

static void map_cpu_to_node(int cpu, int node)
{
        update_numa_cpu_lookup_table(cpu, node);

        dbg("adding cpu %d to node %d\n", cpu, node);

        if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
                cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
        int node = numa_cpu_lookup_table[cpu];

        dbg("removing cpu %lu from node %d\n", cpu, node);

        if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
                cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
        } else {
                printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
                       cpu, node);
        }
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
        return of_get_property(dev, "ibm,associativity", NULL);
}

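/*
 * With form 1 affinity, the distance between two nodes is LOCAL_DISTANCE
 * doubled once for each leading reference point at which the nodes'
 * associativity domains differ; e.g. (with LOCAL_DISTANCE 10 and a
 * 4-entry reference-point list) the possible values are 10, 20, 40, 80
 * and 160.
 */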
int __node_distance(int a, int b)
{
        int i;
        int distance = LOCAL_DISTANCE;

        if (!form1_affinity)
                return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

        for (i = 0; i < distance_ref_points_depth; i++) {
                if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
                        break;

                /* Double the distance for each NUMA level */
                distance *= 2;
        }

        return distance;
}
EXPORT_SYMBOL(__node_distance);

static void initialize_distance_lookup_table(int nid,
                const const __be32 *associativity)
{
        int i;

        if (!form1_affinity)
                return;

        for (i = 0; i < distance_ref_points_depth; i++) {
                const __be32 *entry;

                entry = &associativity[be32_to_cpu(distance_ref_points[i]) - 1];
                distance_lookup_table[nid][i] = of_read_number(entry, 1);
        }
}

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
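/*
 * The ibm,associativity property is laid out as { count, domain_1, ...,
 * domain_count } with the broadest domain first, so the node id lives at
 * index min_common_depth (the count cell occupies index 0).
 */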
static int associativity_to_nid(const __be32 *associativity)
{
        int nid = -1;

        if (min_common_depth == -1)
                goto out;

        if (of_read_number(associativity, 1) >= min_common_depth)
                nid = of_read_number(&associativity[min_common_depth], 1);

        /* POWER4 LPAR uses 0xffff as invalid node */
        if (nid == 0xffff || nid >= MAX_NUMNODES)
                nid = -1;

        if (nid > 0 &&
            of_read_number(associativity, 1) >= distance_ref_points_depth) {
                /*
                 * Skip the length field and send start of associativity array
                 */
                initialize_distance_lookup_table(nid, associativity + 1);
        }

out:
        return nid;
}

/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
        int nid = -1;
        const __be32 *tmp;

        tmp = of_get_associativity(device);
        if (tmp)
                nid = associativity_to_nid(tmp);
        return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
        int nid = -1;

        of_node_get(device);
        while (device) {
                nid = of_node_to_nid_single(device);
                if (nid != -1)
                        break;

                device = of_get_next_parent(device);
        }
        of_node_put(device);

        return nid;
}
EXPORT_SYMBOL(of_node_to_nid);

static int __init find_min_common_depth(void)
{
        int depth;
        struct device_node *root;

        if (firmware_has_feature(FW_FEATURE_OPAL))
                root = of_find_node_by_path("/ibm,opal");
        else
                root = of_find_node_by_path("/rtas");
        if (!root)
                root = of_find_node_by_path("/");

        /*
         * This property is a set of 32-bit integers, each representing
         * an index into the ibm,associativity nodes.
         *
         * With form 0 affinity the first integer is for an SMP configuration
         * (should be all 0's) and the second is for a normal NUMA
         * configuration. We have only one level of NUMA.
         *
         * With form 1 affinity the first integer is the most significant
         * NUMA boundary and the following are progressively less significant
         * boundaries. There can be more than one level of NUMA.
         */
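        /*
         * Illustrative example: a form 1 pSeries machine might expose
         * ibm,associativity-reference-points = <4 4>, making the fourth
         * associativity domain the NUMA boundary used below.
         */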
        distance_ref_points = of_get_property(root,
                                        "ibm,associativity-reference-points",
                                        &distance_ref_points_depth);

        if (!distance_ref_points) {
                dbg("NUMA: ibm,associativity-reference-points not found.\n");
                goto err;
        }

        distance_ref_points_depth /= sizeof(int);

        if (firmware_has_feature(FW_FEATURE_OPAL) ||
            firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
                dbg("Using form 1 affinity\n");
                form1_affinity = 1;
        }

        if (form1_affinity) {
                depth = of_read_number(distance_ref_points, 1);
        } else {
                if (distance_ref_points_depth < 2) {
                        printk(KERN_WARNING "NUMA: "
                                "short ibm,associativity-reference-points\n");
                        goto err;
                }

                depth = of_read_number(&distance_ref_points[1], 1);
        }

        /*
         * Warn and cap if the hardware supports more than
         * MAX_DISTANCE_REF_POINTS domains.
         */
        if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
                printk(KERN_WARNING "NUMA: distance array capped at "
                        "%d entries\n", MAX_DISTANCE_REF_POINTS);
                distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
        }

        of_node_put(root);
        return depth;

err:
        of_node_put(root);
        return -1;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
        struct device_node *memory = NULL;

        memory = of_find_node_by_type(memory, "memory");
        if (!memory)
                panic("numa.c: No memory nodes found!");

        *n_addr_cells = of_n_addr_cells(memory);
        *n_size_cells = of_n_size_cells(memory);
        of_node_put(memory);
}

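/*
 * E.g. for n = 2, the cells { 0x1, 0x2 } combine into the 64-bit
 * value 0x100000002.
 */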
static unsigned long read_n_cells(int n, const __be32 **buf)
{
        unsigned long result = 0;

        while (n--) {
                result = (result << 32) | of_read_number(*buf, 1);
                (*buf)++;
        }
        return result;
}

struct assoc_arrays {
        u32 n_arrays;
        u32 array_sz;
        const __be32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
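/*
 * Illustrative encoding with N = 2 and M = 3:
 *   { 2, 3, a1, a2, a3, b1, b2, b3 }
 * where an LMB's aa_index of 0 selects {a1,a2,a3} and 1 selects
 * {b1,b2,b3}.
 */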
static int of_get_assoc_arrays(struct assoc_arrays *aa)
{
        struct device_node *memory;
        const __be32 *prop;
        u32 len;

        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (!memory)
                return -1;

        prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
        if (!prop || len < 2 * sizeof(unsigned int)) {
                of_node_put(memory);
                return -1;
        }

        aa->n_arrays = of_read_number(prop++, 1);
        aa->array_sz = of_read_number(prop++, 1);

        of_node_put(memory);

        /* Now that we know the number of arrays and size of each array,
         * revalidate the size of the property read in.
         */
        if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
                return -1;

        aa->arrays = prop;
        return 0;
}

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct drmem_lmb *lmb)
{
        struct assoc_arrays aa = { .arrays = NULL };
        int default_nid = 0;
        int nid = default_nid;
        int rc, index;

        rc = of_get_assoc_arrays(&aa);
        if (rc)
                return default_nid;

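        /*
         * The lookup arrays omit the leading count cell that the
         * ibm,associativity property carries, hence the "- 1" when
         * indexing with min_common_depth below.
         */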
        if (min_common_depth > 0 && min_common_depth <= aa.array_sz &&
            !(lmb->flags & DRCONF_MEM_AI_INVALID) &&
            lmb->aa_index < aa.n_arrays) {
                index = lmb->aa_index * aa.array_sz + min_common_depth - 1;
                nid = of_read_number(&aa.arrays[index], 1);

                if (nid == 0xffff || nid >= MAX_NUMNODES)
                        nid = default_nid;

                if (nid > 0) {
                        index = lmb->aa_index * aa.array_sz;
                        initialize_distance_lookup_table(nid,
                                                        &aa.arrays[index]);
                }
        }

        return nid;
}

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
        int nid = -1;
        struct device_node *cpu;

        /*
         * If a valid cpu-to-node mapping is already available, use it
         * directly instead of querying the firmware, since it represents
         * the most recent mapping notified to us by the platform (eg: VPHN).
         */
        if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
                map_cpu_to_node(lcpu, nid);
                return nid;
        }

        cpu = of_get_cpu_node(lcpu, NULL);

        if (!cpu) {
                WARN_ON(1);
                if (cpu_present(lcpu))
                        goto out_present;
                else
                        goto out;
        }

        nid = of_node_to_nid_single(cpu);

out_present:
        if (nid < 0 || !node_possible(nid))
                nid = first_online_node;

        map_cpu_to_node(lcpu, nid);
        of_node_put(cpu);
out:
        return nid;
}

static void verify_cpu_node_mapping(int cpu, int node)
{
        int base, sibling, i;

        /* Verify that all the threads in the core belong to the same node */
        base = cpu_first_thread_sibling(cpu);

        for (i = 0; i < threads_per_core; i++) {
                sibling = base + i;

                if (sibling == cpu || cpu_is_offline(sibling))
                        continue;

                if (cpu_to_node(sibling) != node) {
                        WARN(1, "CPU thread siblings %d and %d don't belong"
                                " to the same node!\n", cpu, sibling);
                        break;
                }
        }
}

/* Must run before sched domains notifier. */
static int ppc_numa_cpu_prepare(unsigned int cpu)
{
        int nid;

        nid = numa_setup_cpu(cpu);
        verify_cpu_node_mapping(cpu, nid);
        return 0;
}

static int ppc_numa_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
        unmap_cpu_from_node(cpu);
#endif
        return 0;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
                                                      unsigned long size)
{
        /*
         * We use memblock_end_of_DRAM() in here instead of memory_limit because
         * we've already adjusted it for the limit and it takes care of
         * having memory holes below the limit. Also, in the case of
         * iommu_is_off, memory_limit is not set but is implicitly enforced.
         */

        if (start + size <= memblock_end_of_DRAM())
                return size;

        if (start >= memblock_end_of_DRAM())
                return 0;

        return memblock_end_of_DRAM() - start;
}

/*
 * Reads the counter for a given entry in
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const __be32 **usm)
{
        /*
         * For each lmb in ibm,dynamic-memory a corresponding
         * entry in linux,drconf-usable-memory property contains
         * a counter followed by that many (base, size) tuples.
         * read the counter from linux,drconf-usable-memory
         */
        return read_n_cells(n_mem_size_cells, usm);
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node. This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init numa_setup_drmem_lmb(struct drmem_lmb *lmb,
                                        const __be32 **usm)
{
        unsigned int ranges, is_kexec_kdump = 0;
        unsigned long base, size, sz;
        int nid;

        /*
         * Skip this block if the reserved bit is set in flags (0x80)
         * or if the block is not assigned to this partition (0x8)
         */
        if ((lmb->flags & DRCONF_MEM_RESERVED)
            || !(lmb->flags & DRCONF_MEM_ASSIGNED))
                return;

        if (*usm)
                is_kexec_kdump = 1;

        base = lmb->base_addr;
        size = drmem_lmb_size();
        ranges = 1;

        if (is_kexec_kdump) {
                ranges = read_usm_ranges(usm);
                if (!ranges) /* there are no (base, size) tuples */
                        return;
        }

        do {
                if (is_kexec_kdump) {
                        base = read_n_cells(n_mem_addr_cells, usm);
                        size = read_n_cells(n_mem_size_cells, usm);
                }

                nid = of_drconf_to_nid_single(lmb);
                fake_numa_create_new_node(((base + size) >> PAGE_SHIFT),
                                          &nid);
                node_set_online(nid);
                sz = numa_enforce_memory_limit(base, size);
                if (sz)
                        memblock_set_node(base, sz, &memblock.memory, nid);
        } while (--ranges);
}

static int __init parse_numa_properties(void)
{
        struct device_node *memory;
        int default_nid = 0;
        unsigned long i;

        if (numa_enabled == 0) {
                printk(KERN_WARNING "NUMA disabled by user\n");
                return -1;
        }

        min_common_depth = find_min_common_depth();

        if (min_common_depth < 0)
                return min_common_depth;

        dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

        /*
         * Even though we connect cpus to numa domains later in SMP
         * init, we need to know the node ids now. This is because
         * each node to be onlined must have NODE_DATA etc backing it.
         */
        for_each_present_cpu(i) {
                struct device_node *cpu;
                int nid;

                cpu = of_get_cpu_node(i, NULL);
                BUG_ON(!cpu);
                nid = of_node_to_nid_single(cpu);
                of_node_put(cpu);

                /*
                 * Don't fall back to default_nid yet -- we will plug
                 * cpus into nodes once the memory scan has discovered
                 * the topology.
                 */
                if (nid < 0)
                        continue;
                node_set_online(nid);
        }

        get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

        for_each_node_by_type(memory, "memory") {
                unsigned long start;
                unsigned long size;
                int nid;
                int ranges;
                const __be32 *memcell_buf;
                unsigned int len;

                memcell_buf = of_get_property(memory,
                                "linux,usable-memory", &len);
                if (!memcell_buf || len <= 0)
                        memcell_buf = of_get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* ranges in cell */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
                /* these are order-sensitive, and modify the buffer pointer */
                start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                size = read_n_cells(n_mem_size_cells, &memcell_buf);

                /*
                 * Assumption: either all memory nodes or none will
                 * have associativity properties. If none, then
                 * everything goes to default_nid.
                 */
                nid = of_node_to_nid_single(memory);
                if (nid < 0)
                        nid = default_nid;

                fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
                node_set_online(nid);

                size = numa_enforce_memory_limit(start, size);
                if (size)
                        memblock_set_node(start, size, &memblock.memory, nid);

                if (--ranges)
                        goto new_range;
        }

        /*
         * Now do the same thing for each MEMBLOCK listed in the
         * ibm,dynamic-memory property in the
         * ibm,dynamic-reconfiguration-memory node.
         */
        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                walk_drmem_lmbs(memory, numa_setup_drmem_lmb);
                of_node_put(memory);
        }

        return 0;
}

static void __init setup_nonnuma(void)
{
        unsigned long top_of_ram = memblock_end_of_DRAM();
        unsigned long total_ram = memblock_phys_mem_size();
        unsigned long start_pfn, end_pfn;
        unsigned int nid = 0;
        struct memblock_region *reg;

        printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
               top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (top_of_ram - total_ram) >> 20);

        for_each_memblock(memory, reg) {
                start_pfn = memblock_region_memory_base_pfn(reg);
                end_pfn = memblock_region_memory_end_pfn(reg);

                fake_numa_create_new_node(end_pfn, &nid);
                memblock_set_node(PFN_PHYS(start_pfn),
                                  PFN_PHYS(end_pfn - start_pfn),
                                  &memblock.memory, nid);
                node_set_online(nid);
        }
}

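/* Prints dmesg lines like "Node 0 CPUs: 0-7", one per online node. */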
void __init dump_numa_cpu_topology(void)
{
        unsigned int node;
        unsigned int cpu, count;

        if (min_common_depth == -1 || !numa_enabled)
                return;

        for_each_online_node(node) {
                pr_info("Node %d CPUs:", node);

                count = 0;
                /*
                 * If we used a CPU iterator here we would miss printing
                 * the holes in the cpumap.
                 */
                for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
                        if (cpumask_test_cpu(cpu,
                                        node_to_cpumask_map[node])) {
                                if (count == 0)
                                        pr_cont(" %u", cpu);
                                ++count;
                        } else {
                                if (count > 1)
                                        pr_cont("-%u", cpu - 1);
                                count = 0;
                        }
                }

                if (count > 1)
                        pr_cont("-%u", nr_cpu_ids - 1);
                pr_cont("\n");
        }
}

/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
        u64 spanned_pages = end_pfn - start_pfn;
        const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
        u64 nd_pa;
        void *nd;
        int tnid;

        nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
        nd = __va(nd_pa);

        /* report and initialize */
        pr_info("  NODE_DATA [mem %#010Lx-%#010Lx]\n",
                nd_pa, nd_pa + nd_size - 1);
        tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
        if (tnid != nid)
                pr_info("    NODE_DATA(%d) on node %d\n", nid, tnid);

        node_data[nid] = nd;
        memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
        NODE_DATA(nid)->node_id = nid;
        NODE_DATA(nid)->node_start_pfn = start_pfn;
        NODE_DATA(nid)->node_spanned_pages = spanned_pages;
}

static void __init find_possible_nodes(void)
{
        struct device_node *rtas;
        u32 numnodes, i;

        if (min_common_depth <= 0)
                return;

        rtas = of_find_node_by_path("/rtas");
        if (!rtas)
                return;

        if (of_property_read_u32_index(rtas,
                                "ibm,max-associativity-domains",
                                min_common_depth, &numnodes))
                goto out;

        for (i = 0; i < numnodes; i++) {
                if (!node_possible(i))
                        node_set(i, node_possible_map);
        }

out:
        of_node_put(rtas);
}

void __init mem_topology_setup(void)
{
        int cpu;

        if (parse_numa_properties())
                setup_nonnuma();

        /*
         * Modify the set of possible NUMA nodes to reflect information
         * available about the set of online nodes, and the set of nodes
         * that we expect to make use of for this platform's affinity
         * calculations.
         */
        nodes_and(node_possible_map, node_possible_map, node_online_map);

        find_possible_nodes();

        setup_node_to_cpumask_map();

        reset_numa_cpu_lookup_table();

        for_each_present_cpu(cpu)
                numa_setup_cpu(cpu);
}

void __init initmem_init(void)
{
        int nid;

        max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        max_pfn = max_low_pfn;

        memblock_dump_all();

        for_each_online_node(nid) {
                unsigned long start_pfn, end_pfn;

                get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
                setup_node_data(nid, start_pfn, end_pfn);
                sparse_memory_present_with_active_regions(nid);
        }

        sparse_init();

        /*
         * We need the numa_cpu_lookup_table to be accurate for all CPUs,
         * even before we online them, so that we can use cpu_to_{node,mem}
         * early in boot, cf. smp_prepare_cpus().
         * _nocalls() + manual invocation is used because cpuhp is not yet
         * initialized for the boot CPU.
         */
        cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "powerpc/numa:prepare",
                                  ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
}

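/* Handles "numa=off", "numa=debug" and "numa=fake=<size>[,<size>...]". */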
static int __init early_numa(char *p)
{
        if (!p)
                return 0;

        if (strstr(p, "off"))
                numa_enabled = 0;

        if (strstr(p, "debug"))
                numa_debug = 1;

        p = strstr(p, "fake=");
        if (p)
                cmdline = p + strlen("fake=");

        return 0;
}
early_param("numa", early_numa);

static bool topology_updates_enabled = true;

static int __init early_topology_updates(char *p)
{
        if (!p)
                return 0;

        if (!strcmp(p, "off")) {
                pr_info("Disabling topology updates\n");
                topology_updates_enabled = false;
        }

        return 0;
}
early_param("topology_updates", early_topology_updates);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(unsigned long scn_addr)
{
        struct drmem_lmb *lmb;
        unsigned long lmb_size;
        int nid = -1;

        lmb_size = drmem_lmb_size();

        for_each_drmem_lmb(lmb) {
                /* skip this block if it is reserved or not assigned to
                 * this partition */
                if ((lmb->flags & DRCONF_MEM_RESERVED)
                    || !(lmb->flags & DRCONF_MEM_ASSIGNED))
                        continue;

                if ((scn_addr < lmb->base_addr)
                    || (scn_addr >= (lmb->base_addr + lmb_size)))
                        continue;

                nid = of_drconf_to_nid_single(lmb);
                break;
        }

        return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
        struct device_node *memory;
        int nid = -1;

        for_each_node_by_type(memory, "memory") {
                unsigned long start, size;
                int ranges;
                const __be32 *memcell_buf;
                unsigned int len;

                memcell_buf = of_get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* ranges in cell */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

                while (ranges--) {
                        start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                        size = read_n_cells(n_mem_size_cells, &memcell_buf);

                        if ((scn_addr < start) || (scn_addr >= (start + size)))
                                continue;

                        nid = of_node_to_nid_single(memory);
                        break;
                }

                if (nid >= 0)
                        break;
        }

        of_node_put(memory);

        return nid;
}

/*
 * Find the node associated with a hot added memory section. Section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK. It is assumed that
 * sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
        struct device_node *memory = NULL;
        int nid;

        if (!numa_enabled || (min_common_depth < 0))
                return first_online_node;

        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                nid = hot_add_drconf_scn_to_nid(scn_addr);
                of_node_put(memory);
        } else {
                nid = hot_add_node_scn_to_nid(scn_addr);
        }

        if (nid < 0 || !node_possible(nid))
                nid = first_online_node;

        return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
        struct device_node *memory = NULL;
        struct device_node *dn = NULL;
        const __be64 *lrdr = NULL;

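        /*
         * The first 64-bit cell of ibm,lrdr-capacity gives the maximum
         * memory (in bytes) the partition may grow to via DLPAR.
         */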
        dn = of_find_node_by_path("/rtas");
        if (dn) {
                lrdr = of_get_property(dn, "ibm,lrdr-capacity", NULL);
                of_node_put(dn);
                if (lrdr)
                        return be64_to_cpup(lrdr);
        }

        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                of_node_put(memory);
                return drmem_lmb_memory_max();
        }
        return 0;
}

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
        return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR

#include "vphn.h"

struct topology_update_data {
        struct topology_update_data *next;
        unsigned int cpu;
        int old_nid;
        int new_nid;
};

#define TOPOLOGY_DEF_TIMER_SECS 60

static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static int prrn_enabled;
static void reset_topology_timer(void);
static int topology_timer_secs = 1;
static int topology_inited;
static int topology_update_needed;

/*
 * Change polling interval for associativity changes.
 */
int timed_topology_update(int nsecs)
{
        if (vphn_enabled) {
                if (nsecs > 0)
                        topology_timer_secs = nsecs;
                else
                        topology_timer_secs = TOPOLOGY_DEF_TIMER_SECS;

                reset_topology_timer();
        }

        return 0;
}

/*
 * Store the current values of the associativity change counters in the
 * hypervisor.
 */
static void setup_cpu_associativity_change_counters(void)
{
        int cpu;

        /* The VPHN feature supports a maximum of 8 reference points */
        BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);

        for_each_possible_cpu(cpu) {
                int i;
                u8 *counts = vphn_cpu_change_counts[cpu];
                volatile u8 *hypervisor_counts = lppaca_of(cpu).vphn_assoc_counts;

                for (i = 0; i < distance_ref_points_depth; i++)
                        counts[i] = hypervisor_counts[i];
        }
}

/*
 * The hypervisor maintains a set of 8 associativity change counters in
 * the VPA of each cpu that correspond to the associativity levels in the
 * ibm,associativity-reference-points property. When an associativity
 * level changes, the corresponding counter is incremented.
 *
 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
 * node associativity levels have changed.
 *
 * Returns the number of cpus with unhandled associativity changes.
 */
static int update_cpu_associativity_changes_mask(void)
{
        int cpu;
        cpumask_t *changes = &cpu_associativity_changes_mask;

        for_each_possible_cpu(cpu) {
                int i, changed = 0;
                u8 *counts = vphn_cpu_change_counts[cpu];
                volatile u8 *hypervisor_counts = lppaca_of(cpu).vphn_assoc_counts;

                for (i = 0; i < distance_ref_points_depth; i++) {
                        if (hypervisor_counts[i] != counts[i]) {
                                counts[i] = hypervisor_counts[i];
                                changed = 1;
                        }
                }
                if (changed) {
                        cpumask_or(changes, changes, cpu_sibling_mask(cpu));
                        cpu = cpu_last_thread_sibling(cpu);
                }
        }

        return cpumask_weight(changes);
}

/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
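/*
 * H_HOME_NODE_ASSOCIATIVITY returns the associativity packed into the
 * hcall return buffer; vphn_unpack_associativity() expands it into the
 * same __be32 layout as the ibm,associativity property.
 */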
static long hcall_vphn(unsigned long cpu, __be32 *associativity)
{
        long rc;
        long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
        u64 flags = 1;
        int hwcpu = get_hard_smp_processor_id(cpu);

        rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
        vphn_unpack_associativity(retbuf, associativity);

        return rc;
}

static long vphn_get_associativity(unsigned long cpu,
                                        __be32 *associativity)
{
        long rc;

        rc = hcall_vphn(cpu, associativity);

        switch (rc) {
        case H_FUNCTION:
                printk(KERN_INFO
                        "VPHN is not supported. Disabling polling...\n");
                stop_topology_update();
                break;
        case H_HARDWARE:
                printk(KERN_ERR
                        "hcall_vphn() experienced a hardware fault "
                        "preventing VPHN. Disabling polling...\n");
                stop_topology_update();
                break;
        case H_SUCCESS:
                dbg("VPHN hcall succeeded. Reset polling...\n");
                timed_topology_update(0);
                break;
        }

        return rc;
}

int find_and_online_cpu_nid(int cpu)
{
        __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
        int new_nid;

        /* Use associativity from first thread for all siblings */
        vphn_get_associativity(cpu, associativity);
        new_nid = associativity_to_nid(associativity);
        if (new_nid < 0 || !node_possible(new_nid))
                new_nid = first_online_node;

        if (NODE_DATA(new_nid) == NULL) {
#ifdef CONFIG_MEMORY_HOTPLUG
                /*
                 * Need to ensure that NODE_DATA is initialized for a node from
                 * available memory (see memblock_alloc_try_nid). If unable to
                 * init the node, then default to nearest node that has memory
                 * installed.
                 */
                if (try_online_node(new_nid))
                        new_nid = first_online_node;
#else
                /*
                 * Default to using the nearest node that has memory installed.
                 * Otherwise, it would be necessary to patch the kernel MM code
                 * to deal with more memoryless-node error conditions.
                 */
                new_nid = first_online_node;
#endif
        }

        pr_debug("%s:%d cpu %d nid %d\n", __func__, __LINE__,
                 cpu, new_nid);
        return new_nid;
}

/*
 * Update the CPU maps and sysfs entries for a single CPU when its NUMA
 * characteristics change. This function doesn't perform any locking and is
 * only safe to call from stop_machine().
 */
static int update_cpu_topology(void *data)
{
        struct topology_update_data *update;
        unsigned long cpu;

        if (!data)
                return -EINVAL;

        cpu = smp_processor_id();

        for (update = data; update; update = update->next) {
                int new_nid = update->new_nid;
                if (cpu != update->cpu)
                        continue;

                unmap_cpu_from_node(cpu);
                map_cpu_to_node(cpu, new_nid);
                set_cpu_numa_node(cpu, new_nid);
                set_cpu_numa_mem(cpu, local_memory_node(new_nid));
                vdso_getcpu_init();
        }

        return 0;
}

static int update_lookup_table(void *data)
{
        struct topology_update_data *update;

        if (!data)
                return -EINVAL;

        /*
         * Upon topology update, the numa-cpu lookup table needs to be updated
         * for all threads in the core, including offline CPUs, to ensure that
         * future hotplug operations respect the cpu-to-node associativity
         * properly.
         */
        for (update = data; update; update = update->next) {
                int nid, base, j;

                nid = update->new_nid;
                base = cpu_first_thread_sibling(update->cpu);

                for (j = 0; j < threads_per_core; j++) {
                        update_numa_cpu_lookup_table(base + j, nid);
                }
        }

        return 0;
}

/*
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed. Returns 1 when the topology has changed, and 0 otherwise.
 *
 * cpus_locked says whether we already hold cpu_hotplug_lock.
 */
int numa_update_cpu_topology(bool cpus_locked)
{
        unsigned int cpu, sibling, changed = 0;
        struct topology_update_data *updates, *ud;
        cpumask_t updated_cpus;
        struct device *dev;
        int weight, new_nid, i = 0;

        if (!prrn_enabled && !vphn_enabled) {
                if (!topology_inited)
                        topology_update_needed = 1;
                return 0;
        }

        weight = cpumask_weight(&cpu_associativity_changes_mask);
        if (!weight)
                return 0;

        updates = kcalloc(weight, sizeof(*updates), GFP_KERNEL);
        if (!updates)
                return 0;

        cpumask_clear(&updated_cpus);

        for_each_cpu(cpu, &cpu_associativity_changes_mask) {
                /*
                 * If siblings aren't flagged for changes, updates list
                 * will be too short. Skip on this update and set for next
                 * update.
                 */
                if (!cpumask_subset(cpu_sibling_mask(cpu),
                                        &cpu_associativity_changes_mask)) {
                        pr_info("Sibling bits not set for associativity "
                                        "change, cpu%d\n", cpu);
                        cpumask_or(&cpu_associativity_changes_mask,
                                        &cpu_associativity_changes_mask,
                                        cpu_sibling_mask(cpu));
                        cpu = cpu_last_thread_sibling(cpu);
                        continue;
                }

                new_nid = find_and_online_cpu_nid(cpu);

                if (new_nid == numa_cpu_lookup_table[cpu]) {
                        cpumask_andnot(&cpu_associativity_changes_mask,
                                        &cpu_associativity_changes_mask,
                                        cpu_sibling_mask(cpu));
                        dbg("Assoc chg gives same node %d for cpu%d\n",
                                        new_nid, cpu);
                        cpu = cpu_last_thread_sibling(cpu);
                        continue;
                }

                for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
                        ud = &updates[i++];
                        ud->next = &updates[i];
                        ud->cpu = sibling;
                        ud->new_nid = new_nid;
                        ud->old_nid = numa_cpu_lookup_table[sibling];
                        cpumask_set_cpu(sibling, &updated_cpus);
                }
                cpu = cpu_last_thread_sibling(cpu);
        }

        /*
         * Prevent processing of 'updates' from overflowing array
         * where last entry filled in a 'next' pointer.
         */
        if (i)
                updates[i-1].next = NULL;

        pr_debug("Topology update for the following CPUs:\n");
        if (cpumask_weight(&updated_cpus)) {
                for (ud = &updates[0]; ud; ud = ud->next) {
                        pr_debug("cpu %d moving from node %d "
                                        "to %d\n", ud->cpu,
                                        ud->old_nid, ud->new_nid);
                }
        }

        /*
         * In cases where we have nothing to update (because the updates list
         * is too short or because the new topology is same as the old one),
         * skip invoking update_cpu_topology() via stop-machine(). This is
         * necessary (and not just a fast-path optimization) since stop-machine
         * can end up electing a random CPU to run update_cpu_topology(), and
         * thus trick us into setting up incorrect cpu-node mappings (since
         * 'updates' is kcalloc()'ed, i.e. zero-filled).
         *
         * And for the similar reason, we will skip all the following updating.
         */
        if (!cpumask_weight(&updated_cpus))
                goto out;

        if (cpus_locked)
                stop_machine_cpuslocked(update_cpu_topology, &updates[0],
                                        &updated_cpus);
        else
                stop_machine(update_cpu_topology, &updates[0], &updated_cpus);

        /*
         * Update the numa-cpu lookup table with the new mappings, even for
         * offline CPUs. It is best to perform this update from the stop-
         * machine context.
         */
        if (cpus_locked)
                stop_machine_cpuslocked(update_lookup_table, &updates[0],
                                        cpumask_of(raw_smp_processor_id()));
        else
                stop_machine(update_lookup_table, &updates[0],
                             cpumask_of(raw_smp_processor_id()));

        for (ud = &updates[0]; ud; ud = ud->next) {
                unregister_cpu_under_node(ud->cpu, ud->old_nid);
                register_cpu_under_node(ud->cpu, ud->new_nid);

                dev = get_cpu_device(ud->cpu);
                if (dev)
                        kobject_uevent(&dev->kobj, KOBJ_CHANGE);
                cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
                changed = 1;
        }

out:
        kfree(updates);
        topology_update_needed = 0;
        return changed;
}

int arch_update_cpu_topology(void)
{
        return numa_update_cpu_topology(true);
}

static void topology_work_fn(struct work_struct *work)
{
        rebuild_sched_domains();
}
static DECLARE_WORK(topology_work, topology_work_fn);

static void topology_schedule_update(void)
{
        schedule_work(&topology_work);
}

static void topology_timer_fn(struct timer_list *unused)
{
        if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
                topology_schedule_update();
        else if (vphn_enabled) {
                if (update_cpu_associativity_changes_mask() > 0)
                        topology_schedule_update();
                reset_topology_timer();
        }
}
static struct timer_list topology_timer;

static void reset_topology_timer(void)
{
        mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
}

#ifdef CONFIG_SMP

static void stage_topology_update(int core_id)
{
        cpumask_or(&cpu_associativity_changes_mask,
                &cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
        reset_topology_timer();
}

static int dt_update_callback(struct notifier_block *nb,
                                unsigned long action, void *data)
{
        struct of_reconfig_data *update = data;
        int rc = NOTIFY_DONE;

        switch (action) {
        case OF_RECONFIG_UPDATE_PROPERTY:
                if (!of_prop_cmp(update->dn->type, "cpu") &&
                    !of_prop_cmp(update->prop->name, "ibm,associativity")) {
                        u32 core_id;
                        of_property_read_u32(update->dn, "reg", &core_id);
                        stage_topology_update(core_id);
                        rc = NOTIFY_OK;
                }
                break;
        }

        return rc;
}

static struct notifier_block dt_update_nb = {
        .notifier_call = dt_update_callback,
};

#endif

/*
 * Start polling for associativity changes.
 */
int start_topology_update(void)
{
        int rc = 0;

        if (firmware_has_feature(FW_FEATURE_PRRN)) {
                if (!prrn_enabled) {
                        prrn_enabled = 1;
#ifdef CONFIG_SMP
                        rc = of_reconfig_notifier_register(&dt_update_nb);
#endif
                }
        }
        if (firmware_has_feature(FW_FEATURE_VPHN) &&
            lppaca_shared_proc(get_lppaca())) {
                if (!vphn_enabled) {
                        vphn_enabled = 1;
                        setup_cpu_associativity_change_counters();
                        timer_setup(&topology_timer, topology_timer_fn,
                                    TIMER_DEFERRABLE);
                        reset_topology_timer();
                }
        }

        return rc;
}

/*
 * Disable polling for VPHN associativity changes.
 */
int stop_topology_update(void)
{
        int rc = 0;

        if (prrn_enabled) {
                prrn_enabled = 0;
#ifdef CONFIG_SMP
                rc = of_reconfig_notifier_unregister(&dt_update_nb);
#endif
        }
        if (vphn_enabled) {
                vphn_enabled = 0;
                rc = del_timer_sync(&topology_timer);
        }

        return rc;
}

int prrn_is_enabled(void)
{
        return prrn_enabled;
}

static int topology_read(struct seq_file *file, void *v)
{
        if (vphn_enabled || prrn_enabled)
                seq_puts(file, "on\n");
        else
                seq_puts(file, "off\n");

        return 0;
}

static int topology_open(struct inode *inode, struct file *file)
{
        return single_open(file, topology_read, NULL);
}

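/*
 * Writing "on" or "off" to /proc/powerpc/topology_updates starts or stops
 * the polling, e.g.: echo off > /proc/powerpc/topology_updates
 */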
static ssize_t topology_write(struct file *file, const char __user *buf,
                              size_t count, loff_t *off)
{
        char kbuf[4]; /* "on" or "off" plus null. */
        int read_len;

        read_len = count < 3 ? count : 3;
        if (copy_from_user(kbuf, buf, read_len))
                return -EINVAL;

        kbuf[read_len] = '\0';

        if (!strncmp(kbuf, "on", 2))
                start_topology_update();
        else if (!strncmp(kbuf, "off", 3))
                stop_topology_update();
        else
                return -EINVAL;

        return count;
}

static const struct file_operations topology_ops = {
        .read = seq_read,
        .write = topology_write,
        .open = topology_open,
        .release = single_release
};

static int topology_update_init(void)
{
        /* Do not poll for changes if disabled at boot */
        if (topology_updates_enabled)
                start_topology_update();

        if (vphn_enabled)
                topology_schedule_update();

        if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops))
                return -ENOMEM;

        topology_inited = 1;
        if (topology_update_needed)
                bitmap_fill(cpumask_bits(&cpu_associativity_changes_mask),
                                        nr_cpumask_bits);

        return 0;
}
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */