[linux-2.6-block.git] drivers/base/cpu.c
/*
 * CPU subsystem support
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/device.h>
#include <linux/node.h>
#include <linux/gfp.h>
#include <linux/percpu.h>

#include "base.h"

struct bus_type cpu_subsys = {
	.name = "cpu",
	.dev_name = "cpu",
};
EXPORT_SYMBOL_GPL(cpu_subsys);

static DEFINE_PER_CPU(struct device *, cpu_sys_devices);

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t show_online(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sprintf(buf, "%u\n", !!cpu_online(cpu->dev.id));
}

static ssize_t __ref store_online(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	ssize_t ret;

	cpu_hotplug_driver_lock();
	switch (buf[0]) {
	case '0':
		ret = cpu_down(cpu->dev.id);
		if (!ret)
			kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
		break;
	case '1':
		ret = cpu_up(cpu->dev.id);
		if (!ret)
			kobject_uevent(&dev->kobj, KOBJ_ONLINE);
		break;
	default:
		ret = -EINVAL;
	}
	cpu_hotplug_driver_unlock();

	if (ret >= 0)
		ret = count;
	return ret;
}
static DEVICE_ATTR(online, 0644, show_online, store_online);
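
/*
 * Example usage from userspace (assuming the CPU was registered with
 * cpu->hotpluggable set, so the control file above exists):
 *
 *	echo 0 > /sys/devices/system/cpu/cpu1/online	# cpu_down(), emits KOBJ_OFFLINE
 *	echo 1 > /sys/devices/system/cpu/cpu1/online	# cpu_up(), emits KOBJ_ONLINE
 */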

static void __cpuinit register_cpu_control(struct cpu *cpu)
{
	device_create_file(&cpu->dev, &dev_attr_online);
}
void unregister_cpu(struct cpu *cpu)
{
	int logical_cpu = cpu->dev.id;

	unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));

	device_remove_file(&cpu->dev, &dev_attr_online);

	device_unregister(&cpu->dev);
	per_cpu(cpu_sys_devices, logical_cpu) = NULL;
	return;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
static ssize_t cpu_probe_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf,
			       size_t count)
{
	return arch_cpu_probe(buf, count);
}

static ssize_t cpu_release_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	return arch_cpu_release(buf, count);
}

static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

#else /* ... !CONFIG_HOTPLUG_CPU */
static inline void register_cpu_control(struct cpu *cpu)
{
}
#endif /* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_KEXEC
#include <linux/kexec.h>

static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr,
				char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	ssize_t rc;
	unsigned long long addr;
	int cpunum;

	cpunum = cpu->dev.id;

	/*
	 * Might be reading another cpu's data, depending on which cpu the
	 * reading thread has been scheduled on. But cpu data (memory) is
	 * allocated once during boot up and does not change thereafter.
	 * Hence this operation should be safe. No locking required.
	 */
	addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
	rc = sprintf(buf, "%Lx\n", addr);
	return rc;
}
static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL);
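
/*
 * The crash_notes file exposes the physical address of this CPU's crash
 * note buffer; kexec/kdump userspace (kexec-tools, for example) typically
 * reads it to locate the per-cpu ELF notes when setting up the crash
 * kernel.
 */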
#endif

/*
 * Print cpu online, possible, present, and system maps
 */

struct cpu_attr {
	struct device_attribute attr;
	const struct cpumask *const * const map;
};

static ssize_t show_cpus_attr(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
	int n = cpulist_scnprintf(buf, PAGE_SIZE-2, *(ca->map));

	buf[n++] = '\n';
	buf[n] = '\0';
	return n;
}

#define _CPU_ATTR(name, map)						\
	{ __ATTR(name, 0444, show_cpus_attr, NULL), map }

/* Keep in sync with cpu_subsys_attrs */
static struct cpu_attr cpu_attrs[] = {
	_CPU_ATTR(online, &cpu_online_mask),
	_CPU_ATTR(possible, &cpu_possible_mask),
	_CPU_ATTR(present, &cpu_present_mask),
};
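
/*
 * These appear as /sys/devices/system/cpu/{online,possible,present} and
 * print the corresponding mask in cpulist format, e.g. "0-3" or "0-2,4"
 * (example values for a hypothetical system).
 */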

/*
 * Print values for NR_CPUS and offlined cpus
 */
static ssize_t print_cpus_kernel_max(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
	return n;
}
static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);

/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
unsigned int total_cpus;

static ssize_t print_cpus_offline(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int n = 0, len = PAGE_SIZE-2;
	cpumask_var_t offline;

	/* display offline cpus < nr_cpu_ids */
	if (!alloc_cpumask_var(&offline, GFP_KERNEL))
		return -ENOMEM;
	cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
	n = cpulist_scnprintf(buf, len, offline);
	free_cpumask_var(offline);

	/* display offline cpus >= nr_cpu_ids */
	if (total_cpus && nr_cpu_ids < total_cpus) {
		if (n && n < len)
			buf[n++] = ',';

		if (nr_cpu_ids == total_cpus-1)
			n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids);
		else
			n += snprintf(&buf[n], len - n, "%d-%d",
				      nr_cpu_ids, total_cpus-1);
	}

	n += snprintf(&buf[n], len - n, "\n");
	return n;
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
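
/*
 * /sys/devices/system/cpu/offline therefore lists possible-but-offline
 * CPUs first, followed by any CPUs the platform reported beyond
 * nr_cpu_ids (up to total_cpus - 1) that this kernel is not managing.
 */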

static void cpu_device_release(struct device *dev)
{
	/*
	 * This is an empty function to prevent the driver core from spitting a
	 * warning at us. Yes, I know this is directly opposite of what the
	 * documentation for the driver core and kobjects says, and the author
	 * of this code has already been publicly ridiculed for doing
	 * something as foolish as this. However, at this point in time, it is
	 * the only way to handle the issue of statically allocated cpu
	 * devices. The different architectures will have their cpu device
	 * code reworked to properly handle this in the near future, so this
	 * function will then be changed to correctly free up the memory held
	 * by the cpu device.
	 *
	 * Never copy this way of doing things, or you too will be made fun of
	 * on the linux-kernel list; you have been warned.
	 */
}

/*
 * register_cpu - Setup a sysfs device for a CPU.
 * @cpu - cpu->hotpluggable field set to 1 will generate a control file in
 *	  sysfs for this CPU.
 * @num - CPU number to use when creating the device.
 *
 * Initialize and register the CPU device.
 */
int __cpuinit register_cpu(struct cpu *cpu, int num)
{
	int error;

	cpu->node_id = cpu_to_node(num);
	cpu->dev.id = num;
	cpu->dev.bus = &cpu_subsys;
	cpu->dev.release = cpu_device_release;
	error = device_register(&cpu->dev);
	if (!error && cpu->hotpluggable)
		register_cpu_control(cpu);
	if (!error)
		per_cpu(cpu_sys_devices, num) = &cpu->dev;
	if (!error)
		register_cpu_under_node(num, cpu_to_node(num));

#ifdef CONFIG_KEXEC
	if (!error)
		error = device_create_file(&cpu->dev, &dev_attr_crash_notes);
#endif
	return error;
}
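
/*
 * Architectures that do not select CONFIG_GENERIC_CPU_DEVICES are
 * expected to call register_cpu() from their own topology setup code;
 * with CONFIG_GENERIC_CPU_DEVICES, the cpu_dev_register_generic() loop
 * below registers every possible CPU.
 */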

struct device *get_cpu_device(unsigned cpu)
{
	if (cpu < nr_cpu_ids && cpu_possible(cpu))
		return per_cpu(cpu_sys_devices, cpu);
	else
		return NULL;
}
EXPORT_SYMBOL_GPL(get_cpu_device);
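
/*
 * Sketch of a typical caller (hypothetical attribute, for illustration
 * only): a driver that wants to hang an extra file off an existing CPU
 * device might do
 *
 *	struct device *dev = get_cpu_device(cpu);
 *
 *	if (dev)
 *		err = device_create_file(dev, &dev_attr_foo);
 *
 * where dev_attr_foo is a DEVICE_ATTR defined by that driver.
 */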

static struct attribute *cpu_root_attrs[] = {
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
	&dev_attr_probe.attr,
	&dev_attr_release.attr,
#endif
	&cpu_attrs[0].attr.attr,
	&cpu_attrs[1].attr.attr,
	&cpu_attrs[2].attr.attr,
	&dev_attr_kernel_max.attr,
	&dev_attr_offline.attr,
	NULL
};

static struct attribute_group cpu_root_attr_group = {
	.attrs = cpu_root_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
	&cpu_root_attr_group,
	NULL,
};

bool cpu_is_hotpluggable(unsigned cpu)
{
	struct device *dev = get_cpu_device(cpu);
	return dev && container_of(dev, struct cpu, dev)->hotpluggable;
}
EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);

#ifdef CONFIG_GENERIC_CPU_DEVICES
static DEFINE_PER_CPU(struct cpu, cpu_devices);
#endif

static void __init cpu_dev_register_generic(void)
{
#ifdef CONFIG_GENERIC_CPU_DEVICES
	int i;

	for_each_possible_cpu(i) {
		if (register_cpu(&per_cpu(cpu_devices, i), i))
			panic("Failed to register CPU device");
	}
#endif
}

void __init cpu_dev_init(void)
{
	if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
		panic("Failed to register CPU subsystem");

	cpu_dev_register_generic();

#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
	sched_create_sysfs_power_savings_entries(cpu_subsys.dev_root);
#endif
}