11f70c1e2797cb1c44479d261b526f4a2232ac59
[linux-block.git] / arch / s390 / kernel / processor.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Copyright IBM Corp. 2008
4  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
5  */
6
7 #define KMSG_COMPONENT "cpu"
8 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
9
10 #include <linux/stop_machine.h>
11 #include <linux/cpufeature.h>
12 #include <linux/bitops.h>
13 #include <linux/kernel.h>
14 #include <linux/random.h>
15 #include <linux/sched/mm.h>
16 #include <linux/init.h>
17 #include <linux/seq_file.h>
18 #include <linux/mm_types.h>
19 #include <linux/delay.h>
20 #include <linux/cpu.h>
21 #include <linux/smp.h>
22 #include <asm/text-patching.h>
23 #include <asm/machine.h>
24 #include <asm/diag.h>
25 #include <asm/facility.h>
26 #include <asm/elf.h>
27 #include <asm/lowcore.h>
28 #include <asm/param.h>
29 #include <asm/sclp.h>
30 #include <asm/smp.h>
31
/* ELF hardware-capability bits exported to user space via the auxiliary vector. */
unsigned long __read_mostly elf_hwcap;
/* Platform string (e.g. "z13") reported via AT_PLATFORM; set in setup_elf_platform(). */
char elf_platform[ELF_PLATFORM_SIZE];

/* Per-CPU identification and frequency data, filled during CPU bring-up. */
struct cpu_info {
	unsigned int cpu_mhz_dynamic;	/* upper 32 bits of the ECAG CPU attribute */
	unsigned int cpu_mhz_static;	/* lower 32 bits of the ECAG CPU attribute */
	struct cpuid cpu_id;		/* CPU id as filled in by get_cpu_id() */
};

static DEFINE_PER_CPU(struct cpu_info, cpu_info);
/* Spin counter used by stop_machine_yield() to rate-limit directed yields. */
static DEFINE_PER_CPU(int, cpu_relax_retry);

/* True if the machine supports the CPU-MHz ECAG attribute; see cpu_detect_mhz_feature(). */
static bool machine_has_cpu_mhz;
45
46 void __init cpu_detect_mhz_feature(void)
47 {
48         if (test_facility(34) && __ecag(ECAG_CPU_ATTRIBUTE, 0) != -1UL)
49                 machine_has_cpu_mhz = true;
50 }
51
52 static void update_cpu_mhz(void *arg)
53 {
54         unsigned long mhz;
55         struct cpu_info *c;
56
57         mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);
58         c = this_cpu_ptr(&cpu_info);
59         c->cpu_mhz_dynamic = mhz >> 32;
60         c->cpu_mhz_static = mhz & 0xffffffff;
61 }
62
63 void s390_update_cpu_mhz(void)
64 {
65         s390_adjust_jiffies();
66         if (machine_has_cpu_mhz)
67                 on_each_cpu(update_cpu_mhz, NULL, 0);
68 }
69
/*
 * Called while a CPU busy-waits inside stop_machine().  After spin_retry
 * iterations, pick the next CPU in @cpumask (wrapping around) and, if its
 * backing vcpu is preempted by the hypervisor, yield to it via
 * smp_yield_cpu() so it can make progress.  notrace: runs in a context
 * where function tracing must not trigger.
 */
void notrace stop_machine_yield(const struct cpumask *cpumask)
{
	int cpu, this_cpu;

	this_cpu = smp_processor_id();
	if (__this_cpu_inc_return(cpu_relax_retry) >= spin_retry) {
		__this_cpu_write(cpu_relax_retry, 0);
		/* Next CPU after this one, wrapping within the mask. */
		cpu = cpumask_next_wrap(this_cpu, cpumask);
		if (cpu >= nr_cpu_ids)
			return;
		if (arch_vcpu_is_preempted(cpu))
			smp_yield_cpu(cpu);
	}
}
84
/* IPI callback: serialize the executing CPU after a text patch. */
static void do_sync_core(void *info)
{
	sync_core();
}
89
/*
 * Force all online CPUs through sync_core() (wait=1) so that a
 * preceding code modification becomes visible everywhere.
 */
void text_poke_sync(void)
{
	on_each_cpu(do_sync_core, NULL, 1);
}
94
/*
 * Like text_poke_sync(), but holds the CPU hotplug read lock so the
 * set of online CPUs cannot change while they are being synchronized.
 */
void text_poke_sync_lock(void)
{
	cpus_read_lock();
	text_poke_sync();
	cpus_read_unlock();
}
101
/*
 * cpu_init - initializes state that is per-CPU.
 *
 * Reads the CPU id into this CPU's cpu_info slot, refreshes the MHz
 * values if supported, and attaches the CPU to init_mm as its active
 * mm (kernel threads only at this point, hence BUG_ON(current->mm)).
 */
void cpu_init(void)
{
	struct cpuid *id = this_cpu_ptr(&cpu_info.cpu_id);

	get_cpu_id(id);
	if (machine_has_cpu_mhz)
		update_cpu_mhz(NULL);
	/* Take a reference on init_mm before making it the active mm. */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);
}
117
118 static void show_facilities(struct seq_file *m)
119 {
120         unsigned int bit;
121
122         seq_puts(m, "facilities      :");
123         for_each_set_bit_inv(bit, (long *)&stfle_fac_list, MAX_FACILITY_BIT)
124                 seq_printf(m, " %d", bit);
125         seq_putc(m, '\n');
126 }
127
/*
 * Print the machine-wide header of /proc/cpuinfo: vendor, CPU count,
 * bogomips, max thread id, hwcap feature names, facility bits, cache
 * info, and one id line per online CPU.
 */
static void show_cpu_summary(struct seq_file *m, void *v)
{
	/* Names for each HWCAP_NR_* bit; index must match the bit number. */
	static const char *hwcap_str[] = {
		[HWCAP_NR_ESAN3]	= "esan3",
		[HWCAP_NR_ZARCH]	= "zarch",
		[HWCAP_NR_STFLE]	= "stfle",
		[HWCAP_NR_MSA]		= "msa",
		[HWCAP_NR_LDISP]	= "ldisp",
		[HWCAP_NR_EIMM]		= "eimm",
		[HWCAP_NR_DFP]		= "dfp",
		[HWCAP_NR_HPAGE]	= "edat",
		[HWCAP_NR_ETF3EH]	= "etf3eh",
		[HWCAP_NR_HIGH_GPRS]	= "highgprs",
		[HWCAP_NR_TE]		= "te",
		[HWCAP_NR_VXRS]		= "vx",
		[HWCAP_NR_VXRS_BCD]	= "vxd",
		[HWCAP_NR_VXRS_EXT]	= "vxe",
		[HWCAP_NR_GS]		= "gs",
		[HWCAP_NR_VXRS_EXT2]	= "vxe2",
		[HWCAP_NR_VXRS_PDE]	= "vxp",
		[HWCAP_NR_SORT]		= "sort",
		[HWCAP_NR_DFLT]		= "dflt",
		[HWCAP_NR_VXRS_PDE2]	= "vxp2",
		[HWCAP_NR_NNPA]		= "nnpa",
		[HWCAP_NR_PCI_MIO]	= "pcimio",
		[HWCAP_NR_SIE]		= "sie",
	};
	int i, cpu;

	/* Catch new HWCAP_NR_* entries missing a name in the table above. */
	BUILD_BUG_ON(ARRAY_SIZE(hwcap_str) != HWCAP_NR_MAX);
	/* bogomips = loops_per_jiffy scaled to a whole and fractional part. */
	seq_printf(m, "vendor_id       : IBM/S390\n"
		   "# processors    : %i\n"
		   "bogomips per cpu: %lu.%02lu\n",
		   num_online_cpus(), loops_per_jiffy/(500000/HZ),
		   (loops_per_jiffy/(5000/HZ))%100);
	seq_printf(m, "max thread id   : %d\n", smp_cpu_mtid);
	seq_puts(m, "features\t: ");
	for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
		if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
			seq_printf(m, "%s ", hwcap_str[i]);
	seq_puts(m, "\n");
	show_facilities(m);
	show_cacheinfo(m);
	for_each_online_cpu(cpu) {
		struct cpuid *id = &per_cpu(cpu_info.cpu_id, cpu);

		seq_printf(m, "processor %d: "
			   "version = %02X,  "
			   "identification = %06X,  "
			   "machine = %04X\n",
			   cpu, id->version, id->ident, id->machine);
	}
}
181
/*
 * Probe the machine's facilities and assemble the elf_hwcap bit mask
 * that is reported to user space.  Ordering/nesting matters: the
 * vector extension bits are only valid when the base vector facility
 * (129) is installed.
 */
static int __init setup_hwcaps(void)
{
	/* instructions named N3, "backported" to esa-mode */
	elf_hwcap |= HWCAP_ESAN3;

	/* z/Architecture mode active */
	elf_hwcap |= HWCAP_ZARCH;

	/* store-facility-list-extended */
	if (test_facility(7))
		elf_hwcap |= HWCAP_STFLE;

	/* message-security assist */
	if (test_facility(17))
		elf_hwcap |= HWCAP_MSA;

	/* long-displacement */
	if (test_facility(19))
		elf_hwcap |= HWCAP_LDISP;

	/* extended-immediate */
	elf_hwcap |= HWCAP_EIMM;

	/* extended-translation facility 3 enhancement */
	if (test_facility(22) && test_facility(30))
		elf_hwcap |= HWCAP_ETF3EH;

	/* decimal floating point & perform floating point operation */
	if (test_facility(42) && test_facility(44))
		elf_hwcap |= HWCAP_DFP;

	/* huge page support */
	if (cpu_has_edat1())
		elf_hwcap |= HWCAP_HPAGE;

	/* 64-bit register support for 31-bit processes */
	elf_hwcap |= HWCAP_HIGH_GPRS;

	/* transactional execution */
	if (machine_has_tx())
		elf_hwcap |= HWCAP_TE;

	/* vector; extension bits are meaningful only with the base facility */
	if (test_facility(129)) {
		elf_hwcap |= HWCAP_VXRS;
		if (test_facility(134))
			elf_hwcap |= HWCAP_VXRS_BCD;
		if (test_facility(135))
			elf_hwcap |= HWCAP_VXRS_EXT;
		if (test_facility(148))
			elf_hwcap |= HWCAP_VXRS_EXT2;
		if (test_facility(152))
			elf_hwcap |= HWCAP_VXRS_PDE;
		if (test_facility(192))
			elf_hwcap |= HWCAP_VXRS_PDE2;
	}

	if (test_facility(150))
		elf_hwcap |= HWCAP_SORT;

	if (test_facility(151))
		elf_hwcap |= HWCAP_DFLT;

	if (test_facility(165))
		elf_hwcap |= HWCAP_NNPA;

	/* guarded storage */
	if (cpu_has_gs())
		elf_hwcap |= HWCAP_GS;

	if (test_machine_feature(MFEATURE_PCI_MIO))
		elf_hwcap |= HWCAP_PCI_MIO;

	/* virtualization support */
	if (sclp.has_sief2)
		elf_hwcap |= HWCAP_SIE;

	return 0;
}
arch_initcall(setup_hwcaps);
262
263 static int __init setup_elf_platform(void)
264 {
265         struct cpuid cpu_id;
266
267         get_cpu_id(&cpu_id);
268         add_device_randomness(&cpu_id, sizeof(cpu_id));
269         switch (cpu_id.machine) {
270         default:        /* Use "z10" as default. */
271                 strscpy(elf_platform, "z10");
272                 break;
273         case 0x2817:
274         case 0x2818:
275                 strscpy(elf_platform, "z196");
276                 break;
277         case 0x2827:
278         case 0x2828:
279                 strscpy(elf_platform, "zEC12");
280                 break;
281         case 0x2964:
282         case 0x2965:
283                 strscpy(elf_platform, "z13");
284                 break;
285         case 0x3906:
286         case 0x3907:
287                 strscpy(elf_platform, "z14");
288                 break;
289         case 0x8561:
290         case 0x8562:
291                 strscpy(elf_platform, "z15");
292                 break;
293         case 0x3931:
294         case 0x3932:
295                 strscpy(elf_platform, "z16");
296                 break;
297         case 0x9175:
298         case 0x9176:
299                 strscpy(elf_platform, "z17");
300                 break;
301         }
302         return 0;
303 }
304 arch_initcall(setup_elf_platform);
305
/*
 * Print the scheduler-topology attributes of logical CPU @n.
 * Compiles to a no-op without CONFIG_SCHED_TOPOLOGY.
 */
static void show_cpu_topology(struct seq_file *m, unsigned long n)
{
#ifdef CONFIG_SCHED_TOPOLOGY
	seq_printf(m, "physical id     : %d\n", topology_physical_package_id(n));
	seq_printf(m, "core id         : %d\n", topology_core_id(n));
	seq_printf(m, "book id         : %d\n", topology_book_id(n));
	seq_printf(m, "drawer id       : %d\n", topology_drawer_id(n));
	seq_printf(m, "dedicated       : %d\n", topology_cpu_dedicated(n));
	seq_printf(m, "address         : %d\n", smp_cpu_get_cpu_address(n));
	seq_printf(m, "siblings        : %d\n", cpumask_weight(topology_core_cpumask(n)));
	seq_printf(m, "cpu cores       : %d\n", topology_booted_cores(n));
#endif /* CONFIG_SCHED_TOPOLOGY */
}
319
320 static void show_cpu_ids(struct seq_file *m, unsigned long n)
321 {
322         struct cpuid *id = &per_cpu(cpu_info.cpu_id, n);
323
324         seq_printf(m, "version         : %02X\n", id->version);
325         seq_printf(m, "identification  : %06X\n", id->ident);
326         seq_printf(m, "machine         : %04X\n", id->machine);
327 }
328
329 static void show_cpu_mhz(struct seq_file *m, unsigned long n)
330 {
331         struct cpu_info *c = per_cpu_ptr(&cpu_info, n);
332
333         if (!machine_has_cpu_mhz)
334                 return;
335         seq_printf(m, "cpu MHz dynamic : %d\n", c->cpu_mhz_dynamic);
336         seq_printf(m, "cpu MHz static  : %d\n", c->cpu_mhz_static);
337 }
338
339 /*
340  * show_cpuinfo - Get information on one CPU for use by procfs.
341  */
342 static int show_cpuinfo(struct seq_file *m, void *v)
343 {
344         unsigned long n = (unsigned long) v - 1;
345         unsigned long first = cpumask_first(cpu_online_mask);
346
347         if (n == first)
348                 show_cpu_summary(m, v);
349         seq_printf(m, "\ncpu number      : %ld\n", n);
350         show_cpu_topology(m, n);
351         show_cpu_ids(m, n);
352         show_cpu_mhz(m, n);
353         return 0;
354 }
355
/*
 * Advance *pos to the next online CPU (or to the first online CPU when
 * *pos is zero) and return it as a seq_file iterator cookie.  The
 * cookie is the CPU number plus one so that CPU 0 is distinguishable
 * from the NULL end-of-sequence marker; show_cpuinfo() undoes the +1.
 */
static inline void *c_update(loff_t *pos)
{
	if (*pos)
		*pos = cpumask_next(*pos - 1, cpu_online_mask);
	else
		*pos = cpumask_first(cpu_online_mask);
	return *pos < nr_cpu_ids ? (void *)*pos + 1 : NULL;
}
364
/* seq_file start: pin CPU hotplug state for the duration of the walk. */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	cpus_read_lock();
	return c_update(pos);
}
370
/* seq_file next: step to the following online CPU. */
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_update(pos);
}
376
/* seq_file stop: release the hotplug lock taken in c_start(). */
static void c_stop(struct seq_file *m, void *v)
{
	cpus_read_unlock();
}
381
/* seq_file operations backing the architecture's cpuinfo output. */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};