xref: /linux-6.15/arch/s390/kernel/processor.c (revision 449fbd71)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Copyright IBM Corp. 2008
4  *  Author(s): Martin Schwidefsky ([email protected])
5  */
6 
7 #define KMSG_COMPONENT "cpu"
8 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
9 
10 #include <linux/stop_machine.h>
11 #include <linux/cpufeature.h>
12 #include <linux/bitops.h>
13 #include <linux/kernel.h>
14 #include <linux/random.h>
15 #include <linux/sched/mm.h>
16 #include <linux/init.h>
17 #include <linux/seq_file.h>
18 #include <linux/mm_types.h>
19 #include <linux/delay.h>
20 #include <linux/cpu.h>
21 
22 #include <asm/diag.h>
23 #include <asm/facility.h>
24 #include <asm/elf.h>
25 #include <asm/lowcore.h>
26 #include <asm/param.h>
27 #include <asm/sclp.h>
28 #include <asm/smp.h>
29 
/* Hardware-capability bits, filled in by setup_hwcaps() at arch_initcall time. */
unsigned long __read_mostly elf_hwcap;
/* Machine-name string ("z900" ... "z15"), set by setup_elf_platform(). */
char elf_platform[ELF_PLATFORM_SIZE];

/*
 * Additional capability bits (e.g. SIE support); printed on the
 * /proc/cpuinfo "features" line next to the elf_hwcap entries.
 */
unsigned long int_hwcap;

/* Per-CPU data shown in /proc/cpuinfo. */
struct cpu_info {
	unsigned int cpu_mhz_dynamic;	/* upper word of the ECAG CPU attribute */
	unsigned int cpu_mhz_static;	/* lower word of the ECAG CPU attribute */
	struct cpuid cpu_id;		/* result of get_cpu_id() on this CPU */
};

static DEFINE_PER_CPU(struct cpu_info, cpu_info);
/* Number of stop_machine_yield() calls since the last yield attempt. */
static DEFINE_PER_CPU(int, cpu_relax_retry);

/* True once cpu_detect_mhz_feature() has probed the ECAG CPU attribute. */
static bool machine_has_cpu_mhz;
45 
46 void __init cpu_detect_mhz_feature(void)
47 {
48 	if (test_facility(34) && __ecag(ECAG_CPU_ATTRIBUTE, 0) != -1UL)
49 		machine_has_cpu_mhz = true;
50 }
51 
52 static void update_cpu_mhz(void *arg)
53 {
54 	unsigned long mhz;
55 	struct cpu_info *c;
56 
57 	mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);
58 	c = this_cpu_ptr(&cpu_info);
59 	c->cpu_mhz_dynamic = mhz >> 32;
60 	c->cpu_mhz_static = mhz & 0xffffffff;
61 }
62 
/*
 * Re-derive frequency-dependent state after a CPU capability change:
 * first adjust the jiffies calibration, then (if the machine reports
 * frequency data) refresh the per-CPU MHz values on every online CPU.
 * The on_each_cpu() call does not wait for completion (last arg 0).
 */
void s390_update_cpu_mhz(void)
{
	s390_adjust_jiffies();
	if (machine_has_cpu_mhz)
		on_each_cpu(update_cpu_mhz, NULL, 0);
}
69 
70 void notrace stop_machine_yield(const struct cpumask *cpumask)
71 {
72 	int cpu, this_cpu;
73 
74 	this_cpu = smp_processor_id();
75 	if (__this_cpu_inc_return(cpu_relax_retry) >= spin_retry) {
76 		__this_cpu_write(cpu_relax_retry, 0);
77 		cpu = cpumask_next_wrap(this_cpu, cpumask, this_cpu, false);
78 		if (cpu >= nr_cpu_ids)
79 			return;
80 		if (arch_vcpu_is_preempted(cpu))
81 			smp_yield_cpu(cpu);
82 	}
83 }
84 
/*
 * cpu_init - initializes state that is per-CPU.
 *
 * Called on each CPU as it comes up: records the CPU id in the per-CPU
 * cpu_info, seeds the MHz values if available, and attaches the CPU to
 * init_mm as its lazy active_mm.  Must run before the CPU schedules
 * user tasks (BUG_ON enforces that no user mm is set yet).
 */
void cpu_init(void)
{
	struct cpuid *id = this_cpu_ptr(&cpu_info.cpu_id);

	get_cpu_id(id);
	if (machine_has_cpu_mhz)
		update_cpu_mhz(NULL);
	/* Take a reference on init_mm before making it this CPU's active_mm. */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);
}
100 
101 /*
102  * cpu_have_feature - Test CPU features on module initialization
103  */
104 int cpu_have_feature(unsigned int num)
105 {
106 	return elf_hwcap & (1UL << num);
107 }
108 EXPORT_SYMBOL(cpu_have_feature);
109 
/*
 * Emit the "facilities" line of /proc/cpuinfo: the number of every
 * installed facility bit, walked in s390 (inverted/MSB-first) bit
 * order over the stfle facility list.
 */
static void show_facilities(struct seq_file *m)
{
	unsigned int bit;

	seq_puts(m, "facilities      :");
	/* cast: stfle_fac_list is treated as a bitmap of longs here */
	for_each_set_bit_inv(bit, (long *)&stfle_fac_list, MAX_FACILITY_BIT)
		seq_printf(m, " %d", bit);
	seq_putc(m, '\n');
}
119 
/*
 * Print the machine-wide header of /proc/cpuinfo: vendor, CPU count,
 * bogomips, thread id, the "features" line (elf_hwcap then int_hwcap
 * names), facility bits, cache info, and one id line per online CPU.
 * Emitted only once, for the first online CPU (see show_cpuinfo()).
 */
static void show_cpu_summary(struct seq_file *m, void *v)
{
	/* Names indexed by HWCAP_NR_* bit number; order must match asm/elf.h. */
	static const char *hwcap_str[] = {
		[HWCAP_NR_ESAN3]	= "esan3",
		[HWCAP_NR_ZARCH]	= "zarch",
		[HWCAP_NR_STFLE]	= "stfle",
		[HWCAP_NR_MSA]		= "msa",
		[HWCAP_NR_LDISP]	= "ldisp",
		[HWCAP_NR_EIMM]		= "eimm",
		[HWCAP_NR_DFP]		= "dfp",
		[HWCAP_NR_HPAGE]	= "edat",
		[HWCAP_NR_ETF3EH]	= "etf3eh",
		[HWCAP_NR_HIGH_GPRS]	= "highgprs",
		[HWCAP_NR_TE]		= "te",
		[HWCAP_NR_VXRS]		= "vx",
		[HWCAP_NR_VXRS_BCD]	= "vxd",
		[HWCAP_NR_VXRS_EXT]	= "vxe",
		[HWCAP_NR_GS]		= "gs",
		[HWCAP_NR_VXRS_EXT2]	= "vxe2",
		[HWCAP_NR_VXRS_PDE]	= "vxp",
		[HWCAP_NR_SORT]		= "sort",
		[HWCAP_NR_DFLT]		= "dflt",
		[HWCAP_NR_VXRS_PDE2]	= "vxp2",
		[HWCAP_NR_NNPA]		= "nnpa",
		[HWCAP_NR_PCI_MIO]	= "pcimio",
	};
	static const char * const int_hwcap_str[] = {
		[HWCAP_INT_NR_SIE]	= "sie",
	};
	int i, cpu;

	/* Catch new HWCAP bits added without a matching name entry. */
	BUILD_BUG_ON(ARRAY_SIZE(hwcap_str) != HWCAP_NR_MAX);
	BUILD_BUG_ON(ARRAY_SIZE(int_hwcap_str) != HWCAP_INT_NR_MAX);
	seq_printf(m, "vendor_id       : IBM/S390\n"
		   "# processors    : %i\n"
		   "bogomips per cpu: %lu.%02lu\n",
		   num_online_cpus(), loops_per_jiffy/(500000/HZ),
		   (loops_per_jiffy/(5000/HZ))%100);
	seq_printf(m, "max thread id   : %d\n", smp_cpu_mtid);
	seq_puts(m, "features\t: ");
	for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
		if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
			seq_printf(m, "%s ", hwcap_str[i]);
	for (i = 0; i < ARRAY_SIZE(int_hwcap_str); i++)
		if (int_hwcap_str[i] && (int_hwcap & (1UL << i)))
			seq_printf(m, "%s ", int_hwcap_str[i]);
	seq_puts(m, "\n");
	show_facilities(m);
	show_cacheinfo(m);
	for_each_online_cpu(cpu) {
		struct cpuid *id = &per_cpu(cpu_info.cpu_id, cpu);

		seq_printf(m, "processor %d: "
			   "version = %02X,  "
			   "identification = %06X,  "
			   "machine = %04X\n",
			   cpu, id->version, id->ident, id->machine);
	}
}
179 
180 static int __init setup_hwcaps(void)
181 {
182 	/* instructions named N3, "backported" to esa-mode */
183 	if (test_facility(0))
184 		elf_hwcap |= HWCAP_ESAN3;
185 
186 	/* z/Architecture mode active */
187 	if (test_facility(2))
188 		elf_hwcap |= HWCAP_ZARCH;
189 
190 	/* store-facility-list-extended */
191 	if (test_facility(7))
192 		elf_hwcap |= HWCAP_STFLE;
193 
194 	/* message-security assist */
195 	if (test_facility(17))
196 		elf_hwcap |= HWCAP_MSA;
197 
198 	/* long-displacement */
199 	if (test_facility(19))
200 		elf_hwcap |= HWCAP_LDISP;
201 
202 	/* extended-immediate */
203 	if (test_facility(21))
204 		elf_hwcap |= HWCAP_EIMM;
205 
206 	/* extended-translation facility 3 enhancement */
207 	if (test_facility(22) && test_facility(30))
208 		elf_hwcap |= HWCAP_ETF3EH;
209 
210 	/* decimal floating point & perform floating point operation */
211 	if ((elf_hwcap & (1UL << 2)) && test_facility(42) && test_facility(44))
212 		elf_hwcap |= HWCAP_DFP;
213 
214 	/* huge page support */
215 	if (MACHINE_HAS_EDAT1)
216 		elf_hwcap |= HWCAP_HPAGE;
217 
218 	/* 64-bit register support for 31-bit processes */
219 	elf_hwcap |= HWCAP_HIGH_GPRS;
220 
221 	/* transactional execution */
222 	if (MACHINE_HAS_TE)
223 		elf_hwcap |= HWCAP_TE;
224 
225 	/*
226 	 * Vector extension can be disabled with the "novx" parameter.
227 	 * Use MACHINE_HAS_VX instead of facility bit 129.
228 	 */
229 	if (MACHINE_HAS_VX) {
230 		elf_hwcap |= HWCAP_VXRS;
231 		if (test_facility(134))
232 			elf_hwcap |= HWCAP_VXRS_BCD;
233 		if (test_facility(135))
234 			elf_hwcap |= HWCAP_VXRS_EXT;
235 		if (test_facility(148))
236 			elf_hwcap |= HWCAP_VXRS_EXT2;
237 		if (test_facility(152))
238 			elf_hwcap |= HWCAP_VXRS_PDE;
239 		if (test_facility(192))
240 			elf_hwcap |= HWCAP_VXRS_PDE2;
241 	}
242 
243 	if (test_facility(150))
244 		elf_hwcap |= HWCAP_SORT;
245 
246 	if (test_facility(151))
247 		elf_hwcap |= HWCAP_DFLT;
248 
249 	if (test_facility(165))
250 		elf_hwcap |= HWCAP_NNPA;
251 
252 	/* guarded storage */
253 	if (MACHINE_HAS_GS)
254 		elf_hwcap |= HWCAP_GS;
255 
256 	if (MACHINE_HAS_PCI_MIO)
257 		elf_hwcap |= HWCAP_PCI_MIO;
258 
259 	/* virtualization support */
260 	if (sclp.has_sief2)
261 		int_hwcap |= HWCAP_INT_SIE;
262 
263 	return 0;
264 }
265 arch_initcall(setup_hwcaps);
266 
/*
 * Derive the ELF platform string from the machine type of the boot
 * CPU and feed the CPU id into the entropy pool.  Each machine
 * generation has two model numbers (regular and business-class),
 * hence the paired fallthrough cases.  Note the default label sits
 * with the first case group: unknown machines report "z900".
 */
static int __init setup_elf_platform(void)
{
	struct cpuid cpu_id;

	get_cpu_id(&cpu_id);
	add_device_randomness(&cpu_id, sizeof(cpu_id));
	switch (cpu_id.machine) {
	case 0x2064:
	case 0x2066:
	default:	/* Use "z900" as default for 64 bit kernels. */
		strcpy(elf_platform, "z900");
		break;
	case 0x2084:
	case 0x2086:
		strcpy(elf_platform, "z990");
		break;
	case 0x2094:
	case 0x2096:
		strcpy(elf_platform, "z9-109");
		break;
	case 0x2097:
	case 0x2098:
		strcpy(elf_platform, "z10");
		break;
	case 0x2817:
	case 0x2818:
		strcpy(elf_platform, "z196");
		break;
	case 0x2827:
	case 0x2828:
		strcpy(elf_platform, "zEC12");
		break;
	case 0x2964:
	case 0x2965:
		strcpy(elf_platform, "z13");
		break;
	case 0x3906:
	case 0x3907:
		strcpy(elf_platform, "z14");
		break;
	case 0x8561:
	case 0x8562:
		strcpy(elf_platform, "z15");
		break;
	}
	return 0;
}
arch_initcall(setup_elf_platform);
315 
/*
 * Print the topology section for CPU @n in /proc/cpuinfo.
 * Compiled out entirely (prints nothing) without CONFIG_SCHED_TOPOLOGY.
 */
static void show_cpu_topology(struct seq_file *m, unsigned long n)
{
#ifdef CONFIG_SCHED_TOPOLOGY
	seq_printf(m, "physical id     : %d\n", topology_physical_package_id(n));
	seq_printf(m, "core id         : %d\n", topology_core_id(n));
	seq_printf(m, "book id         : %d\n", topology_book_id(n));
	seq_printf(m, "drawer id       : %d\n", topology_drawer_id(n));
	seq_printf(m, "dedicated       : %d\n", topology_cpu_dedicated(n));
	seq_printf(m, "address         : %d\n", smp_cpu_get_cpu_address(n));
	seq_printf(m, "siblings        : %d\n", cpumask_weight(topology_core_cpumask(n)));
	seq_printf(m, "cpu cores       : %d\n", topology_booted_cores(n));
#endif /* CONFIG_SCHED_TOPOLOGY */
}
329 
330 static void show_cpu_ids(struct seq_file *m, unsigned long n)
331 {
332 	struct cpuid *id = &per_cpu(cpu_info.cpu_id, n);
333 
334 	seq_printf(m, "version         : %02X\n", id->version);
335 	seq_printf(m, "identification  : %06X\n", id->ident);
336 	seq_printf(m, "machine         : %04X\n", id->machine);
337 }
338 
339 static void show_cpu_mhz(struct seq_file *m, unsigned long n)
340 {
341 	struct cpu_info *c = per_cpu_ptr(&cpu_info, n);
342 
343 	if (!machine_has_cpu_mhz)
344 		return;
345 	seq_printf(m, "cpu MHz dynamic : %d\n", c->cpu_mhz_dynamic);
346 	seq_printf(m, "cpu MHz static  : %d\n", c->cpu_mhz_static);
347 }
348 
/*
 * show_cpuinfo - Get information on one CPU for use by procfs.
 *
 * @v is the 1-based CPU cookie produced by c_update(); subtracting 1
 * recovers the CPU number.  The machine-wide summary is printed only
 * for the first online CPU, then the per-CPU sections follow.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	unsigned long n = (unsigned long) v - 1;
	unsigned long first = cpumask_first(cpu_online_mask);

	if (n == first)
		show_cpu_summary(m, v);
	seq_printf(m, "\ncpu number      : %ld\n", n);
	show_cpu_topology(m, n);
	show_cpu_ids(m, n);
	show_cpu_mhz(m, n);
	return 0;
}
365 
/*
 * Map the seq_file position onto the next online CPU, updating *pos
 * in place.  Returns the CPU number encoded as a 1-based cookie
 * ((void *)cpu + 1) so that CPU 0 is not mistaken for the NULL
 * end-of-sequence marker; show_cpuinfo() subtracts the 1 again.
 * Returns NULL when no online CPU at or after *pos exists.
 */
static inline void *c_update(loff_t *pos)
{
	if (*pos)
		*pos = cpumask_next(*pos - 1, cpu_online_mask);
	else
		*pos = cpumask_first(cpu_online_mask);
	return *pos < nr_cpu_ids ? (void *)*pos + 1 : NULL;
}
374 
/*
 * seq_file start: pin CPU hotplug state for the whole iteration
 * (released in c_stop()) and return the first CPU cookie.
 */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	get_online_cpus();
	return c_update(pos);
}
380 
/* seq_file next: advance the position and return the next CPU cookie. */
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_update(pos);
}
386 
/* seq_file stop: drop the hotplug reference taken in c_start(). */
static void c_stop(struct seq_file *m, void *v)
{
	put_online_cpus();
}
391 
/* seq_file operations backing /proc/cpuinfo on s390. */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};
398 
399 int s390_isolate_bp(void)
400 {
401 	if (!test_facility(82))
402 		return -EOPNOTSUPP;
403 	set_thread_flag(TIF_ISOLATE_BP);
404 	return 0;
405 }
406 EXPORT_SYMBOL(s390_isolate_bp);
407 
408 int s390_isolate_bp_guest(void)
409 {
410 	if (!test_facility(82))
411 		return -EOPNOTSUPP;
412 	set_thread_flag(TIF_ISOLATE_BP_GUEST);
413 	return 0;
414 }
415 EXPORT_SYMBOL(s390_isolate_bp_guest);
416