xref: /linux-6.15/arch/x86/kernel/acpi/cppc.c (revision 2819bfef)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * cppc.c: CPPC Interface for x86
4  * Copyright (c) 2016, Intel Corporation.
5  */
6 
7 #include <acpi/cppc_acpi.h>
8 #include <asm/msr.h>
9 #include <asm/processor.h>
10 #include <asm/topology.h>
11 
12 /* Refer to drivers/acpi/cppc_acpi.c for the description of functions */
13 
14 bool cpc_supported_by_cpu(void)
15 {
16 	switch (boot_cpu_data.x86_vendor) {
17 	case X86_VENDOR_AMD:
18 	case X86_VENDOR_HYGON:
19 		if (boot_cpu_data.x86 == 0x19 && ((boot_cpu_data.x86_model <= 0x0f) ||
20 		    (boot_cpu_data.x86_model >= 0x20 && boot_cpu_data.x86_model <= 0x2f)))
21 			return true;
22 		else if (boot_cpu_data.x86 == 0x17 &&
23 			 boot_cpu_data.x86_model >= 0x30 && boot_cpu_data.x86_model <= 0x7f)
24 			return true;
25 		return boot_cpu_has(X86_FEATURE_CPPC);
26 	}
27 	return false;
28 }
29 
/* FFH (functional fixed hardware, i.e. MSR) access is always available here. */
bool cpc_ffh_supported(void)
{
	return true;
}
34 
35 int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
36 {
37 	int err;
38 
39 	err = rdmsrl_safe_on_cpu(cpunum, reg->address, val);
40 	if (!err) {
41 		u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
42 				       reg->bit_offset);
43 
44 		*val &= mask;
45 		*val >>= reg->bit_offset;
46 	}
47 	return err;
48 }
49 
50 int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
51 {
52 	u64 rd_val;
53 	int err;
54 
55 	err = rdmsrl_safe_on_cpu(cpunum, reg->address, &rd_val);
56 	if (!err) {
57 		u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
58 				       reg->bit_offset);
59 
60 		val <<= reg->bit_offset;
61 		val &= mask;
62 		rd_val &= ~mask;
63 		rd_val |= val;
64 		err = wrmsrl_safe_on_cpu(cpunum, reg->address, rd_val);
65 	}
66 	return err;
67 }
68 
69 static void amd_set_max_freq_ratio(void)
70 {
71 	struct cppc_perf_caps perf_caps;
72 	u64 numerator, nominal_perf;
73 	u64 perf_ratio;
74 	int rc;
75 
76 	rc = cppc_get_perf_caps(0, &perf_caps);
77 	if (rc) {
78 		pr_warn("Could not retrieve perf counters (%d)\n", rc);
79 		return;
80 	}
81 
82 	rc = amd_get_boost_ratio_numerator(0, &numerator);
83 	if (rc) {
84 		pr_warn("Could not retrieve highest performance (%d)\n", rc);
85 		return;
86 	}
87 	nominal_perf = perf_caps.nominal_perf;
88 
89 	if (!nominal_perf) {
90 		pr_warn("Could not retrieve nominal performance\n");
91 		return;
92 	}
93 
94 	/* midpoint between max_boost and max_P */
95 	perf_ratio = (div_u64(numerator * SCHED_CAPACITY_SCALE, nominal_perf) + SCHED_CAPACITY_SCALE) >> 1;
96 
97 	freq_invariance_set_perf_ratio(perf_ratio, false);
98 }
99 
100 static DEFINE_MUTEX(freq_invariance_lock);
101 
102 void init_freq_invariance_cppc(void)
103 {
104 	static bool init_done;
105 
106 	if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
107 		return;
108 
109 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
110 		return;
111 
112 	mutex_lock(&freq_invariance_lock);
113 	if (!init_done)
114 		amd_set_max_freq_ratio();
115 	init_done = true;
116 	mutex_unlock(&freq_invariance_lock);
117 }
118 
119 /*
120  * Get the highest performance register value.
121  * @cpu: CPU from which to get highest performance.
122  * @highest_perf: Return address for highest performance value.
123  *
124  * Return: 0 for success, negative error code otherwise.
125  */
126 int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf)
127 {
128 	u64 val;
129 	int ret;
130 
131 	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
132 		ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &val);
133 		if (ret)
134 			goto out;
135 
136 		val = AMD_CPPC_HIGHEST_PERF(val);
137 	} else {
138 		ret = cppc_get_highest_perf(cpu, &val);
139 		if (ret)
140 			goto out;
141 	}
142 
143 	WRITE_ONCE(*highest_perf, (u32)val);
144 out:
145 	return ret;
146 }
147 EXPORT_SYMBOL_GPL(amd_get_highest_perf);
148 
149 /**
150  * amd_get_boost_ratio_numerator: Get the numerator to use for boost ratio calculation
151  * @cpu: CPU to get numerator for.
152  * @numerator: Output variable for numerator.
153  *
154  * Determine the numerator to use for calculating the boost ratio on
155  * a CPU. On systems that support preferred cores, this will be a hardcoded
156  * value. On other systems this will the highest performance register value.
157  *
158  * Return: 0 for success, negative error code otherwise.
159  */
160 int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator)
161 {
162 	struct cpuinfo_x86 *c = &boot_cpu_data;
163 
164 	if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) ||
165 			       (c->x86_model >= 0x70 && c->x86_model < 0x80))) {
166 		*numerator = 166;
167 		return 0;
168 	}
169 
170 	if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) ||
171 			       (c->x86_model >= 0x40 && c->x86_model < 0x70))) {
172 		*numerator = 166;
173 		return 0;
174 	}
175 	*numerator = 255;
176 
177 	return 0;
178 }
179 EXPORT_SYMBOL_GPL(amd_get_boost_ratio_numerator);
180