xref: /linux-6.15/arch/x86/kernel/amd_node.c (revision bebe0afb)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * AMD Node helper functions and common defines
4  *
5  * Copyright (c) 2024, Advanced Micro Devices, Inc.
6  * All Rights Reserved.
7  *
8  * Author: Yazen Ghannam <[email protected]>
9  */
10 
11 #include <asm/amd_node.h>
12 
13 /*
14  * AMD Nodes are a physical collection of I/O devices within an SoC. There can be one
15  * or more nodes per package.
16  *
17  * The nodes are software-visible through PCI config space. All nodes are enumerated
18  * on segment 0 bus 0. The device (slot) numbers range from 0x18 to 0x1F (maximum 8
19  * nodes) with 0x18 corresponding to node 0, 0x19 to node 1, etc. Each node can be a
20  * multi-function device.
21  *
22  * On legacy systems, these node devices represent integrated Northbridge functionality.
23  * On Zen-based systems, these node devices represent Data Fabric functionality.
24  *
25  * See "Configuration Space Accesses" section in BKDGs or
26  * "Processor x86 Core" -> "Configuration Space" section in PPRs.
27  */
28 struct pci_dev *amd_node_get_func(u16 node, u8 func)
29 {
30 	if (node >= MAX_AMD_NUM_NODES)
31 		return NULL;
32 
33 	return pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(AMD_NODE0_PCI_SLOT + node, func));
34 }
35 
36 #define DF_BLK_INST_CNT		0x040
37 #define	DF_CFG_ADDR_CNTL_LEGACY	0x084
38 #define	DF_CFG_ADDR_CNTL_DF4	0xC04
39 
40 #define DF_MAJOR_REVISION	GENMASK(27, 24)
41 
42 static u16 get_cfg_addr_cntl_offset(struct pci_dev *df_f0)
43 {
44 	u32 reg;
45 
46 	/*
47 	 * Revision fields added for DF4 and later.
48 	 *
49 	 * Major revision of '0' is found pre-DF4. Field is Read-as-Zero.
50 	 */
51 	if (pci_read_config_dword(df_f0, DF_BLK_INST_CNT, &reg))
52 		return 0;
53 
54 	if (reg & DF_MAJOR_REVISION)
55 		return DF_CFG_ADDR_CNTL_DF4;
56 
57 	return DF_CFG_ADDR_CNTL_LEGACY;
58 }
59 
60 struct pci_dev *amd_node_get_root(u16 node)
61 {
62 	struct pci_dev *root;
63 	u16 cntl_off;
64 	u8 bus;
65 
66 	if (!cpu_feature_enabled(X86_FEATURE_ZEN))
67 		return NULL;
68 
69 	/*
70 	 * D18F0xXXX [Config Address Control] (DF::CfgAddressCntl)
71 	 * Bits [7:0] (SecBusNum) holds the bus number of the root device for
72 	 * this Data Fabric instance. The segment, device, and function will be 0.
73 	 */
74 	struct pci_dev *df_f0 __free(pci_dev_put) = amd_node_get_func(node, 0);
75 	if (!df_f0)
76 		return NULL;
77 
78 	cntl_off = get_cfg_addr_cntl_offset(df_f0);
79 	if (!cntl_off)
80 		return NULL;
81 
82 	if (pci_read_config_byte(df_f0, cntl_off, &bus))
83 		return NULL;
84 
85 	/* Grab the pointer for the actual root device instance. */
86 	root = pci_get_domain_bus_and_slot(0, bus, 0);
87 
88 	pci_dbg(root, "is root for AMD node %u\n", node);
89 	return root;
90 }
91 
92 static struct pci_dev **amd_roots;
93 
94 /* Protect the PCI config register pairs used for SMN. */
95 static DEFINE_MUTEX(smn_mutex);
96 static bool smn_exclusive;
97 
98 #define SMN_INDEX_OFFSET	0x60
99 #define SMN_DATA_OFFSET		0x64
100 
101 #define HSMP_INDEX_OFFSET	0xc4
102 #define HSMP_DATA_OFFSET	0xc8
103 
104 /*
105  * SMN accesses may fail in ways that are difficult to detect here in the called
106  * functions amd_smn_read() and amd_smn_write(). Therefore, callers must do
107  * their own checking based on what behavior they expect.
108  *
109  * For SMN reads, the returned value may be zero if the register is Read-as-Zero.
110  * Or it may be a "PCI Error Response", e.g. all 0xFFs. The "PCI Error Response"
111  * can be checked here, and a proper error code can be returned.
112  *
 * But the Read-as-Zero response cannot be verified here. A value of 0 may be
 * correct in some cases, so callers must check that this value is correct for
 * the register/fields they need.
116  *
 * For SMN writes, success can be determined through a "write and read back".
 * However, this is not robust when done here.
119  *
120  * Possible issues:
121  *
122  * 1) Bits that are "Write-1-to-Clear". In this case, the read value should
123  *    *not* match the write value.
124  *
125  * 2) Bits that are "Read-as-Zero"/"Writes-Ignored". This information cannot be
126  *    known here.
127  *
128  * 3) Bits that are "Reserved / Set to 1". Ditto above.
129  *
130  * Callers of amd_smn_write() should do the "write and read back" check
131  * themselves, if needed.
132  *
133  * For #1, they can see if their target bits got cleared.
134  *
135  * For #2 and #3, they can check if their target bits got set as intended.
136  *
137  * This matches what is done for RDMSR/WRMSR. As long as there's no #GP, then
138  * the operation is considered a success, and the caller does their own
139  * checking.
140  */
/*
 * Core SMN accessor: program the index register at @i_off with the SMN
 * @address, then read or write the data register at @d_off through the
 * cached root device of @node. Direction is selected by @write; on reads
 * the result is stored through @value.
 *
 * Returns 0 on success or a negative errno. Note that on the early
 * -ENODEV paths *value is left untouched.
 */
static int __amd_smn_rw(u8 i_off, u8 d_off, u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_num_nodes())
		return err;

	root = amd_roots[node];
	if (!root)
		return err;

	/*
	 * Refuse access until the root config spaces have been reserved,
	 * i.e. nothing else can race the index/data pair from user space.
	 */
	if (!smn_exclusive)
		return err;

	/* Serialize all index/data pair accesses. */
	guard(mutex)(&smn_mutex);

	err = pci_write_config_dword(root, i_off, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		return pcibios_err_to_errno(err);
	}

	err = (write ? pci_write_config_dword(root, d_off, *value)
		     : pci_read_config_dword(root, d_off, value));

	return pcibios_err_to_errno(err);
}
169 
170 int __must_check amd_smn_read(u16 node, u32 address, u32 *value)
171 {
172 	int err = __amd_smn_rw(SMN_INDEX_OFFSET, SMN_DATA_OFFSET, node, address, value, false);
173 
174 	if (PCI_POSSIBLE_ERROR(*value)) {
175 		err = -ENODEV;
176 		*value = 0;
177 	}
178 
179 	return err;
180 }
181 EXPORT_SYMBOL_GPL(amd_smn_read);
182 
183 int __must_check amd_smn_write(u16 node, u32 address, u32 value)
184 {
185 	return __amd_smn_rw(SMN_INDEX_OFFSET, SMN_DATA_OFFSET, node, address, &value, true);
186 }
187 EXPORT_SYMBOL_GPL(amd_smn_write);
188 
189 int __must_check amd_smn_hsmp_rdwr(u16 node, u32 address, u32 *value, bool write)
190 {
191 	return __amd_smn_rw(HSMP_INDEX_OFFSET, HSMP_DATA_OFFSET, node, address, value, write);
192 }
193 EXPORT_SYMBOL_GPL(amd_smn_hsmp_rdwr);
194 
195 static int amd_cache_roots(void)
196 {
197 	u16 node, num_nodes = amd_num_nodes();
198 
199 	amd_roots = kcalloc(num_nodes, sizeof(*amd_roots), GFP_KERNEL);
200 	if (!amd_roots)
201 		return -ENOMEM;
202 
203 	for (node = 0; node < num_nodes; node++)
204 		amd_roots[node] = amd_node_get_root(node);
205 
206 	return 0;
207 }
208 
209 static int reserve_root_config_spaces(void)
210 {
211 	struct pci_dev *root = NULL;
212 	struct pci_bus *bus = NULL;
213 
214 	while ((bus = pci_find_next_bus(bus))) {
215 		/* Root device is Device 0 Function 0 on each Primary Bus. */
216 		root = pci_get_slot(bus, 0);
217 		if (!root)
218 			continue;
219 
220 		if (root->vendor != PCI_VENDOR_ID_AMD &&
221 		    root->vendor != PCI_VENDOR_ID_HYGON)
222 			continue;
223 
224 		pci_dbg(root, "Reserving PCI config space\n");
225 
226 		/*
227 		 * There are a few SMN index/data pairs and other registers
228 		 * that shouldn't be accessed by user space.
229 		 * So reserve the entire PCI config space for simplicity rather
230 		 * than covering specific registers piecemeal.
231 		 */
232 		if (!pci_request_config_region_exclusive(root, 0, PCI_CFG_SPACE_SIZE, NULL)) {
233 			pci_err(root, "Failed to reserve config space\n");
234 			return -EEXIST;
235 		}
236 	}
237 
238 	smn_exclusive = true;
239 	return 0;
240 }
241 
242 static int __init amd_smn_init(void)
243 {
244 	int err;
245 
246 	if (!cpu_feature_enabled(X86_FEATURE_ZEN))
247 		return 0;
248 
249 	guard(mutex)(&smn_mutex);
250 
251 	if (amd_roots)
252 		return 0;
253 
254 	err = amd_cache_roots();
255 	if (err)
256 		return err;
257 
258 	err = reserve_root_config_spaces();
259 	if (err)
260 		return err;
261 
262 	return 0;
263 }
264 
265 fs_initcall(amd_smn_init);
266