xref: /linux-6.15/arch/x86/kernel/amd_node.c (revision 8a3dc0f7)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * AMD Node helper functions and common defines
 *
 * Copyright (c) 2024, Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Author: Yazen Ghannam <[email protected]>
 */

#include <asm/amd_node.h>

/*
 * AMD Nodes are a physical collection of I/O devices within an SoC. There can be one
 * or more nodes per package.
 *
 * The nodes are software-visible through PCI config space. All nodes are enumerated
 * on segment 0 bus 0. The device (slot) numbers range from 0x18 to 0x1F (maximum 8
 * nodes) with 0x18 corresponding to node 0, 0x19 to node 1, etc. Each node can be a
 * multi-function device.
 *
 * On legacy systems, these node devices represent integrated Northbridge functionality.
 * On Zen-based systems, these node devices represent Data Fabric functionality.
 *
 * See "Configuration Space Accesses" section in BKDGs or
 * "Processor x86 Core" -> "Configuration Space" section in PPRs.
 */
struct pci_dev *amd_node_get_func(u16 node, u8 func)
{
	if (node >= MAX_AMD_NUM_NODES)
		return NULL;

	return pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(AMD_NODE0_PCI_SLOT + node, func));
}
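
/*
 * Illustrative sketch (not part of the upstream file): how a caller might use
 * amd_node_get_func(). The function name and the chosen node/function/register
 * numbers below are hypothetical; the point shown is that the reference
 * obtained via pci_get_domain_bus_and_slot() must be dropped with
 * pci_dev_put() when the caller is done with it.
 */
#if 0
static void example_amd_node_func_usage(void)
{
	/* Function 3 of node 0, i.e. slot 0x18 function 3 on segment 0 bus 0. */
	struct pci_dev *f3 = amd_node_get_func(0, 3);
	u32 reg;

	if (!f3)
		return;

	/* Read an arbitrary config register from the node device. */
	if (!pci_read_config_dword(f3, 0x40, &reg))
		pr_info("node 0 func 3 reg 0x40: 0x%x\n", reg);

	pci_dev_put(f3);
}
#endif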

#define DF_BLK_INST_CNT		0x040
#define DF_CFG_ADDR_CNTL_LEGACY	0x084
#define DF_CFG_ADDR_CNTL_DF4	0xC04

#define DF_MAJOR_REVISION	GENMASK(27, 24)

static u16 get_cfg_addr_cntl_offset(struct pci_dev *df_f0)
{
	u32 reg;

	/*
	 * Revision fields added for DF4 and later.
	 *
	 * Major revision of '0' is found pre-DF4. Field is Read-as-Zero.
	 */
	if (pci_read_config_dword(df_f0, DF_BLK_INST_CNT, &reg))
		return 0;

	if (reg & DF_MAJOR_REVISION)
		return DF_CFG_ADDR_CNTL_DF4;

	return DF_CFG_ADDR_CNTL_LEGACY;
}

struct pci_dev *amd_node_get_root(u16 node)
{
	struct pci_dev *root;
	u16 cntl_off;
	u8 bus;

	if (!cpu_feature_enabled(X86_FEATURE_ZEN))
		return NULL;

	/*
	 * D18F0xXXX [Config Address Control] (DF::CfgAddressCntl)
	 * Bits [7:0] (SecBusNum) hold the bus number of the root device for
	 * this Data Fabric instance. The segment, device, and function will be 0.
	 */
	struct pci_dev *df_f0 __free(pci_dev_put) = amd_node_get_func(node, 0);
	if (!df_f0)
		return NULL;

	cntl_off = get_cfg_addr_cntl_offset(df_f0);
	if (!cntl_off)
		return NULL;

	if (pci_read_config_byte(df_f0, cntl_off, &bus))
		return NULL;

	/* Grab the pointer for the actual root device instance. */
	root = pci_get_domain_bus_and_slot(0, bus, 0);

	pci_dbg(root, "is root for AMD node %u\n", node);
	return root;
}

static struct pci_dev **amd_roots;

/* Protect the PCI config register pairs used for SMN. */
static DEFINE_MUTEX(smn_mutex);

#define SMN_INDEX_OFFSET	0x60
#define SMN_DATA_OFFSET		0x64

#define HSMP_INDEX_OFFSET	0xc4
#define HSMP_DATA_OFFSET	0xc8
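
/*
 * The offsets above form index/data register pairs in the root device's PCI
 * config space: __amd_smn_rw() below programs the target SMN address into the
 * index register and then transfers the value through the data register.
 * 0x60/0x64 are used for general SMN accesses, 0xc4/0xc8 for HSMP accesses.
 */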

/*
 * SMN accesses may fail in ways that are difficult to detect here in the called
 * functions amd_smn_read() and amd_smn_write(). Therefore, callers must do
 * their own checking based on what behavior they expect.
 *
 * For SMN reads, the returned value may be zero if the register is Read-as-Zero.
 * Or it may be a "PCI Error Response", e.g. all 0xFFs. The "PCI Error Response"
 * can be checked here, and a proper error code can be returned.
 *
 * But the Read-as-Zero response cannot be verified here. A value of 0 may be
 * correct in some cases, so callers must check that this value is correct for
 * the register/fields they need.
 *
 * For SMN writes, success can be determined through a "write and read back"
 * check. However, this is not robust when done here.
 *
 * Possible issues:
 *
 * 1) Bits that are "Write-1-to-Clear". In this case, the read value should
 *    *not* match the write value.
 *
 * 2) Bits that are "Read-as-Zero"/"Writes-Ignored". This information cannot be
 *    known here.
 *
 * 3) Bits that are "Reserved / Set to 1". Ditto above.
 *
 * Callers of amd_smn_write() should do the "write and read back" check
 * themselves, if needed.
 *
 * For #1, they can see if their target bits got cleared.
 *
 * For #2 and #3, they can check if their target bits got set as intended.
 *
 * This matches what is done for RDMSR/WRMSR. As long as there's no #GP, then
 * the operation is considered a success, and the caller does their own
 * checking.
 */
static int __amd_smn_rw(u8 i_off, u8 d_off, u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_num_nodes())
		return err;

	root = amd_roots[node];
	if (!root)
		return err;

	guard(mutex)(&smn_mutex);

	err = pci_write_config_dword(root, i_off, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		return pcibios_err_to_errno(err);
	}

	err = (write ? pci_write_config_dword(root, d_off, *value)
		     : pci_read_config_dword(root, d_off, value));

	return pcibios_err_to_errno(err);
}

int __must_check amd_smn_read(u16 node, u32 address, u32 *value)
{
	int err = __amd_smn_rw(SMN_INDEX_OFFSET, SMN_DATA_OFFSET, node, address, value, false);

	if (PCI_POSSIBLE_ERROR(*value)) {
		err = -ENODEV;
		*value = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(amd_smn_read);
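
/*
 * Illustrative sketch (not part of the upstream file): a caller of
 * amd_smn_read() doing its own checking, as described in the comment above.
 * The SMN address and the expected bit below are hypothetical.
 */
#if 0
static int example_smn_read_check(u16 node)
{
	u32 val;
	int err;

	err = amd_smn_read(node, 0x12345678, &val);
	if (err)
		return err;

	/*
	 * A value of 0 may mean the register is Read-as-Zero on this system,
	 * so validate the field(s) of interest rather than trusting 0 blindly.
	 */
	if (!(val & BIT(0)))
		return -ENODEV;

	return 0;
}
#endif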

int __must_check amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(SMN_INDEX_OFFSET, SMN_DATA_OFFSET, node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);
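
/*
 * Illustrative sketch (not part of the upstream file): the "write and read
 * back" check that callers of amd_smn_write() may need to do themselves, per
 * the comment above. The register address and mask here are hypothetical, and
 * the check only applies to normal read/write bits (not Write-1-to-Clear or
 * Read-as-Zero bits).
 */
#if 0
static int example_smn_write_check(u16 node, u32 addr, u32 mask)
{
	u32 val;
	int err;

	err = amd_smn_write(node, addr, mask);
	if (err)
		return err;

	err = amd_smn_read(node, addr, &val);
	if (err)
		return err;

	/* The target bits should now be set if the write took effect. */
	if ((val & mask) != mask)
		return -EIO;

	return 0;
}
#endif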

int __must_check amd_smn_hsmp_rdwr(u16 node, u32 address, u32 *value, bool write)
{
	return __amd_smn_rw(HSMP_INDEX_OFFSET, HSMP_DATA_OFFSET, node, address, value, write);
}
EXPORT_SYMBOL_GPL(amd_smn_hsmp_rdwr);

static int amd_cache_roots(void)
{
	u16 node, num_nodes = amd_num_nodes();

	amd_roots = kcalloc(num_nodes, sizeof(*amd_roots), GFP_KERNEL);
	if (!amd_roots)
		return -ENOMEM;

	for (node = 0; node < num_nodes; node++)
		amd_roots[node] = amd_node_get_root(node);

	return 0;
}

static int __init amd_smn_init(void)
{
	int err;

	if (!cpu_feature_enabled(X86_FEATURE_ZEN))
		return 0;

	guard(mutex)(&smn_mutex);

	if (amd_roots)
		return 0;

	err = amd_cache_roots();
	if (err)
		return err;

	return 0;
}

fs_initcall(amd_smn_init);