// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * AMD Node helper functions and common defines
 *
 * Copyright (c) 2024, Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Author: Yazen Ghannam <[email protected]>
 */

#include <linux/debugfs.h>
#include <asm/amd_node.h>

/*
 * AMD Nodes are a physical collection of I/O devices within an SoC. There can be one
 * or more nodes per package.
 *
 * The nodes are software-visible through PCI config space. All nodes are enumerated
 * on segment 0 bus 0. The device (slot) numbers range from 0x18 to 0x1F (maximum 8
 * nodes) with 0x18 corresponding to node 0, 0x19 to node 1, etc. Each node can be a
 * multi-function device.
 *
 * On legacy systems, these node devices represent integrated Northbridge functionality.
 * On Zen-based systems, these node devices represent Data Fabric functionality.
 *
 * See "Configuration Space Accesses" section in BKDGs or
 * "Processor x86 Core" -> "Configuration Space" section in PPRs.
 */
struct pci_dev *amd_node_get_func(u16 node, u8 func)
{
	if (node >= MAX_AMD_NUM_NODES)
		return NULL;

	return pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(AMD_NODE0_PCI_SLOT + node, func));
}
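/*
 * Illustrative usage (not taken from a real caller): node 1, function 3
 * corresponds to device 0x18 + 1 = 0x19, i.e. 0000:00:19.3. The returned
 * reference must be dropped with pci_dev_put(), e.g. via the cleanup helper
 * already used in this file:
 *
 *	struct pci_dev *f3 __free(pci_dev_put) = amd_node_get_func(1, 3);
 *
 *	if (!f3)
 *		return -ENODEV;
 */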

#define DF_BLK_INST_CNT		0x040
#define	DF_CFG_ADDR_CNTL_LEGACY	0x084
#define	DF_CFG_ADDR_CNTL_DF4	0xC04

#define DF_MAJOR_REVISION	GENMASK(27, 24)

static u16 get_cfg_addr_cntl_offset(struct pci_dev *df_f0)
{
	u32 reg;

	/*
	 * Revision fields added for DF4 and later.
	 *
	 * Major revision of '0' is found pre-DF4. Field is Read-as-Zero.
	 */
	if (pci_read_config_dword(df_f0, DF_BLK_INST_CNT, &reg))
		return 0;

	if (reg & DF_MAJOR_REVISION)
		return DF_CFG_ADDR_CNTL_DF4;

	return DF_CFG_ADDR_CNTL_LEGACY;
}

struct pci_dev *amd_node_get_root(u16 node)
{
	struct pci_dev *root;
	u16 cntl_off;
	u8 bus;

	if (!cpu_feature_enabled(X86_FEATURE_ZEN))
		return NULL;

	/*
	 * D18F0xXXX [Config Address Control] (DF::CfgAddressCntl)
	 * Bits [7:0] (SecBusNum) hold the bus number of the root device for
	 * this Data Fabric instance. The segment, device, and function will be 0.
	 */
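	/*
	 * For example, if SecBusNum reads back as 0x40 (a hypothetical value),
	 * the root device for this node is 0000:40:00.0.
	 */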
	struct pci_dev *df_f0 __free(pci_dev_put) = amd_node_get_func(node, 0);
	if (!df_f0)
		return NULL;

	cntl_off = get_cfg_addr_cntl_offset(df_f0);
	if (!cntl_off)
		return NULL;

	if (pci_read_config_byte(df_f0, cntl_off, &bus))
		return NULL;

	/* Grab the pointer for the actual root device instance. */
	root = pci_get_domain_bus_and_slot(0, bus, 0);

	pci_dbg(root, "is root for AMD node %u\n", node);
	return root;
}

static struct pci_dev **amd_roots;

/* Protect the PCI config register pairs used for SMN. */
static DEFINE_MUTEX(smn_mutex);
static bool smn_exclusive;

#define SMN_INDEX_OFFSET	0x60
#define SMN_DATA_OFFSET		0x64

#define HSMP_INDEX_OFFSET	0xc4
#define HSMP_DATA_OFFSET	0xc8

/*
 * SMN accesses may fail in ways that are difficult to detect here in the called
 * functions amd_smn_read() and amd_smn_write(). Therefore, callers must do
 * their own checking based on what behavior they expect.
 *
 * For SMN reads, the returned value may be zero if the register is Read-as-Zero.
 * Or it may be a "PCI Error Response", e.g. all 0xFFs. The "PCI Error Response"
 * can be checked here, and a proper error code can be returned.
 *
 * But the Read-as-Zero response cannot be verified here. A value of 0 may be
 * correct in some cases, so callers must check that it is correct for the
 * register/fields they need.
 *
 * For SMN writes, success can be determined through a "write and read back".
 * However, this is not robust when done here.
 *
 * Possible issues:
 *
 * 1) Bits that are "Write-1-to-Clear". In this case, the read value should
 *    *not* match the write value.
 *
 * 2) Bits that are "Read-as-Zero"/"Writes-Ignored". This information cannot be
 *    known here.
 *
 * 3) Bits that are "Reserved / Set to 1". Ditto above.
 *
 * Callers of amd_smn_write() should do the "write and read back" check
 * themselves, if needed, as sketched in the example below.
 *
 * For #1, they can see if their target bits got cleared.
 *
 * For #2 and #3, they can check if their target bits got set as intended.
 *
 * This matches what is done for RDMSR/WRMSR. As long as there's no #GP, then
 * the operation is considered a success, and the caller does their own
 * checking.
 */
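/*
 * A minimal caller-side sketch of the "write and read back" check described
 * above. The register offset MY_SMN_REG and mask MY_SMN_BIT are hypothetical
 * names used only for illustration:
 *
 *	u32 val;
 *	int err;
 *
 *	err = amd_smn_write(node, MY_SMN_REG, MY_SMN_BIT);
 *	if (err)
 *		return err;
 *
 *	err = amd_smn_read(node, MY_SMN_REG, &val);
 *	if (err)
 *		return err;
 *
 *	if (!(val & MY_SMN_BIT))
 *		return -EIO;
 */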
static int __amd_smn_rw(u8 i_off, u8 d_off, u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_num_nodes())
		return err;

	root = amd_roots[node];
	if (!root)
		return err;

	if (!smn_exclusive)
		return err;

	guard(mutex)(&smn_mutex);

	err = pci_write_config_dword(root, i_off, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		return pcibios_err_to_errno(err);
	}

	err = (write ? pci_write_config_dword(root, d_off, *value)
		     : pci_read_config_dword(root, d_off, value));

	return pcibios_err_to_errno(err);
}

int __must_check amd_smn_read(u16 node, u32 address, u32 *value)
{
	int err = __amd_smn_rw(SMN_INDEX_OFFSET, SMN_DATA_OFFSET, node, address, value, false);

	if (PCI_POSSIBLE_ERROR(*value)) {
		err = -ENODEV;
		*value = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int __must_check amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(SMN_INDEX_OFFSET, SMN_DATA_OFFSET, node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);

int __must_check amd_smn_hsmp_rdwr(u16 node, u32 address, u32 *value, bool write)
{
	return __amd_smn_rw(HSMP_INDEX_OFFSET, HSMP_DATA_OFFSET, node, address, value, write);
}
EXPORT_SYMBOL_GPL(amd_smn_hsmp_rdwr);

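/*
 * Debugfs interface for manual SMN register access, only created when the
 * "amd_smn_debugfs_enable" kernel parameter is given (see amd_smn_init()
 * below). Assuming debugfs is mounted at /sys/kernel/debug, usage would look
 * like this (the address below is purely an example):
 *
 *	echo 0 > /sys/kernel/debug/x86/amd_smn/node
 *	echo 0x50000 > /sys/kernel/debug/x86/amd_smn/address
 *	cat /sys/kernel/debug/x86/amd_smn/value
 *
 * Writing "value" goes through amd_smn_write() and taints the kernel.
 */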
static struct dentry *debugfs_dir;
static u16 debug_node;
static u32 debug_address;

static ssize_t smn_node_write(struct file *file, const char __user *userbuf,
			      size_t count, loff_t *ppos)
{
	u16 node;
	int ret;

	ret = kstrtou16_from_user(userbuf, count, 0, &node);
	if (ret)
		return ret;

	if (node >= amd_num_nodes())
		return -ENODEV;

	debug_node = node;
	return count;
}

static int smn_node_show(struct seq_file *m, void *v)
{
	seq_printf(m, "0x%08x\n", debug_node);
	return 0;
}

static ssize_t smn_address_write(struct file *file, const char __user *userbuf,
				 size_t count, loff_t *ppos)
{
	int ret;

	ret = kstrtouint_from_user(userbuf, count, 0, &debug_address);
	if (ret)
		return ret;

	return count;
}

static int smn_address_show(struct seq_file *m, void *v)
{
	seq_printf(m, "0x%08x\n", debug_address);
	return 0;
}

static int smn_value_show(struct seq_file *m, void *v)
{
	u32 val;
	int ret;

	ret = amd_smn_read(debug_node, debug_address, &val);
	if (ret)
		return ret;

	seq_printf(m, "0x%08x\n", val);
	return 0;
}

static ssize_t smn_value_write(struct file *file, const char __user *userbuf,
			       size_t count, loff_t *ppos)
{
	u32 val;
	int ret;

	ret = kstrtouint_from_user(userbuf, count, 0, &val);
	if (ret)
		return ret;

	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);

	ret = amd_smn_write(debug_node, debug_address, val);
	if (ret)
		return ret;

	return count;
}

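/* DEFINE_SHOW_STORE_ATTRIBUTE() generates the smn_*_fops used by amd_smn_init(). */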
DEFINE_SHOW_STORE_ATTRIBUTE(smn_node);
DEFINE_SHOW_STORE_ATTRIBUTE(smn_address);
DEFINE_SHOW_STORE_ATTRIBUTE(smn_value);

static int amd_cache_roots(void)
{
	u16 node, num_nodes = amd_num_nodes();

	amd_roots = kcalloc(num_nodes, sizeof(*amd_roots), GFP_KERNEL);
	if (!amd_roots)
		return -ENOMEM;

	for (node = 0; node < num_nodes; node++)
		amd_roots[node] = amd_node_get_root(node);

	return 0;
}

static int reserve_root_config_spaces(void)
{
	struct pci_dev *root = NULL;
	struct pci_bus *bus = NULL;

	while ((bus = pci_find_next_bus(bus))) {
		/* Root device is Device 0 Function 0 on each Primary Bus. */
		root = pci_get_slot(bus, 0);
		if (!root)
			continue;

		if (root->vendor != PCI_VENDOR_ID_AMD &&
		    root->vendor != PCI_VENDOR_ID_HYGON)
			continue;

		pci_dbg(root, "Reserving PCI config space\n");

		/*
		 * There are a few SMN index/data pairs and other registers
		 * that shouldn't be accessed by user space.
		 * So reserve the entire PCI config space for simplicity rather
		 * than covering specific registers piecemeal.
		 */
		if (!pci_request_config_region_exclusive(root, 0, PCI_CFG_SPACE_SIZE, NULL)) {
			pci_err(root, "Failed to reserve config space\n");
			return -EEXIST;
		}
	}

	smn_exclusive = true;
	return 0;
}

static bool enable_dfs;

static int __init amd_smn_enable_dfs(char *str)
{
	enable_dfs = true;
	return 1;
}
__setup("amd_smn_debugfs_enable", amd_smn_enable_dfs);

static int __init amd_smn_init(void)
{
	int err;

	if (!cpu_feature_enabled(X86_FEATURE_ZEN))
		return 0;

	guard(mutex)(&smn_mutex);

	if (amd_roots)
		return 0;

	err = amd_cache_roots();
	if (err)
		return err;

	err = reserve_root_config_spaces();
	if (err)
		return err;

	if (enable_dfs) {
		debugfs_dir = debugfs_create_dir("amd_smn", arch_debugfs_dir);

		debugfs_create_file("node",	0600, debugfs_dir, NULL, &smn_node_fops);
		debugfs_create_file("address",	0600, debugfs_dir, NULL, &smn_address_fops);
		debugfs_create_file("value",	0600, debugfs_dir, NULL, &smn_value_fops);
	}

	return 0;
}

fs_initcall(amd_smn_init);