// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Intel CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <[email protected]>
 *		 2006 Shaohua Li <[email protected]>
 *
 * Intel CPU microcode early update for Linux
 *
 * Copyright (C) 2012 Fenghua Yu <[email protected]>
 *		      H Peter Anvin <[email protected]>
 */
#define pr_fmt(fmt) "microcode: " fmt
#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/uio.h>
#include <linux/mm.h>

#include <asm/cpu_device_id.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>
#include <asm/msr.h>

#include "internal.h"

static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";

#define UCODE_BSP_LOADED	((struct microcode_intel *)0x1UL)

/* Current microcode patch used in early patching on the APs. */
static struct microcode_intel *ucode_patch_va __read_mostly;
static struct microcode_intel *ucode_patch_late __read_mostly;

/* last level cache size per core */
static unsigned int llc_size_per_core __ro_after_init;

/* microcode format is extended from prescott processors */
struct extended_signature {
	unsigned int	sig;
	unsigned int	pf;
	unsigned int	cksum;
};

struct extended_sigtable {
	unsigned int			count;
	unsigned int			cksum;
	unsigned int			reserved[3];
	struct extended_signature	sigs[];
};

#define DEFAULT_UCODE_TOTALSIZE	(DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE)
#define EXT_HEADER_SIZE		(sizeof(struct extended_sigtable))
#define EXT_SIGNATURE_SIZE	(sizeof(struct extended_signature))

static inline unsigned int get_totalsize(struct microcode_header_intel *hdr)
{
	return hdr->datasize ? hdr->totalsize : DEFAULT_UCODE_TOTALSIZE;
}

static inline unsigned int exttable_size(struct extended_sigtable *et)
{
	return et->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE;
}

void intel_collect_cpu_info(struct cpu_signature *sig)
{
	sig->sig = cpuid_eax(1);
	sig->pf = 0;
	sig->rev = intel_get_microcode_revision();

	if (IFM(x86_family(sig->sig), x86_model(sig->sig)) >= INTEL_PENTIUM_III_DESCHUTES) {
		unsigned int val[2];

		/* get processor flags from MSR 0x17 */
		native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		sig->pf = 1 << ((val[1] >> 18) & 7);
	}
}
EXPORT_SYMBOL_GPL(intel_collect_cpu_info);

static inline bool cpu_signatures_match(struct cpu_signature *s1, unsigned int sig2,
					unsigned int pf2)
{
	if (s1->sig != sig2)
		return false;

	/* Processor flags are either both 0 or they intersect. */
	return ((!s1->pf && !pf2) || (s1->pf & pf2));
}

bool intel_find_matching_signature(void *mc, struct cpu_signature *sig)
{
	struct microcode_header_intel *mc_hdr = mc;
	struct extended_signature *ext_sig;
	struct extended_sigtable *ext_hdr;
	int i;

	if (cpu_signatures_match(sig, mc_hdr->sig, mc_hdr->pf))
		return true;

	/* Look for ext. headers: */
	if (get_totalsize(mc_hdr) <= intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE)
		return false;

	ext_hdr = mc + intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE;
	ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;

	for (i = 0; i < ext_hdr->count; i++) {
		if (cpu_signatures_match(sig, ext_sig->sig, ext_sig->pf))
			return true;
		ext_sig++;
	}
	return false;
}
EXPORT_SYMBOL_GPL(intel_find_matching_signature);

/**
 * intel_microcode_sanity_check() - Sanity check microcode file.
 * @mc: Pointer to the microcode file contents.
 * @print_err: Display failure reason if true, silent if false.
 * @hdr_type: Type of file, i.e. normal microcode file or In Field Scan file.
 *            Validate that the microcode header type matches the type
 *            specified here.
 *
 * Validate certain header fields and verify that the computed checksum
 * matches the one specified in the header.
 *
 * Return: 0 if the file passes all the checks, -EINVAL if any of the checks
 * fail.
 */
int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type)
{
	unsigned long total_size, data_size, ext_table_size;
	struct microcode_header_intel *mc_header = mc;
	struct extended_sigtable *ext_header = NULL;
	u32 sum, orig_sum, ext_sigcount = 0, i;
	struct extended_signature *ext_sig;

	total_size = get_totalsize(mc_header);
	data_size = intel_microcode_get_datasize(mc_header);

	if (data_size + MC_HEADER_SIZE > total_size) {
		if (print_err)
			pr_err("Error: bad microcode data file size.\n");
		return -EINVAL;
	}

	if (mc_header->ldrver != 1 || mc_header->hdrver != hdr_type) {
		if (print_err)
			pr_err("Error: invalid/unknown microcode update format. Header type %d\n",
			       mc_header->hdrver);
		return -EINVAL;
	}

	ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
	if (ext_table_size) {
		u32 ext_table_sum = 0;
		u32 *ext_tablep;

		if (ext_table_size < EXT_HEADER_SIZE ||
		    ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
			if (print_err)
				pr_err("Error: truncated extended signature table.\n");
			return -EINVAL;
		}

		ext_header = mc + MC_HEADER_SIZE + data_size;
		if (ext_table_size != exttable_size(ext_header)) {
			if (print_err)
				pr_err("Error: extended signature table size mismatch.\n");
			return -EFAULT;
		}

		ext_sigcount = ext_header->count;

		/*
		 * Check extended table checksum: the sum of all dwords that
		 * comprise a valid table must be 0.
		 */
		ext_tablep = (u32 *)ext_header;

		i = ext_table_size / sizeof(u32);
		while (i--)
			ext_table_sum += ext_tablep[i];

		if (ext_table_sum) {
			if (print_err)
				pr_warn("Bad extended signature table checksum, aborting.\n");
			return -EINVAL;
		}
	}

	/*
	 * Calculate the checksum of update data and header. The checksum of
	 * valid update data and header including the extended signature table
	 * must be 0.
	 */
	orig_sum = 0;
	i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
	while (i--)
		orig_sum += ((u32 *)mc)[i];

	if (orig_sum) {
		if (print_err)
			pr_err("Bad microcode data checksum, aborting.\n");
		return -EINVAL;
	}

	if (!ext_table_size)
		return 0;

	/*
	 * Check extended signature checksum: 0 => valid.
	 */
	for (i = 0; i < ext_sigcount; i++) {
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
			  EXT_SIGNATURE_SIZE * i;

		sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
		      (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
		if (sum) {
			if (print_err)
				pr_err("Bad extended signature checksum, aborting.\n");
			return -EINVAL;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(intel_microcode_sanity_check);

static void update_ucode_pointer(struct microcode_intel *mc)
{
	kvfree(ucode_patch_va);

	/*
	 * Save the virtual address for early loading and for eventual free
	 * on late loading.
	 */
	ucode_patch_va = mc;
}

static void save_microcode_patch(struct microcode_intel *patch)
{
	unsigned int size = get_totalsize(&patch->hdr);
	struct microcode_intel *mc;

	mc = kvmemdup(patch, size, GFP_KERNEL);
	if (mc)
		update_ucode_pointer(mc);
	else
		pr_err("Unable to allocate microcode memory size: %u\n", size);
}

/* Scan blob for microcode matching the boot CPU's family, model, stepping */
static __init struct microcode_intel *scan_microcode(void *data, size_t size,
						     struct ucode_cpu_info *uci,
						     bool save)
{
	struct microcode_header_intel *mc_header;
	struct microcode_intel *patch = NULL;
	u32 cur_rev = uci->cpu_sig.rev;
	unsigned int mc_size;

	for (; size >= sizeof(struct microcode_header_intel); size -= mc_size, data += mc_size) {
		mc_header = (struct microcode_header_intel *)data;

		mc_size = get_totalsize(mc_header);
		if (!mc_size || mc_size > size ||
		    intel_microcode_sanity_check(data, false, MC_HEADER_TYPE_MICROCODE) < 0)
			break;

		if (!intel_find_matching_signature(data, &uci->cpu_sig))
			continue;

		/*
		 * For saving the early microcode, find the matching revision
		 * which was loaded on the BSP.
		 *
		 * On the BSP during early boot, find a newer revision than the
		 * one actually loaded in the CPU.
		 */
		if (save) {
			if (cur_rev != mc_header->rev)
				continue;
		} else if (cur_rev >= mc_header->rev) {
			continue;
		}

		patch = data;
		cur_rev = mc_header->rev;
	}

	return size ? NULL : patch;
}

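/*
 * Write the microcode image to MSR 0x79 and confirm that the CPU reports the
 * new revision. Returns UCODE_OK without writing when the CPU already runs a
 * revision at least as new as @mc.
 */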
static enum ucode_state __apply_microcode(struct ucode_cpu_info *uci,
					  struct microcode_intel *mc,
					  u32 *cur_rev)
{
	u32 rev;

	if (!mc)
		return UCODE_NFOUND;

	/*
	 * Save us the MSR write below - which is a particularly expensive
	 * operation - when the other hyperthread has updated the microcode
	 * already.
	 */
	*cur_rev = intel_get_microcode_revision();
	if (*cur_rev >= mc->hdr.rev) {
		uci->cpu_sig.rev = *cur_rev;
		return UCODE_OK;
	}

	/* write microcode via MSR 0x79 */
	native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

	rev = intel_get_microcode_revision();
	if (rev != mc->hdr.rev)
		return UCODE_ERROR;

	uci->cpu_sig.rev = rev;
	return UCODE_UPDATED;
}

static enum ucode_state apply_microcode_early(struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc = uci->mc;
	u32 cur_rev;

	return __apply_microcode(uci, mc, &cur_rev);
}

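/*
 * Look up a builtin microcode blob named "intel-ucode/<family>-<model>-<stepping>"
 * for the boot CPU and hand back its location and size via @cp.
 */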
static __init bool load_builtin_intel_microcode(struct cpio_data *cp)
{
	unsigned int eax = 1, ebx, ecx = 0, edx;
	struct firmware fw;
	char name[30];

	if (IS_ENABLED(CONFIG_X86_32))
		return false;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	sprintf(name, "intel-ucode/%02x-%02x-%02x",
		x86_family(eax), x86_model(eax), x86_stepping(eax));

	if (firmware_request_builtin(&fw, name)) {
		cp->size = fw.size;
		cp->data = (void *)fw.data;
		return true;
	}
	return false;
}

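/*
 * Collect the boot CPU's signature, then scan the builtin firmware or,
 * failing that, the initrd for a matching microcode patch.
 */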
static __init struct microcode_intel *get_microcode_blob(struct ucode_cpu_info *uci, bool save)
{
	struct cpio_data cp;

	intel_collect_cpu_info(&uci->cpu_sig);

	if (!load_builtin_intel_microcode(&cp))
		cp = find_microcode_in_initrd(ucode_path);

	if (!(cp.data && cp.size))
		return NULL;

	return scan_microcode(cp.data, cp.size, uci, save);
}

/*
 * Invoked from an early init call to save the microcode blob which was
 * selected during early boot when mm was not usable. The microcode must be
 * saved because initrd is going away. It's an early init call so the APs
 * can just use the pointer and do not have to scan initrd/builtin firmware
 * again.
 */
static int __init save_builtin_microcode(void)
{
	struct ucode_cpu_info uci;

	if (xchg(&ucode_patch_va, NULL) != UCODE_BSP_LOADED)
		return 0;

	if (microcode_loader_disabled() || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	uci.mc = get_microcode_blob(&uci, true);
	if (uci.mc)
		save_microcode_patch(uci.mc);
	return 0;
}
early_initcall(save_builtin_microcode);

/* Load microcode on BSP from initrd or builtin blobs */
void __init load_ucode_intel_bsp(struct early_load_data *ed)
{
	struct ucode_cpu_info uci;

	uci.mc = get_microcode_blob(&uci, false);
	ed->old_rev = uci.cpu_sig.rev;

	if (uci.mc && apply_microcode_early(&uci) == UCODE_UPDATED) {
		ucode_patch_va = UCODE_BSP_LOADED;
		ed->new_rev = uci.cpu_sig.rev;
	}
}

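/* Apply the previously saved microcode patch on an AP during early bringup */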
void load_ucode_intel_ap(void)
{
	struct ucode_cpu_info uci;

	uci.mc = ucode_patch_va;
	if (uci.mc)
		apply_microcode_early(&uci);
}

/* Reload microcode on resume */
void reload_ucode_intel(void)
{
	struct ucode_cpu_info uci = { .mc = ucode_patch_va, };

	if (uci.mc)
		apply_microcode_early(&uci);
}

static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
{
	intel_collect_cpu_info(csig);
	return 0;
}

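/*
 * Apply the staged late-load patch on @cpu and propagate the new revision
 * into cpu_data (and boot_cpu_data for CPU 0). Must run on @cpu itself.
 */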
static enum ucode_state apply_microcode_late(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct microcode_intel *mc = ucode_patch_late;
	enum ucode_state ret;
	u32 cur_rev;

	if (WARN_ON_ONCE(smp_processor_id() != cpu))
		return UCODE_ERROR;

	ret = __apply_microcode(uci, mc, &cur_rev);
	if (ret != UCODE_UPDATED && ret != UCODE_OK)
		return ret;

	cpu_data(cpu).microcode = uci->cpu_sig.rev;
	if (!cpu)
		boot_cpu_data.microcode = uci->cpu_sig.rev;

	return ret;
}

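/*
 * Check whether a late-load candidate declares a minimum required base
 * revision and whether the currently running microcode meets it.
 */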
static bool ucode_validate_minrev(struct microcode_header_intel *mc_header)
{
	int cur_rev = boot_cpu_data.microcode;

	/*
	 * When late-loading, ensure the header declares a minimum revision
	 * required to perform a late-load. The previously reserved field
	 * is 0 in older microcode blobs.
	 */
	if (!mc_header->min_req_ver) {
		pr_info("Unsafe microcode update: Microcode header does not specify a required min version\n");
		return false;
	}

	/*
	 * Check whether the current revision is greater than or equal to
	 * the minimum revision specified in the header.
	 */
	if (cur_rev < mc_header->min_req_ver) {
		pr_info("Unsafe microcode update: Current revision 0x%x too old\n", cur_rev);
		pr_info("Current should be at 0x%x or higher. Use early loading instead\n", mc_header->min_req_ver);
		return false;
	}
	return true;
}

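/*
 * Walk all update blobs in the firmware image, sanity check each one and
 * keep the newest revision which matches the CPU signature and, when
 * force_minrev is set, passes the minimum revision check. The winner is
 * staged in ucode_patch_late for the late-load machinery.
 */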
static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	bool is_safe, new_is_safe = false;
	int cur_rev = uci->cpu_sig.rev;
	unsigned int curr_mc_size = 0;
	u8 *new_mc = NULL, *mc = NULL;

	while (iov_iter_count(iter)) {
		struct microcode_header_intel mc_header;
		unsigned int mc_size, data_size;
		u8 *data;

		if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) {
			pr_err("error! Truncated or inaccessible header in microcode data file\n");
			goto fail;
		}

		mc_size = get_totalsize(&mc_header);
		if (mc_size < sizeof(mc_header)) {
			pr_err("error! Bad data in microcode data file (totalsize too small)\n");
			goto fail;
		}
		data_size = mc_size - sizeof(mc_header);
		if (data_size > iov_iter_count(iter)) {
			pr_err("error! Bad data in microcode data file (truncated file?)\n");
			goto fail;
		}

		/* For performance reasons, reuse mc area when possible */
		if (!mc || mc_size > curr_mc_size) {
			kvfree(mc);
			mc = kvmalloc(mc_size, GFP_KERNEL);
			if (!mc)
				goto fail;
			curr_mc_size = mc_size;
		}

		memcpy(mc, &mc_header, sizeof(mc_header));
		data = mc + sizeof(mc_header);
		if (!copy_from_iter_full(data, data_size, iter) ||
		    intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0)
			goto fail;

		if (cur_rev >= mc_header.rev)
			continue;

		if (!intel_find_matching_signature(mc, &uci->cpu_sig))
			continue;

		is_safe = ucode_validate_minrev(&mc_header);
		if (force_minrev && !is_safe)
			continue;

		kvfree(new_mc);
		cur_rev = mc_header.rev;
		new_mc = mc;
		new_is_safe = is_safe;
		mc = NULL;
	}

	if (iov_iter_count(iter))
		goto fail;

	kvfree(mc);
	if (!new_mc)
		return UCODE_NFOUND;

	ucode_patch_late = (struct microcode_intel *)new_mc;
	return new_is_safe ? UCODE_NEW_SAFE : UCODE_NEW;

fail:
	kvfree(mc);
	kvfree(new_mc);
	return UCODE_ERROR;
}

static bool is_blacklisted(unsigned int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	/*
	 * Late loading on model 79 with microcode revision less than 0x0b000021
	 * and LLC size per core bigger than 2.5MB may result in a system hang.
	 * This behavior is documented in item BDX90, #334165 (Intel Xeon
	 * Processor E7-8800/4800 v4 Product Family).
	 */
	if (c->x86_vfm == INTEL_BROADWELL_X &&
	    c->x86_stepping == 0x01 &&
	    llc_size_per_core > 2621440 &&
	    c->microcode < 0x0b000021) {
		pr_err_once("Erratum BDX90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
		pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
		return true;
	}

	return false;
}

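/*
 * Request the per-CPU microcode file "intel-ucode/<family>-<model>-<stepping>"
 * from the firmware loader and feed it to parse_microcode_blobs().
 */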
static enum ucode_state request_microcode_fw(int cpu, struct device *device)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	const struct firmware *firmware;
	struct iov_iter iter;
	enum ucode_state ret;
	struct kvec kvec;
	char name[30];

	if (is_blacklisted(cpu))
		return UCODE_NFOUND;

	sprintf(name, "intel-ucode/%02x-%02x-%02x",
		c->x86, c->x86_model, c->x86_stepping);

	if (request_firmware_direct(&firmware, name, device)) {
		pr_debug("data file %s load failed\n", name);
		return UCODE_NFOUND;
	}

	kvec.iov_base = (void *)firmware->data;
	kvec.iov_len = firmware->size;
	iov_iter_kvec(&iter, ITER_SOURCE, &kvec, 1, firmware->size);
	ret = parse_microcode_blobs(cpu, &iter);

	release_firmware(firmware);

	return ret;
}

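/*
 * Late-load finalization callback: on success the staged patch becomes the
 * new early-load patch, otherwise it is freed.
 */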
static void finalize_late_load(int result)
{
	if (!result)
		update_ucode_pointer(ucode_patch_late);
	else
		kvfree(ucode_patch_late);
	ucode_patch_late = NULL;
}

static struct microcode_ops microcode_intel_ops = {
	.request_microcode_fw	= request_microcode_fw,
	.collect_cpu_info	= collect_cpu_info,
	.apply_microcode	= apply_microcode_late,
	.finalize_late_load	= finalize_late_load,
	.use_nmi		= IS_ENABLED(CONFIG_X86_64),
};

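/* Cache the per-core share of the last level cache for the BDX90 check */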
static __init void calc_llc_size_per_core(struct cpuinfo_x86 *c)
{
	u64 llc_size = c->x86_cache_size * 1024ULL;

	do_div(llc_size, topology_num_cores_per_package());
	llc_size_per_core = (unsigned int)llc_size;
}

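/*
 * Entry point from the common microcode driver: reject unsupported CPUs and
 * hand back the Intel-specific ops.
 */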
struct microcode_ops * __init init_intel_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
	    cpu_has(c, X86_FEATURE_IA64)) {
		pr_err("Intel CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}

	calc_llc_size_per_core(c);

	return &microcode_intel_ops;
}