// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <[email protected]>
 *		 2006	   Shaohua Li <[email protected]>
 *		 2013-2016 Borislav Petkov <[email protected]>
 *
 * X86 CPU microcode early update for Linux:
 *
 *	Copyright (C) 2012 Fenghua Yu <[email protected]>
 *			   H Peter Anvin <[email protected]>
 *		  (C) 2015 Borislav Petkov <[email protected]>
 *
 * This driver allows updating microcode on x86 processors.
 */

#define pr_fmt(fmt) "microcode: " fmt

#include <linux/platform_device.h>
#include <linux/stop_machine.h>
#include <linux/syscore_ops.h>
#include <linux/miscdevice.h>
#include <linux/capability.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/mm.h>

#include <asm/cpu_device_id.h>
#include <asm/perf_event.h>
#include <asm/processor.h>
#include <asm/cmdline.h>
#include <asm/setup.h>

#include "internal.h"

#define DRIVER_VERSION	"2.2"

static struct microcode_ops *microcode_ops;
bool dis_ucode_ldr = true;

/*
 * Synchronization.
 *
 * All non cpu-hotplug-callback call sites use:
 *
 * - cpus_read_lock/unlock() to synchronize with
 *   the cpu-hotplug-callback call sites.
 *
 * We guarantee that only a single CPU is being
 * updated at any particular moment of time.
 */
struct ucode_cpu_info ucode_cpu_info[NR_CPUS];

struct cpu_info_ctx {
	struct cpu_signature	*cpu_sig;
	int			err;
};

/*
 * Those patch levels cannot be updated to newer ones and thus should be final.
 */
static u32 final_levels[] = {
	0x01000098,
	0x0100009f,
	0x010000af,
	0, /* T-101 terminator */
};

/*
 * Check the current patch level on this CPU.
 *
 * Returns:
 *  - true: if update should stop
 *  - false: otherwise
 */
static bool amd_check_current_patch_level(void)
{
	u32 lvl, dummy, i;
	u32 *levels;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);

	levels = final_levels;

	for (i = 0; levels[i]; i++) {
		if (lvl == levels[i])
			return true;
	}
	return false;
}

static bool __init check_loader_disabled_bsp(void)
{
	static const char *__dis_opt_str = "dis_ucode_ldr";
	const char *cmdline = boot_command_line;
	const char *option = __dis_opt_str;

	/*
	 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
	 * completely accurate as xen pv guests don't see that CPUID bit set,
	 * but that's good enough as they don't land on the BSP path anyway.
	 */
	if (native_cpuid_ecx(1) & BIT(31))
		return true;

	if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
		if (amd_check_current_patch_level())
			return true;
	}

	if (cmdline_find_option_bool(cmdline, option) <= 0)
		dis_ucode_ldr = false;

	return dis_ucode_ldr;
}
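
/*
 * Usage note for the check above: booting with the documented
 * "dis_ucode_ldr" kernel command line parameter leaves dis_ucode_ldr set to
 * true, so load_ucode_bsp() and load_ucode_ap() below bail out early and
 * microcode_init() refuses to register the late loader. Without the option
 * (and outside a hypervisor guest), the flag is cleared here and the early
 * loader runs.
 */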

void __init load_ucode_bsp(void)
{
	unsigned int cpuid_1_eax;
	bool intel = true;

	if (!have_cpuid_p())
		return;

	cpuid_1_eax = native_cpuid_eax(1);

	switch (x86_cpuid_vendor()) {
	case X86_VENDOR_INTEL:
		if (x86_family(cpuid_1_eax) < 6)
			return;
		break;

	case X86_VENDOR_AMD:
		if (x86_family(cpuid_1_eax) < 0x10)
			return;
		intel = false;
		break;

	default:
		return;
	}

	if (check_loader_disabled_bsp())
		return;

	if (intel)
		load_ucode_intel_bsp();
	else
		load_ucode_amd_bsp(cpuid_1_eax);
}

void load_ucode_ap(void)
{
	unsigned int cpuid_1_eax;

	if (dis_ucode_ldr)
		return;

	cpuid_1_eax = native_cpuid_eax(1);

	switch (x86_cpuid_vendor()) {
	case X86_VENDOR_INTEL:
		if (x86_family(cpuid_1_eax) >= 6)
			load_ucode_intel_ap();
		break;
	case X86_VENDOR_AMD:
		if (x86_family(cpuid_1_eax) >= 0x10)
			load_ucode_amd_ap(cpuid_1_eax);
		break;
	default:
		break;
	}
}

struct cpio_data __init find_microcode_in_initrd(const char *path)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long start = 0;
	size_t size;

#ifdef CONFIG_X86_32
	size = boot_params.hdr.ramdisk_size;
	/* Early load on BSP has a temporary mapping. */
	if (size)
		start = initrd_start_early;

#else /* CONFIG_X86_64 */
	size = (unsigned long)boot_params.ext_ramdisk_size << 32;
	size |= boot_params.hdr.ramdisk_size;

	if (size) {
		start = (unsigned long)boot_params.ext_ramdisk_image << 32;
		start |= boot_params.hdr.ramdisk_image;
		start += PAGE_OFFSET;
	}
#endif

	/*
	 * Fixup the start address: after reserve_initrd() runs, initrd_start
	 * has the virtual address of the beginning of the initrd. It also
	 * possibly relocates the ramdisk. In either case, initrd_start contains
	 * the updated address so use that instead.
	 */
	if (initrd_start)
		start = initrd_start;

	return find_cpio_data(path, (void *)start, size, NULL);
#else /* !CONFIG_BLK_DEV_INITRD */
	return (struct cpio_data){ NULL, 0, "" };
#endif
}
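
/*
 * Usage sketch for the lookup above: the vendor loaders search the early
 * initrd under the documented cpio paths, e.g.:
 *
 *	struct cpio_data cd;
 *
 *	cd = find_microcode_in_initrd("kernel/x86/microcode/GenuineIntel.bin");
 *	if (cd.data)
 *		consume_container(cd.data, cd.size);	// hypothetical consumer
 *
 * AMD parts use "kernel/x86/microcode/AuthenticAMD.bin" the same way; if the
 * blob is absent, cd.data is NULL and the caller simply skips the update.
 */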

static void reload_early_microcode(unsigned int cpu)
{
	int vendor, family;

	vendor = x86_cpuid_vendor();
	family = x86_cpuid_family();

	switch (vendor) {
	case X86_VENDOR_INTEL:
		if (family >= 6)
			reload_ucode_intel();
		break;
	case X86_VENDOR_AMD:
		if (family >= 0x10)
			reload_ucode_amd(cpu);
		break;
	default:
		break;
	}
}

/* fake device for request_firmware */
static struct platform_device	*microcode_pdev;

#ifdef CONFIG_MICROCODE_LATE_LOADING
/*
 * Late loading dance. Why the heavy-handed stomp_machine effort?
 *
 * - HT siblings must be idle and not execute other code while the other
 *   sibling is loading microcode in order to avoid any negative interactions
 *   caused by the loading.
 *
 * - In addition, microcode update on the cores must be serialized until this
 *   requirement can be relaxed in the future. Right now, this is conservative
 *   and good.
 */
#define SPINUNIT 100 /* 100 nsec */

static atomic_t late_cpus_in;
static atomic_t late_cpus_out;

static int __wait_for_cpus(atomic_t *t, long long timeout)
{
	int all_cpus = num_online_cpus();

	atomic_inc(t);

	while (atomic_read(t) < all_cpus) {
		if (timeout < SPINUNIT) {
			pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
			       all_cpus - atomic_read(t));
			return 1;
		}

		ndelay(SPINUNIT);
		timeout -= SPINUNIT;

		touch_nmi_watchdog();
	}
	return 0;
}

/*
 * Returns:
 * < 0 - on error
 *   0 - success (no update done or microcode was updated)
 */
static int __reload_late(void *info)
{
	int cpu = smp_processor_id();
	enum ucode_state err;
	int ret = 0;

	/*
	 * Wait for all CPUs to arrive. A load will not be attempted unless all
	 * CPUs show up.
	 */
	if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
		return -1;

	/*
	 * On an SMT system, it suffices to load the microcode on one sibling of
	 * the core because the microcode engine is shared between the threads.
	 * Synchronization still needs to take place so that no concurrent
	 * loading attempts happen on multiple threads of an SMT core. See
	 * below.
	 */
	if (cpumask_first(topology_sibling_cpumask(cpu)) == cpu)
		err = microcode_ops->apply_microcode(cpu);
	else
		goto wait_for_siblings;

	if (err >= UCODE_NFOUND) {
		if (err == UCODE_ERROR) {
			pr_warn("Error reloading microcode on CPU %d\n", cpu);
			ret = -1;
		}
	}

wait_for_siblings:
	if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC))
		panic("Timeout during microcode update!\n");

	/*
	 * At least one thread has completed update on each core.
	 * For others, simply call the update to make sure the
	 * per-cpu cpuinfo can be updated with right microcode
	 * revision.
	 */
	if (cpumask_first(topology_sibling_cpumask(cpu)) != cpu)
		err = microcode_ops->apply_microcode(cpu);

	return ret;
}

/*
 * Reload microcode late on all CPUs. Wait for a sec until they
 * all gather together.
 */
static int microcode_reload_late(void)
{
	int old = boot_cpu_data.microcode, ret;
	struct cpuinfo_x86 prev_info;

	pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n");
	pr_err("You should switch to early loading, if possible.\n");

	atomic_set(&late_cpus_in, 0);
	atomic_set(&late_cpus_out, 0);

	/*
	 * Take a snapshot before the microcode update in order to compare and
	 * check whether any bits changed after an update.
	 */
	store_cpu_caps(&prev_info);

	ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);

	if (microcode_ops->finalize_late_load)
		microcode_ops->finalize_late_load(ret);

	if (!ret) {
		pr_info("Reload succeeded, microcode revision: 0x%x -> 0x%x\n",
			old, boot_cpu_data.microcode);
		microcode_check(&prev_info);
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
	} else {
		pr_info("Reload failed, current microcode revision: 0x%x\n",
			boot_cpu_data.microcode);
	}
	return ret;
}
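
/*
 * Rendezvous summary for the two counters used by __reload_late(): every CPU
 * first checks in via late_cpus_in and spins until all online CPUs have
 * arrived; only the first thread of each core then writes the microcode,
 * while its SMT siblings jump straight to the late_cpus_out barrier. Once
 * everybody has checked out, the siblings call apply_microcode() once more,
 * merely so their per-cpu cpuinfo picks up the new revision (the shared
 * microcode engine was already updated by the primary thread).
 */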

/*
 * Ensure that all required CPUs which are present and have been booted
 * once are online.
 *
 * To pass this check, all primary threads must be online.
 *
 * If the microcode load is not safe against NMI then all SMT threads
 * must be online as well because they still react to NMIs when they are
 * soft-offlined and parked in one of the play_dead() variants. So if an
 * NMI hits while the primary thread updates the microcode the resulting
 * behaviour is undefined. The default play_dead() implementation on
 * modern CPUs uses MWAIT, which is also not guaranteed to be safe
 * against a microcode update which affects MWAIT.
 */
static bool ensure_cpus_are_online(void)
{
	unsigned int cpu;

	for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) {
		if (!cpu_online(cpu)) {
			if (topology_is_primary_thread(cpu) || !microcode_ops->nmi_safe) {
				pr_err("CPU %u not online\n", cpu);
				return false;
			}
		}
	}
	return true;
}

static int ucode_load_late_locked(void)
{
	if (!ensure_cpus_are_online())
		return -EBUSY;

	switch (microcode_ops->request_microcode_fw(0, &microcode_pdev->dev)) {
	case UCODE_NEW:
		return microcode_reload_late();
	case UCODE_NFOUND:
		return -ENOENT;
	default:
		return -EBADFD;
	}
}

static ssize_t reload_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t size)
{
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret || val != 1)
		return -EINVAL;

	cpus_read_lock();
	ret = ucode_load_late_locked();
	cpus_read_unlock();

	return ret ? : size;
}

static DEVICE_ATTR_WO(reload);
#endif

static ssize_t version_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

	return sprintf(buf, "0x%x\n", uci->cpu_sig.rev);
}

static ssize_t processor_flags_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

	return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
}

static DEVICE_ATTR_RO(version);
static DEVICE_ATTR_RO(processor_flags);

static struct attribute *mc_default_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_processor_flags.attr,
	NULL
};

static const struct attribute_group mc_attr_group = {
	.attrs	= mc_default_attrs,
	.name	= "microcode",
};
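
/*
 * Userspace view of the attributes defined above, e.g. from a shell:
 *
 *	# cat /sys/devices/system/cpu/cpu0/microcode/version
 *	# cat /sys/devices/system/cpu/cpu0/microcode/processor_flags
 *	# echo 1 > /sys/devices/system/cpu/microcode/reload
 *
 * The per-CPU "microcode" group is created in mc_cpu_online() below; the
 * global "reload" file exists only with CONFIG_MICROCODE_LATE_LOADING and
 * accepts nothing but the value 1.
 */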

static void microcode_fini_cpu(int cpu)
{
	if (microcode_ops->microcode_fini_cpu)
		microcode_ops->microcode_fini_cpu(cpu);
}

/**
 * microcode_bsp_resume - Update boot CPU microcode during resume.
 */
void microcode_bsp_resume(void)
{
	int cpu = smp_processor_id();
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	if (uci->mc)
		microcode_ops->apply_microcode(cpu);
	else
		reload_early_microcode(cpu);
}

static struct syscore_ops mc_syscore_ops = {
	.resume	= microcode_bsp_resume,
};

static int mc_cpu_online(unsigned int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct device *dev = get_cpu_device(cpu);

	memset(uci, 0, sizeof(*uci));

	microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig);
	cpu_data(cpu).microcode = uci->cpu_sig.rev;
	if (!cpu)
		boot_cpu_data.microcode = uci->cpu_sig.rev;

	if (sysfs_create_group(&dev->kobj, &mc_attr_group))
		pr_err("Failed to create group for CPU%d\n", cpu);
	return 0;
}

static int mc_cpu_down_prep(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	microcode_fini_cpu(cpu);
	sysfs_remove_group(&dev->kobj, &mc_attr_group);
	return 0;
}

static struct attribute *cpu_root_microcode_attrs[] = {
#ifdef CONFIG_MICROCODE_LATE_LOADING
	&dev_attr_reload.attr,
#endif
	NULL
};

static const struct attribute_group cpu_root_microcode_group = {
	.name	= "microcode",
	.attrs	= cpu_root_microcode_attrs,
};

static int __init microcode_init(void)
{
	struct device *dev_root;
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int error;

	if (dis_ucode_ldr)
		return -EINVAL;

	if (c->x86_vendor == X86_VENDOR_INTEL)
		microcode_ops = init_intel_microcode();
	else if (c->x86_vendor == X86_VENDOR_AMD)
		microcode_ops = init_amd_microcode();
	else
		pr_err("no support for this CPU vendor\n");

	if (!microcode_ops)
		return -ENODEV;

	microcode_pdev = platform_device_register_simple("microcode", -1, NULL, 0);
	if (IS_ERR(microcode_pdev))
		return PTR_ERR(microcode_pdev);

	dev_root = bus_get_dev_root(&cpu_subsys);
	if (dev_root) {
		error = sysfs_create_group(&dev_root->kobj, &cpu_root_microcode_group);
		put_device(dev_root);
		if (error) {
			pr_err("Error creating microcode group!\n");
			goto out_pdev;
		}
	}

	register_syscore_ops(&mc_syscore_ops);
	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
			  mc_cpu_online, mc_cpu_down_prep);

	pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);

	return 0;

 out_pdev:
	platform_device_unregister(microcode_pdev);
	return error;
}
late_initcall(microcode_init);
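
/*
 * Note on the hotplug wiring in microcode_init(): registering a dynamic
 * CPUHP_AP_ONLINE_DYN state via cpuhp_setup_state() invokes mc_cpu_online()
 * both for every CPU already online at registration time and for each CPU
 * that comes online later, so the cached microcode revision and the per-CPU
 * sysfs group stay in sync across hotplug; mc_cpu_down_prep() releases the
 * per-CPU state and removes the group again on offline.
 */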