// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <[email protected]>
 *		 2006	   Shaohua Li <[email protected]>
 *		 2013-2016 Borislav Petkov <[email protected]>
 *
 * X86 CPU microcode early update for Linux:
 *
 *	Copyright (C) 2012 Fenghua Yu <[email protected]>
 *			   H Peter Anvin <[email protected]>
 *		  (C) 2015 Borislav Petkov <[email protected]>
 *
 * This driver allows upgrading microcode on x86 processors.
 */

#define pr_fmt(fmt) "microcode: " fmt

#include <linux/platform_device.h>
#include <linux/stop_machine.h>
#include <linux/syscore_ops.h>
#include <linux/miscdevice.h>
#include <linux/capability.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/mm.h>

#include <asm/cpu_device_id.h>
#include <asm/perf_event.h>
#include <asm/processor.h>
#include <asm/cmdline.h>
#include <asm/setup.h>

#include "internal.h"

#define DRIVER_VERSION	"2.2"

static struct microcode_ops	*microcode_ops;
bool dis_ucode_ldr = true;

/*
 * Synchronization.
 *
 * All non cpu-hotplug-callback call sites use:
 *
 * - cpus_read_lock/unlock() to synchronize with
 *   the cpu-hotplug-callback call sites.
 *
 * We guarantee that only a single cpu is being
 * updated at any particular moment of time.
 */
struct ucode_cpu_info		ucode_cpu_info[NR_CPUS];

struct cpu_info_ctx {
	struct cpu_signature	*cpu_sig;
	int			err;
};

/*
 * Those patch levels cannot be updated to newer ones and thus should be final.
 */
static u32 final_levels[] = {
	0x01000098,
	0x0100009f,
	0x010000af,
	0, /* T-101 terminator */
};

/*
 * Check the current patch level on this CPU.
 *
 * Returns:
 *  - true: if update should stop
 *  - false: otherwise
 */
static bool amd_check_current_patch_level(void)
{
	u32 lvl, dummy, i;
	u32 *levels;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);

	levels = final_levels;

	for (i = 0; levels[i]; i++) {
		if (lvl == levels[i])
			return true;
	}
	return false;
}

static bool __init check_loader_disabled_bsp(void)
{
	static const char *__dis_opt_str = "dis_ucode_ldr";
	const char *cmdline = boot_command_line;
	const char *option = __dis_opt_str;

	/*
	 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
	 * completely accurate as xen pv guests don't see that CPUID bit set,
	 * but that's good enough as they don't land on the BSP path anyway.
	 */
	if (native_cpuid_ecx(1) & BIT(31))
		return true;

	if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
		if (amd_check_current_patch_level())
			return true;
	}

	if (cmdline_find_option_bool(cmdline, option) <= 0)
		dis_ucode_ldr = false;

	return dis_ucode_ldr;
}

void __init load_ucode_bsp(void)
{
	unsigned int cpuid_1_eax;
	bool intel = true;

	if (!have_cpuid_p())
		return;

	cpuid_1_eax = native_cpuid_eax(1);

	switch (x86_cpuid_vendor()) {
	case X86_VENDOR_INTEL:
		if (x86_family(cpuid_1_eax) < 6)
			return;
		break;

	case X86_VENDOR_AMD:
		if (x86_family(cpuid_1_eax) < 0x10)
			return;
		intel = false;
		break;

	default:
		return;
	}

	if (check_loader_disabled_bsp())
		return;

	if (intel)
		load_ucode_intel_bsp();
	else
		load_ucode_amd_bsp(cpuid_1_eax);
}

void load_ucode_ap(void)
{
	unsigned int cpuid_1_eax;

	if (dis_ucode_ldr)
		return;

	cpuid_1_eax = native_cpuid_eax(1);

	switch (x86_cpuid_vendor()) {
	case X86_VENDOR_INTEL:
		if (x86_family(cpuid_1_eax) >= 6)
			load_ucode_intel_ap();
		break;
	case X86_VENDOR_AMD:
		if (x86_family(cpuid_1_eax) >= 0x10)
			load_ucode_amd_ap(cpuid_1_eax);
		break;
	default:
		break;
	}
}

struct cpio_data __init find_microcode_in_initrd(const char *path)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long start = 0;
	size_t size;

#ifdef CONFIG_X86_32
	size = boot_params.hdr.ramdisk_size;
	/* Early load on BSP has a temporary mapping. */
	if (size)
		start = initrd_start_early;

#else /* CONFIG_X86_64 */
	size  = (unsigned long)boot_params.ext_ramdisk_size << 32;
	size |= boot_params.hdr.ramdisk_size;

	if (size) {
		start  = (unsigned long)boot_params.ext_ramdisk_image << 32;
		start |= boot_params.hdr.ramdisk_image;
		start += PAGE_OFFSET;
	}
#endif

	/*
	 * Fixup the start address: after reserve_initrd() runs, initrd_start
	 * has the virtual address of the beginning of the initrd. It also
	 * possibly relocates the ramdisk. In either case, initrd_start contains
	 * the updated address so use that instead.
	 */
	if (initrd_start)
		start = initrd_start;

	return find_cpio_data(path, (void *)start, size, NULL);
#else /* !CONFIG_BLK_DEV_INITRD */
	return (struct cpio_data){ NULL, 0, "" };
#endif
}
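
/*
 * Usage sketch (illustration only, not a caller in this file): the vendor
 * loaders look up their containers in the initrd cpio under the documented
 * firmware paths, roughly like:
 *
 *	struct cpio_data cd;
 *
 *	cd = find_microcode_in_initrd("kernel/x86/microcode/GenuineIntel.bin");
 *	if (cd.data)
 *		; // scan cd.data/cd.size for a patch matching this CPU
 */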

static void reload_early_microcode(unsigned int cpu)
{
	int vendor, family;

	vendor = x86_cpuid_vendor();
	family = x86_cpuid_family();

	switch (vendor) {
	case X86_VENDOR_INTEL:
		if (family >= 6)
			reload_ucode_intel();
		break;
	case X86_VENDOR_AMD:
		if (family >= 0x10)
			reload_ucode_amd(cpu);
		break;
	default:
		break;
	}
}

/* fake device for request_firmware */
static struct platform_device	*microcode_pdev;

#ifdef CONFIG_MICROCODE_LATE_LOADING
/*
 * Late loading dance. Why the heavy-handed stop_machine effort?
 *
 * - HT siblings must be idle and not execute other code while the other sibling
 *   is loading microcode in order to avoid any negative interactions caused by
 *   the loading.
 *
 * - In addition, microcode update on the cores must be serialized until this
 *   requirement can be relaxed in the future. Right now, this is conservative
 *   and good.
 */
static atomic_t late_cpus_in, late_cpus_out;

static bool wait_for_cpus(atomic_t *cnt)
{
	unsigned int timeout;

	WARN_ON_ONCE(atomic_dec_return(cnt) < 0);

	for (timeout = 0; timeout < USEC_PER_SEC; timeout++) {
		if (!atomic_read(cnt))
			return true;

		udelay(1);

		if (!(timeout % USEC_PER_MSEC))
			touch_nmi_watchdog();
	}
	/* Prevent the late comers from making progress and let them time out */
	atomic_inc(cnt);
	return false;
}

/*
 * Returns:
 * < 0 - on error
 *   0 - success (no update done or microcode was updated)
 */
static int __reload_late(void *info)
{
	int cpu = smp_processor_id();
	enum ucode_state err;
	int ret = 0;

	/*
	 * Wait for all CPUs to arrive. A load will not be attempted unless all
	 * CPUs show up.
	 */
	if (!wait_for_cpus(&late_cpus_in))
		return -1;

	/*
	 * On an SMT system, it suffices to load the microcode on one sibling of
	 * the core because the microcode engine is shared between the threads.
	 * Synchronization still needs to take place so that no concurrent
	 * loading attempts happen on multiple threads of an SMT core. See
	 * below.
	 */
	if (cpumask_first(topology_sibling_cpumask(cpu)) == cpu)
		err = microcode_ops->apply_microcode(cpu);
	else
		goto wait_for_siblings;

	if (err >= UCODE_NFOUND) {
		if (err == UCODE_ERROR) {
			pr_warn("Error reloading microcode on CPU %d\n", cpu);
			ret = -1;
		}
	}

wait_for_siblings:
	if (!wait_for_cpus(&late_cpus_out))
		panic("Timeout during microcode update!\n");

	/*
	 * At least one thread has completed update on each core.
	 * For others, simply call the update to make sure the
	 * per-cpu cpuinfo can be updated with right microcode
	 * revision.
	 */
	if (cpumask_first(topology_sibling_cpumask(cpu)) != cpu)
		err = microcode_ops->apply_microcode(cpu);

	return ret;
}
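
/*
 * Rendezvous sketch for one two-thread core (primary P, sibling S), as
 * implemented by __reload_late() above:
 *
 *	P, S: atomic_dec(&late_cpus_in), spin until the counter hits zero
 *	P:    apply_microcode()		S: branch to wait_for_siblings
 *	P, S: atomic_dec(&late_cpus_out), spin until the counter hits zero
 *	S:    apply_microcode() - refreshes only the per-CPU revision; the
 *	      shared microcode engine was already updated through P
 */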

/*
 * Reload microcode late on all CPUs. Wait up to one second for them
 * all to gather together.
 */
static int microcode_reload_late(void)
{
	int old = boot_cpu_data.microcode, ret;
	struct cpuinfo_x86 prev_info;

	pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n");
	pr_err("You should switch to early loading, if possible.\n");

	atomic_set(&late_cpus_in,  num_online_cpus());
	atomic_set(&late_cpus_out, num_online_cpus());

	/*
	 * Take a snapshot before the microcode update in order to compare and
	 * check whether any bits changed after an update.
	 */
	store_cpu_caps(&prev_info);

	ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);

	if (microcode_ops->finalize_late_load)
		microcode_ops->finalize_late_load(ret);

	if (!ret) {
		pr_info("Reload succeeded, microcode revision: 0x%x -> 0x%x\n",
			old, boot_cpu_data.microcode);
		microcode_check(&prev_info);
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
	} else {
		pr_info("Reload failed, current microcode revision: 0x%x\n",
			boot_cpu_data.microcode);
	}
	return ret;
}

/*
 * Ensure that all required CPUs which are present and have been booted
 * once are online.
 *
 * To pass this check, all primary threads must be online.
 *
 * If the microcode load is not safe against NMI then all SMT threads
 * must be online as well because they still react to NMIs when they are
 * soft-offlined and parked in one of the play_dead() variants. So if an
 * NMI hits while the primary thread updates the microcode the resulting
 * behaviour is undefined. The default play_dead() implementation on
 * modern CPUs uses MWAIT, which is also not guaranteed to be safe
 * against a microcode update which affects MWAIT.
 */
static bool ensure_cpus_are_online(void)
{
	unsigned int cpu;

	for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) {
		if (!cpu_online(cpu)) {
			if (topology_is_primary_thread(cpu) || !microcode_ops->nmi_safe) {
				pr_err("CPU %u not online\n", cpu);
				return false;
			}
		}
	}
	return true;
}

static int ucode_load_late_locked(void)
{
	if (!ensure_cpus_are_online())
		return -EBUSY;

	switch (microcode_ops->request_microcode_fw(0, &microcode_pdev->dev)) {
	case UCODE_NEW:
		return microcode_reload_late();
	case UCODE_NFOUND:
		return -ENOENT;
	default:
		return -EBADFD;
	}
}

static ssize_t reload_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t size)
{
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret || val != 1)
		return -EINVAL;

	cpus_read_lock();
	ret = ucode_load_late_locked();
	cpus_read_unlock();

	return ret ? : size;
}

static DEVICE_ATTR_WO(reload);
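
/*
 * A late load is triggered from user space by writing "1" to this attribute:
 *
 *	echo 1 > /sys/devices/system/cpu/microcode/reload
 *
 * after the updated microcode files have been placed in the firmware search
 * path (commonly /lib/firmware).
 */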
#endif

static ssize_t version_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

	return sprintf(buf, "0x%x\n", uci->cpu_sig.rev);
}

static ssize_t processor_flags_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

	return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
}

static DEVICE_ATTR_RO(version);
static DEVICE_ATTR_RO(processor_flags);

static struct attribute *mc_default_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_processor_flags.attr,
	NULL
};

static const struct attribute_group mc_attr_group = {
	.attrs	= mc_default_attrs,
	.name	= "microcode",
};
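
/*
 * The group above is instantiated per CPU by mc_cpu_online() below, i.e. the
 * files appear as:
 *
 *	/sys/devices/system/cpu/cpu0/microcode/version
 *	/sys/devices/system/cpu/cpu0/microcode/processor_flags
 */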

static void microcode_fini_cpu(int cpu)
{
	if (microcode_ops->microcode_fini_cpu)
		microcode_ops->microcode_fini_cpu(cpu);
}

/**
 * microcode_bsp_resume - Update boot CPU microcode during resume.
 */
void microcode_bsp_resume(void)
{
	int cpu = smp_processor_id();
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	if (uci->mc)
		microcode_ops->apply_microcode(cpu);
	else
		reload_early_microcode(cpu);
}

static struct syscore_ops mc_syscore_ops = {
	.resume	= microcode_bsp_resume,
};

static int mc_cpu_online(unsigned int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct device *dev = get_cpu_device(cpu);

	memset(uci, 0, sizeof(*uci));

	microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig);
	cpu_data(cpu).microcode = uci->cpu_sig.rev;
	if (!cpu)
		boot_cpu_data.microcode = uci->cpu_sig.rev;

	if (sysfs_create_group(&dev->kobj, &mc_attr_group))
		pr_err("Failed to create group for CPU%d\n", cpu);
	return 0;
}

static int mc_cpu_down_prep(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	microcode_fini_cpu(cpu);
	sysfs_remove_group(&dev->kobj, &mc_attr_group);
	return 0;
}

static struct attribute *cpu_root_microcode_attrs[] = {
#ifdef CONFIG_MICROCODE_LATE_LOADING
	&dev_attr_reload.attr,
#endif
	NULL
};

static const struct attribute_group cpu_root_microcode_group = {
	.name  = "microcode",
	.attrs = cpu_root_microcode_attrs,
};

static int __init microcode_init(void)
{
	struct device *dev_root;
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int error;

	if (dis_ucode_ldr)
		return -EINVAL;

	if (c->x86_vendor == X86_VENDOR_INTEL)
		microcode_ops = init_intel_microcode();
	else if (c->x86_vendor == X86_VENDOR_AMD)
		microcode_ops = init_amd_microcode();
	else
		pr_err("no support for this CPU vendor\n");

	if (!microcode_ops)
		return -ENODEV;

	microcode_pdev = platform_device_register_simple("microcode", -1, NULL, 0);
	if (IS_ERR(microcode_pdev))
		return PTR_ERR(microcode_pdev);

	dev_root = bus_get_dev_root(&cpu_subsys);
	if (dev_root) {
		error = sysfs_create_group(&dev_root->kobj, &cpu_root_microcode_group);
		put_device(dev_root);
		if (error) {
			pr_err("Error creating microcode group!\n");
			goto out_pdev;
		}
	}

	register_syscore_ops(&mc_syscore_ops);
	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
			  mc_cpu_online, mc_cpu_down_prep);

	pr_info("Microcode Update Driver: v%s.\n", DRIVER_VERSION);

	return 0;

out_pdev:
	platform_device_unregister(microcode_pdev);
	return error;
}
late_initcall(microcode_init);
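
/*
 * A successful late load shows up in dmesg as, e.g.:
 *
 *	microcode: Reload succeeded, microcode revision: 0x... -> 0x...
 *
 * (revision values elided here); the running revision is also reported in
 * the "microcode" field of /proc/cpuinfo.
 */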