/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"
#include "tcm.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);
extern void setup_dma_zone(struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif
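
/*
 * When more than one CPU/TLB/user/cache implementation is built in (the
 * MULTI_* cases above), the structures declared here are filled in by
 * setup_processor() from the matching proc_info_list entry, and the usual
 * entry points dispatch through them; roughly (see
 * arch/arm/include/asm/proc-fns.h for the real definitions):
 *
 *	#ifdef MULTI_CPU
 *	#define cpu_proc_init	processor._proc_init
 *	#endif
 *
 * A single-implementation kernel binds the same names directly to the
 * implementation-specific functions instead, with no indirection.
 */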

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}
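
/*
 * Worked example for __get_cpu_architecture() (illustrative values): a
 * Cortex-A9 r0p0 reads back a main ID register of 0x410fc090.  Bits [19:16]
 * are 0xf, so the "revised CPUID format" branch is taken, ID_MMFR0 is read,
 * and its VMSA field (>= 3 on v7 cores) selects CPU_ARCH_ARMv7.  Older cores
 * encode the architecture directly in that field instead; for example a
 * value of 5 there, added to CPU_ARCH_ARMv3 by the middle branch, yields
 * CPU_ARCH_ARMv5TE.
 */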

static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

static void __init cpuid_init_hwcaps(void)
{
	unsigned int divide_instrs;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

	switch (divide_instrs) {
	case 2:
		elf_hwcap |= HWCAP_IDIVA;
	case 1:
		elf_hwcap |= HWCAP_IDIVT;
	}
}
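
/*
 * A short worked example of the switch above (illustrative only): ID_ISAR0
 * bits [27:24] describe the hardware divide support.  A value of 1 means
 * SDIV/UDIV exist only in the Thumb instruction set, so only HWCAP_IDIVT is
 * set; a value of 2 means they exist in both ARM and Thumb, and the
 * deliberate fall-through from "case 2" to "case 1" sets HWCAP_IDIVA and
 * HWCAP_IDIVT together.
 */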

static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}

int __cpu_logical_map[NR_CPUS];

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
}
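
/*
 * Example for the map built above (illustrative values): if the booting core
 * reports MPIDR affinity level 0 == 2 on a four-core system, the loop
 * produces
 *
 *	logical  0 1 2 3
 *	physical 2 1 0 3
 *
 * i.e. the booting core always becomes logical CPU 0, the core that would
 * have been logical 2 takes its physical slot, and the map remains a
 * permutation of the physical IDs.
 */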

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at 0x%08llx\n", (long long)start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);

#ifndef CONFIG_ARM_LPAE
	if (bank->start + size < bank->start) {
		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
			"32-bit physical address space\n", (long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - bank->start;
	}
#endif

	bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}
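
/*
 * Worked example of the alignment logic above (illustrative numbers,
 * assuming 4 KiB pages): a bank described as start = 0x80000800,
 * size = 0x00100000 is first shrunk by the 0x800 offset into the page
 * (size becomes 0x000ff800), start is rounded up to 0x80001000, and the
 * size is finally masked down to whole pages, 0x000ff000.  The recorded
 * bank therefore covers 0x80001000..0x800fffff, entirely inside the
 * region the caller passed in.
 */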

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	phys_addr_t size;
	phys_addr_t start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}
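
/*
 * Example usage (illustrative, see Documentation/kdump/kdump.txt): booting
 * with "crashkernel=64M@32M" makes parse_crashkernel() return
 * crash_size = 64 MiB and crash_base = 32 MiB; reserve_crashkernel() below
 * then withdraws that range from bootmem and publishes it through crashk_res
 * so a dump capture kernel can later be loaded there.
 */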
/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The memory reserved is used by a dump capture
 * kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	setup_dma_zone(mdesc);

	if (mdesc->restart_mode)
		reboot_setup(&mdesc->restart_mode);

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
#ifdef CONFIG_SMP
	if (is_smp()) {
		smp_set_ops(mdesc->smp);
		smp_init_cpus();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

	tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}
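
/*
 * Boot-data note for setup_arch() above: __atags_pointer may address either
 * a flattened device tree or a legacy ATAGS list.  setup_machine_fdt()
 * returns NULL when the blob is not a DTB, in which case the ATAGS path
 * (setup_machine_tags) is used as the fallback, so one kernel image can boot
 * from either style of bootloader.
 */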

static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
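
/*
 * The three tiny iterator callbacks above implement the usual single-shot
 * seq_file idiom: c_start() hands back a dummy non-NULL token only for
 * position 0 and c_next() always returns NULL, so c_show() runs exactly once
 * per read of /proc/cpuinfo and does its own loop over the online CPUs.
 * A rough sketch of how the seq_file core consumes this (illustrative only,
 * the real driver with buffering and error handling lives in fs/seq_file.c):
 *
 *	p = op->start(m, &pos);
 *	while (p) {
 *		op->show(m, p);
 *		p = op->next(m, p, &pos);
 *	}
 *	op->stop(m, p);
 */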