xref: /linux-6.15/arch/arc/kernel/setup.c (revision edb64bca)
1 /*
2  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 as
6  * published by the Free Software Foundation.
7  */
8 
9 #include <linux/seq_file.h>
10 #include <linux/fs.h>
11 #include <linux/delay.h>
12 #include <linux/root_dev.h>
13 #include <linux/clk.h>
14 #include <linux/clk-provider.h>
15 #include <linux/clocksource.h>
16 #include <linux/console.h>
17 #include <linux/module.h>
18 #include <linux/cpu.h>
19 #include <linux/of_fdt.h>
20 #include <linux/of.h>
21 #include <linux/cache.h>
22 #include <uapi/linux/mount.h>
23 #include <asm/sections.h>
24 #include <asm/arcregs.h>
25 #include <asm/tlb.h>
26 #include <asm/setup.h>
27 #include <asm/page.h>
28 #include <asm/irq.h>
29 #include <asm/unwind.h>
30 #include <asm/mach_desc.h>
31 #include <asm/smp.h>
32 
/* Dummy asm: forces gcc to keep @x in a register and not re-derive it here */
#define FIX_PTR(x)  __asm__ __volatile__(";" : "+r"(x))

/* NOTE(review): stat counter, presumably bumped by entry code — not used here */
unsigned int intr_to_DE_cnt;

/* Part of U-boot ABI: see head.S */
int __initdata uboot_tag;
int __initdata uboot_magic;
char __initdata *uboot_arg;

/* Board description matched from DT; set by handle_uboot_args() */
const struct machine_desc *machine_desc;

struct task_struct *_current_task[NR_CPUS];	/* For stack switching */

/* Per-cpu hardware capability info, filled by read_arc_build_cfg_regs() */
struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
47 
/* Exact AUX_IDENTITY family id -> silicon release string */
static const struct id_to_str arc_cpu_rel[] = {
#ifdef CONFIG_ISA_ARCOMPACT
	{ 0x34, "R4.10"},
	{ 0x35, "R4.11"},
#else
	{ 0x51, "R2.0" },
	{ 0x52, "R2.1" },
	{ 0x53, "R3.0" },
	{ 0x54, "R3.10a" },
#endif
	{ 0x00, NULL   }	/* sentinel: terminates lookup loop */
};
60 
/* Masked family id -> marketing core name (see lookup in read_arc_build_cfg_regs) */
static const struct id_to_str arc_cpu_nm[] = {
#ifdef CONFIG_ISA_ARCOMPACT
	{ 0x20, "ARC 600"   },
	{ 0x30, "ARC 770"   },  /* 750 identified separately */
#else
	{ 0x40, "ARC EM"  },
	{ 0x50, "ARC HS38"  },
	{ 0x54, "ARC HS48"  },
#endif
	{ 0x00, "Unknown"   }	/* sentinel doubles as fallback name */
};
72 
/*
 * Decode the ICCM/DCCM (Closely Coupled Memory) Build Config Registers and
 * record size/base of each CCM present into @cpu.
 *
 * ARCompact and ARCv2 encode CCM geometry differently, hence two variants.
 */
static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
{
	if (is_isa_arcompact()) {
		struct bcr_iccm_arcompact iccm;
		struct bcr_dccm_arcompact dccm;

		READ_BCR(ARC_REG_ICCM_BUILD, iccm);
		if (iccm.ver) {
			cpu->iccm.sz = 4096 << iccm.sz;	/* 8K to 512K */
			cpu->iccm.base_addr = iccm.base << 16;
		}

		READ_BCR(ARC_REG_DCCM_BUILD, dccm);
		if (dccm.ver) {
			unsigned long base;
			cpu->dccm.sz = 2048 << dccm.sz;	/* 2K to 256K */

			/* DCCM base is in a separate aux reg; low nibble is not address */
			base = read_aux_reg(ARC_REG_DCCM_BASE_BUILD);
			cpu->dccm.base_addr = base & ~0xF;
		}
	} else {
		struct bcr_iccm_arcv2 iccm;
		struct bcr_dccm_arcv2 dccm;
		unsigned long region;

		READ_BCR(ARC_REG_ICCM_BUILD, iccm);
		if (iccm.ver) {
			cpu->iccm.sz = 256 << iccm.sz00;	/* 512B to 16M */
			/* sz00 saturated: extend size with second field */
			if (iccm.sz00 == 0xF && iccm.sz01 > 0)
				cpu->iccm.sz <<= iccm.sz01;

			region = read_aux_reg(ARC_REG_AUX_ICCM);
			cpu->iccm.base_addr = region & 0xF0000000;
		}

		READ_BCR(ARC_REG_DCCM_BUILD, dccm);
		if (dccm.ver) {
			cpu->dccm.sz = 256 << dccm.sz0;
			if (dccm.sz0 == 0xF && dccm.sz1 > 0)
				cpu->dccm.sz <<= dccm.sz1;

			region = read_aux_reg(ARC_REG_AUX_DCCM);
			cpu->dccm.base_addr = region & 0xF0000000;
		}
	}
}
119 
/*
 * Probe the Build Config Registers (BCRs) of the executing CPU and fill in
 * its cpuinfo_arc700[] slot: identity/name, timers, vector base, multiplier,
 * CCMs, MMU/cache geometry, FPU, branch predictor and ISA extensions.
 * Called per-CPU from setup_processor().
 */
static void read_arc_build_cfg_regs(void)
{
	struct bcr_timer timer;
	struct bcr_generic bcr;
	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
	const struct id_to_str *tbl;
	struct bcr_isa_arcv2 isa;
	struct bcr_actionpoint ap;

	FIX_PTR(cpu);

	READ_BCR(AUX_IDENTITY, cpu->core);

	/* Exact family id -> release string (e.g. "R3.0"); may stay unset */
	for (tbl = &arc_cpu_rel[0]; tbl->id != 0; tbl++) {
		if (cpu->core.family == tbl->id) {
			cpu->details = tbl->str;
			break;
		}
	}

	/*
	 * Masked family -> core name. Mask 0xF4 keeps bit 2 so HS48 (0x54)
	 * is told apart from other 0x5x HS38 parts.
	 * NOTE(review): ARCompact families 0x34/0x35 match neither 0x20 nor
	 * 0x30 under this mask and fall to "Unknown" — verify 770 naming.
	 */
	for (tbl = &arc_cpu_nm[0]; tbl->id != 0; tbl++) {
		if ((cpu->core.family & 0xF4) == tbl->id)
			break;
	}
	cpu->name = tbl->str;

	READ_BCR(ARC_REG_TIMERS_BCR, timer);
	cpu->extn.timer0 = timer.t0;
	cpu->extn.timer1 = timer.t1;
	cpu->extn.rtc = timer.rtc;

	cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);

	READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy);

	/* Trailing comments list the raw BCR versions that map to "present" */
	cpu->extn.norm = read_aux_reg(ARC_REG_NORM_BCR) > 1 ? 1 : 0; /* 2,3 */
	cpu->extn.barrel = read_aux_reg(ARC_REG_BARREL_BCR) > 1 ? 1 : 0; /* 2,3 */
	cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0;        /* 1,3 */
	cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0;
	cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */
	/* cores >= 0x34 assumed to have SWAPE regardless of Kconfig */
	cpu->extn.swape = (cpu->core.family >= 0x34) ? 1 :
				IS_ENABLED(CONFIG_ARC_HAS_SWAPE);

	READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);

	/* Read CCM BCRs for boot reporting even if not enabled in Kconfig */
	read_decode_ccm_bcr(cpu);

	read_decode_mmu_bcr();
	read_decode_cache_bcr();

	if (is_isa_arcompact()) {
		struct bcr_fp_arcompact sp, dp;
		struct bcr_bpu_arcompact bpu;

		/* ARCompact: separate BCRs for single vs double precision FP */
		READ_BCR(ARC_REG_FP_BCR, sp);
		READ_BCR(ARC_REG_DPFP_BCR, dp);
		cpu->extn.fpu_sp = sp.ver ? 1 : 0;
		cpu->extn.fpu_dp = dp.ver ? 1 : 0;

		READ_BCR(ARC_REG_BPU_BCR, bpu);
		cpu->bpu.ver = bpu.ver;
		cpu->bpu.full = bpu.fam ? 1 : 0;
		if (bpu.ent) {
			cpu->bpu.num_cache = 256 << (bpu.ent - 1);
			cpu->bpu.num_pred = 256 << (bpu.ent - 1);
		}
	} else {
		struct bcr_fp_arcv2 spdp;
		struct bcr_bpu_arcv2 bpu;

		/* ARCv2: single BCR covers both FP precisions */
		READ_BCR(ARC_REG_FP_V2_BCR, spdp);
		cpu->extn.fpu_sp = spdp.sp ? 1 : 0;
		cpu->extn.fpu_dp = spdp.dp ? 1 : 0;

		READ_BCR(ARC_REG_BPU_BCR, bpu);
		cpu->bpu.ver = bpu.ver;
		cpu->bpu.full = bpu.ft;
		cpu->bpu.num_cache = 256 << bpu.bce;
		cpu->bpu.num_pred = 2048 << bpu.pte;
		cpu->bpu.ret_stk = 4 << bpu.rse;

		if (cpu->core.family >= 0x54) {

			struct bcr_uarch_build_arcv2 uarch;

			/*
			 * The first 0x54 core (uarch maj:min 0:1 or 0:2) was
			 * dual issue only (HS4x). But next uarch rev (1:0)
			 * allows it be configured for single issue (HS3x)
			 * Ensure we fiddle with dual issue only on HS4x
			 */
			READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);

			if (uarch.prod == 4) {
				unsigned int exec_ctrl;

				/* dual issue hardware always present */
				cpu->extn.dual = 1;

				READ_BCR(AUX_EXEC_CTRL, exec_ctrl);

				/* dual issue hardware enabled ? */
				cpu->extn.dual_enb = !(exec_ctrl & 1);

			}
		}
	}

	READ_BCR(ARC_REG_AP_BCR, ap);
	if (ap.ver) {
		/* ap.num 0/1/2 -> 2/4/8 actionpoints */
		cpu->extn.ap_num = 2 << ap.num;
		cpu->extn.ap_full = !ap.min;
	}

	READ_BCR(ARC_REG_SMART_BCR, bcr);
	cpu->extn.smart = bcr.ver ? 1 : 0;

	READ_BCR(ARC_REG_RTT_BCR, bcr);
	cpu->extn.rtt = bcr.ver ? 1 : 0;

	READ_BCR(ARC_REG_ISA_CFG_BCR, isa);

	/* some hacks for lack of feature BCR info in old ARC700 cores */
	if (is_isa_arcompact()) {
		if (!isa.ver)	/* ISA BCR absent, use Kconfig info */
			cpu->isa.atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
		else {
			/* ARC700_BUILD only has 2 bits of isa info */
			struct bcr_generic bcr = *(struct bcr_generic *)&isa;
			cpu->isa.atomic = bcr.info & 1;
		}

		cpu->isa.be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);

		 /* there's no direct way to distinguish 750 vs. 770 */
		if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3))
			cpu->name = "ARC750";
	} else {
		cpu->isa = isa;
	}
}
262 
/*
 * Format the main boot-log blurb for cpu @cpu_id into @buf (bounded by @len):
 * identity, ISA, timers, ISA extensions, MPY options, BPU and loop buffer.
 * Returns @buf. Note: IS_AVAIL2/IS_AVAIL3 each expand to TWO "%s" arguments,
 * which is why the format strings have more %s than visible macro calls.
 */
static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
{
	struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
	struct bcr_identity *core = &cpu->core;
	int n = 0;

	FIX_PTR(cpu);

	n += scnprintf(buf + n, len - n,
		       "\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
		       core->family, core->cpu_id, core->chip_id);

	n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s%s%s\n",
		       cpu_id, cpu->name, cpu->details,
		       is_isa_arcompact() ? "ARCompact" : "ARCv2",
		       IS_AVAIL1(cpu->isa.be, "[Big-Endian]"),
		       IS_AVAIL3(cpu->extn.dual, cpu->extn.dual_enb, " Dual-Issue "));

	n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s%s%s\nISA Extn\t: ",
		       IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
		       IS_AVAIL1(cpu->extn.timer1, "Timer1 "),
		       IS_AVAIL2(cpu->extn.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
		       IS_AVAIL2(cpu->extn.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT));

	n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s",
		       IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
		       IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
		       IS_AVAIL2(cpu->isa.unalign, "unalign ", CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS));

#if defined(__ARC_UNALIGNED__) && !defined(CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS)
	/*
	 * gcc 7.3.1 (GNU 2018.03) onwards generate unaligned access by default
	 * but -mno-unaligned-access to disable that didn't work until gcc 8.2.1
	 * (GNU 2019.03). So landing here implies the interim period, when
	 * despite Kconfig being off, gcc is generating unaligned accesses which
	 * could bomb later on. So better to disallow such broken builds
	 */
	BUILD_BUG_ON_MSG(1, "gcc doesn't support -mno-unaligned-access");
#endif

	n += scnprintf(buf + n, len - n, "\n\t\t: ");

	if (cpu->extn_mpy.ver) {
		if (cpu->extn_mpy.ver <= 0x2) {	/* ARCompact */
			n += scnprintf(buf + n, len - n, "mpy ");
		} else {
			int opt = 2;	/* stock MPY/MPYH */

			if (cpu->extn_mpy.dsp)	/* OPT 7-9 */
				opt = cpu->extn_mpy.dsp + 6;

			n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt);
		}
	}

	n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n",
		       IS_AVAIL1(cpu->isa.div_rem, "div_rem "),
		       IS_AVAIL1(cpu->extn.norm, "norm "),
		       IS_AVAIL1(cpu->extn.barrel, "barrel-shift "),
		       IS_AVAIL1(cpu->extn.swap, "swap "),
		       IS_AVAIL1(cpu->extn.minmax, "minmax "),
		       IS_AVAIL1(cpu->extn.crc, "crc "),
		       IS_AVAIL2(cpu->extn.swape, "swape", CONFIG_ARC_HAS_SWAPE));

	if (cpu->bpu.ver)
		n += scnprintf(buf + n, len - n,
			      "BPU\t\t: %s%s match, cache:%d, Predict Table:%d Return stk: %d",
			      IS_AVAIL1(cpu->bpu.full, "full"),
			      IS_AVAIL1(!cpu->bpu.full, "partial"),
			      cpu->bpu.num_cache, cpu->bpu.num_pred, cpu->bpu.ret_stk);

	if (is_isa_arcv2()) {
		struct bcr_lpb lpb;

		/* Loop buffer: report size and whether runtime-disabled */
		READ_BCR(ARC_REG_LPB_BUILD, lpb);
		if (lpb.ver) {
			unsigned int ctl;
			ctl = read_aux_reg(ARC_REG_LPB_CTRL);

			n += scnprintf(buf + n, len - n, " Loop Buffer:%d %s",
				lpb.entries,
				IS_DISABLED_RUN(!ctl));
		}
	}

	n += scnprintf(buf + n, len - n, "\n");
	return buf;
}
351 
/*
 * Format "extension" details for cpu @cpu_id into @buf (bounded by @len):
 * vector table base, FPU, debug facilities (smaRT/RTT/ActionPoints), CCMs,
 * ECC/parity protection and the OS ABI revision. Returns @buf.
 */
static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];

	FIX_PTR(cpu);

	n += scnprintf(buf + n, len - n, "Vector Table\t: %#x\n", cpu->vec_base);

	if (cpu->extn.fpu_sp || cpu->extn.fpu_dp)
		n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n",
			       IS_AVAIL1(cpu->extn.fpu_sp, "SP "),
			       IS_AVAIL1(cpu->extn.fpu_dp, "DP "));

	if (cpu->extn.ap_num | cpu->extn.smart | cpu->extn.rtt) {
		n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s",
			       IS_AVAIL1(cpu->extn.smart, "smaRT "),
			       IS_AVAIL1(cpu->extn.rtt, "RTT "));
		if (cpu->extn.ap_num) {
			n += scnprintf(buf + n, len - n, "ActionPoint %d/%s",
				       cpu->extn.ap_num,
				       cpu->extn.ap_full ? "full":"min");
		}
		n += scnprintf(buf + n, len - n, "\n");
	}

	if (cpu->dccm.sz || cpu->iccm.sz)
		n += scnprintf(buf + n, len - n, "Extn [CCM]\t: DCCM @ %x, %d KB / ICCM: @ %x, %d KB\n",
			       cpu->dccm.base_addr, TO_KB(cpu->dccm.sz),
			       cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));

	if (is_isa_arcv2()) {

		/* Error Protection: ECC/Parity */
		struct bcr_erp erp;
		READ_BCR(ARC_REG_ERP_BUILD, erp);

		if (erp.ver) {
			struct  ctl_erp ctl;
			READ_BCR(ARC_REG_ERP_CTRL, ctl);

			/* inverted bits: 0 means enabled */
			n += scnprintf(buf + n, len - n, "Extn [ECC]\t: %s%s%s%s%s%s\n",
				IS_AVAIL3(erp.ic,  !ctl.dpi, "IC "),
				IS_AVAIL3(erp.dc,  !ctl.dpd, "DC "),
				IS_AVAIL3(erp.mmu, !ctl.mpd, "MMU "));
		}
	}

	n += scnprintf(buf + n, len - n, "OS ABI [v%d]\t: %s\n",
			EF_ARC_OSABI_CURRENT >> 8,
			EF_ARC_OSABI_CURRENT == EF_ARC_OSABI_V3 ?
			"no-legacy-syscalls" : "64-bit data any register aligned");

	return buf;
}
408 
409 static void arc_chk_core_config(void)
410 {
411 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
412 	int saved = 0, present = 0;
413 	char *opt_nm = NULL;
414 
415 	if (!cpu->extn.timer0)
416 		panic("Timer0 is not present!\n");
417 
418 	if (!cpu->extn.timer1)
419 		panic("Timer1 is not present!\n");
420 
421 #ifdef CONFIG_ARC_HAS_DCCM
422 	/*
423 	 * DCCM can be arbit placed in hardware.
424 	 * Make sure it's placement/sz matches what Linux is built with
425 	 */
426 	if ((unsigned int)__arc_dccm_base != cpu->dccm.base_addr)
427 		panic("Linux built with incorrect DCCM Base address\n");
428 
429 	if (CONFIG_ARC_DCCM_SZ != cpu->dccm.sz)
430 		panic("Linux built with incorrect DCCM Size\n");
431 #endif
432 
433 #ifdef CONFIG_ARC_HAS_ICCM
434 	if (CONFIG_ARC_ICCM_SZ != cpu->iccm.sz)
435 		panic("Linux built with incorrect ICCM Size\n");
436 #endif
437 
438 	/*
439 	 * FP hardware/software config sanity
440 	 * -If hardware present, kernel needs to save/restore FPU state
441 	 * -If not, it will crash trying to save/restore the non-existant regs
442 	 */
443 
444 	if (is_isa_arcompact()) {
445 		opt_nm = "CONFIG_ARC_FPU_SAVE_RESTORE";
446 		saved = IS_ENABLED(CONFIG_ARC_FPU_SAVE_RESTORE);
447 
448 		/* only DPDP checked since SP has no arch visible regs */
449 		present = cpu->extn.fpu_dp;
450 	} else {
451 		opt_nm = "CONFIG_ARC_HAS_ACCL_REGS";
452 		saved = IS_ENABLED(CONFIG_ARC_HAS_ACCL_REGS);
453 
454 		/* Accumulator Low:High pair (r58:59) present if DSP MPY or FPU */
455 		present = cpu->extn_mpy.dsp | cpu->extn.fpu_sp | cpu->extn.fpu_dp;
456 	}
457 
458 	if (present && !saved)
459 		pr_warn("Enable %s for working apps\n", opt_nm);
460 	else if (!present && saved)
461 		panic("Disable %s, hardware NOT present\n", opt_nm);
462 }
463 
464 /*
465  * Initialize and setup the processor core
466  * This is called by all the CPUs thus should not do special case stuff
467  *    such as only for boot CPU etc
468  */
469 
void setup_processor(void)
{
	char buf[512];
	int cpu = smp_processor_id();

	/* Decode this core's hardware config, then bring up its interrupts */
	read_arc_build_cfg_regs();
	arc_init_IRQ();

	pr_info("%s", arc_cpu_mumbojumbo(cpu, buf, sizeof(buf)));

	arc_mmu_init();
	arc_cache_init();

	pr_info("%s", arc_extn_mumbojumbo(cpu, buf, sizeof(buf)));
	pr_info("%s", arc_platform_smp_cpuinfo());

	/* Finally bail out if hardware and kernel build config disagree */
	arc_chk_core_config();
}
488 
489 static inline bool uboot_arg_invalid(unsigned long addr)
490 {
491 	/*
492 	 * Check that it is a untranslated address (although MMU is not enabled
493 	 * yet, it being a high address ensures this is not by fluke)
494 	 */
495 	if (addr < PAGE_OFFSET)
496 		return true;
497 
498 	/* Check that address doesn't clobber resident kernel image */
499 	return addr >= (unsigned long)_stext && addr <= (unsigned long)_end;
500 }
501 
/* Common prefix for warnings emitted when U-boot args are rejected */
#define IGNORE_ARGS		"Ignore U-boot args: "

/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */
#define UBOOT_TAG_NONE		0
#define UBOOT_TAG_CMDLINE	1
#define UBOOT_TAG_DTB		2
/* We always pass 0 as magic from U-boot */
#define UBOOT_MAGIC_VALUE	0
510 
/*
 * Digest boot info handed over by U-boot (tag/magic/arg trio stashed by
 * head.S): either an external DTB or an extra cmdline to append. Any
 * inconsistency falls back to the embedded DTB and its built-in cmdline.
 */
void __init handle_uboot_args(void)
{
	bool use_embedded_dtb = true;
	bool append_cmdline = false;

	/* check that we know this tag */
	if (uboot_tag != UBOOT_TAG_NONE &&
	    uboot_tag != UBOOT_TAG_CMDLINE &&
	    uboot_tag != UBOOT_TAG_DTB) {
		pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag);
		goto ignore_uboot_args;
	}

	if (uboot_magic != UBOOT_MAGIC_VALUE) {
		pr_warn(IGNORE_ARGS "non zero uboot magic\n");
		goto ignore_uboot_args;
	}

	/* any tag other than NONE implies uboot_arg is a pointer - vet it */
	if (uboot_tag != UBOOT_TAG_NONE &&
            uboot_arg_invalid((unsigned long)uboot_arg)) {
		pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
		goto ignore_uboot_args;
	}

	/* see if U-boot passed an external Device Tree blob */
	if (uboot_tag == UBOOT_TAG_DTB) {
		machine_desc = setup_machine_fdt((void *)uboot_arg);

		/* external Device Tree blob is invalid - use embedded one */
		use_embedded_dtb = !machine_desc;
	}

	if (uboot_tag == UBOOT_TAG_CMDLINE)
		append_cmdline = true;

ignore_uboot_args:

	if (use_embedded_dtb) {
		machine_desc = setup_machine_fdt(__dtb_start);
		if (!machine_desc)
			panic("Embedded DT invalid\n");
	}

	/*
	 * NOTE: @boot_command_line is populated by setup_machine_fdt() so this
	 * append processing can only happen after.
	 */
	if (append_cmdline) {
		/* Ensure a whitespace between the 2 cmdlines */
		strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE);
	}
}
564 
/*
 * Arch entry point for boot-time setup, called once from start_kernel().
 * Order matters throughout: DT must be parsed before early params and
 * machine_desc hooks; setup_processor() before memory setup.
 */
void __init setup_arch(char **cmdline_p)
{
	handle_uboot_args();

	/* Save unparsed command line copy for /proc/cmdline */
	*cmdline_p = boot_command_line;

	/* To force early parsing of things like mem=xxx */
	parse_early_param();

	/* Platform/board specific: e.g. early console registration */
	if (machine_desc->init_early)
		machine_desc->init_early();

	smp_init_cpus();

	setup_processor();
	setup_arch_memory();

	/* copy flat DT out of .init and then unflatten it */
	unflatten_and_copy_device_tree();

	/* Can be issue if someone passes cmd line arg "ro"
	 * But that is unlikely so keeping it as it is
	 */
	root_mountflags &= ~MS_RDONLY;

#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif

	arc_unwind_init();
}
598 
599 /*
600  * Called from start_kernel() - boot CPU only
601  */
void __init time_init(void)
{
	/* Register DT-described clocks, then probe clocksource drivers */
	of_clk_init(NULL);
	timer_probe();
}
607 
/* Invoke the board's optional init_machine hook at arch_initcall time */
static int __init customize_machine(void)
{
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);
616 
/* Invoke the board's optional init_late hook at late_initcall time */
static int __init init_late_machine(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();

	return 0;
}
late_initcall(init_late_machine);
/*
 *  Get CPU information for use by the procfs.
 */

/*
 * seq_file iterator cookie: encode cpu-id as 0xFFFF0000 | cpu so that cpu 0
 * remains distinguishable from the NULL "stop iterating" sentinel.
 */
#define cpu_to_ptr(c)	((void *)(0xFFFF0000 | (unsigned int)(c)))
#define ptr_to_cpu(p)	(~0xFFFF0000UL & (unsigned int)(p))
631 
632 static int show_cpuinfo(struct seq_file *m, void *v)
633 {
634 	char *str;
635 	int cpu_id = ptr_to_cpu(v);
636 	struct device *cpu_dev = get_cpu_device(cpu_id);
637 	struct clk *cpu_clk;
638 	unsigned long freq = 0;
639 
640 	if (!cpu_online(cpu_id)) {
641 		seq_printf(m, "processor [%d]\t: Offline\n", cpu_id);
642 		goto done;
643 	}
644 
645 	str = (char *)__get_free_page(GFP_KERNEL);
646 	if (!str)
647 		goto done;
648 
649 	seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE));
650 
651 	cpu_clk = clk_get(cpu_dev, NULL);
652 	if (IS_ERR(cpu_clk)) {
653 		seq_printf(m, "CPU speed \t: Cannot get clock for processor [%d]\n",
654 			   cpu_id);
655 	} else {
656 		freq = clk_get_rate(cpu_clk);
657 	}
658 	if (freq)
659 		seq_printf(m, "CPU speed\t: %lu.%02lu Mhz\n",
660 			   freq / 1000000, (freq / 10000) % 100);
661 
662 	seq_printf(m, "Bogo MIPS\t: %lu.%02lu\n",
663 		   loops_per_jiffy / (500000 / HZ),
664 		   (loops_per_jiffy / (5000 / HZ)) % 100);
665 
666 	seq_printf(m, arc_mmu_mumbojumbo(cpu_id, str, PAGE_SIZE));
667 	seq_printf(m, arc_cache_mumbojumbo(cpu_id, str, PAGE_SIZE));
668 	seq_printf(m, arc_extn_mumbojumbo(cpu_id, str, PAGE_SIZE));
669 	seq_printf(m, arc_platform_smp_cpuinfo());
670 
671 	free_page((unsigned long)str);
672 done:
673 	seq_printf(m, "\n");
674 
675 	return 0;
676 }
677 
678 static void *c_start(struct seq_file *m, loff_t *pos)
679 {
680 	/*
681 	 * Callback returns cpu-id to iterator for show routine, NULL to stop.
682 	 * However since NULL is also a valid cpu-id (0), we use a round-about
683 	 * way to pass it w/o having to kmalloc/free a 2 byte string.
684 	 * Encode cpu-id as 0xFFcccc, which is decoded by show routine.
685 	 */
686 	return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
687 }
688 
689 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
690 {
691 	++*pos;
692 	return c_start(m, pos);
693 }
694 
/* Nothing to tear down per iteration */
static void c_stop(struct seq_file *m, void *v)
{
}
698 
/* seq_file iterator ops backing /proc/cpuinfo (wired up by fs/proc) */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo
};
705 
/* Per-cpu device objects registered with the driver core by topology_init() */
static DEFINE_PER_CPU(struct cpu, cpu_topology);
707 
708 static int __init topology_init(void)
709 {
710 	int cpu;
711 
712 	for_each_present_cpu(cpu)
713 	    register_cpu(&per_cpu(cpu_topology, cpu), cpu);
714 
715 	return 0;
716 }
717 
718 subsys_initcall(topology_init);
719