/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PROCESSOR_H
#define _ASM_POWERPC_PROCESSOR_H

/*
 * Copyright (C) 2001 PPC 64 Team, IBM Corp
 */

#include <asm/reg.h>

#ifdef CONFIG_VSX
#define TS_FPRWIDTH 2

#ifdef __BIG_ENDIAN__
#define TS_FPROFFSET 0
#define TS_VSRLOWOFFSET 1
#else
#define TS_FPROFFSET 1
#define TS_VSRLOWOFFSET 0
#endif

#else
#define TS_FPRWIDTH 1
#define TS_FPROFFSET 0
#endif
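/*
 * With VSX each entry of fpr[][] is a 128-bit VSR split into two 64-bit
 * words, and the classic FPR value occupies only one of them. For
 * example, on little-endian fpr[i][1] holds FPR i and fpr[i][0] holds
 * the low VSR word, while big-endian reverses the two; TS_FPROFFSET and
 * TS_VSRLOWOFFSET let callers index fpr[][] without caring which.
 */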

#ifdef CONFIG_PPC64
/* Default SMT priority is set to 3. Use bits 11-13 to store the priority. */
#define PPR_PRIORITY 3
#ifdef __ASSEMBLY__
#define DEFAULT_PPR (PPR_PRIORITY << 50)
#else
#define DEFAULT_PPR ((u64)PPR_PRIORITY << 50)
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PPC64 */
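/*
 * The PPR's priority field sits at bits 11-13 in IBM (MSB-first)
 * numbering, i.e. at shifts 52 down to 50 from the least-significant
 * bit, hence the shift by 50 above. As a worked example:
 *
 *	DEFAULT_PPR == 3ULL << 50 == 0x000c000000000000
 */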

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/thread_info.h>
#include <asm/ptrace.h>
#include <asm/hw_breakpoint.h>

/*
 * We do _not_ want to define new machine types at all; those must die
 * in favor of using the device-tree.
 * -- BenH.
 */

/* PREP sub-platform types. Unused */
#define _PREP_Motorola	0x01	/* motorola prep */
#define _PREP_Firm	0x02	/* FirmWorks prep */
#define _PREP_IBM	0x00	/* ibm prep */
#define _PREP_Bull	0x03	/* bull prep */

/* CHRP sub-platform types. These are arbitrary */
#define _CHRP_Motorola	0x04	/* motorola chrp, the cobra */
#define _CHRP_IBM	0x05	/* IBM chrp, the longtrail and longtrail 2 */
#define _CHRP_Pegasos	0x06	/* Genesi/bplan's Pegasos and Pegasos2 */
#define _CHRP_briq	0x07	/* TotalImpact's briQ */

#if defined(__KERNEL__) && defined(CONFIG_PPC32)

extern int _chrp_type;

#endif /* defined(__KERNEL__) && defined(CONFIG_PPC32) */

/* Macros for adjusting thread priority (hardware multi-threading) */
#define HMT_very_low()   asm volatile("or 31,31,31   # very low priority")
#define HMT_low()	 asm volatile("or 1,1,1	     # low priority")
#define HMT_medium_low() asm volatile("or 6,6,6      # medium low priority")
#define HMT_medium()	 asm volatile("or 2,2,2	     # medium priority")
#define HMT_medium_high() asm volatile("or 5,5,5      # medium high priority")
#define HMT_high()	 asm volatile("or 3,3,3	     # high priority")
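/*
 * The typical pattern lowers the hardware thread's priority while
 * spinning, so sibling threads get more issue slots, then restores it
 * once done, e.g.:
 *
 *	HMT_low();
 *	while (!done)
 *		barrier();
 *	HMT_medium();
 *
 * cpu_relax() and spin_begin()/spin_end() further below are built on
 * exactly this pattern.
 */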

#ifdef __KERNEL__

#ifdef CONFIG_PPC64
#include <asm/task_size_64.h>
#else
#include <asm/task_size_32.h>
#endif

struct task_struct;
void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
void release_thread(struct task_struct *);

typedef struct {
	unsigned long seg;
} mm_segment_t;

#define TS_FPR(i) fp_state.fpr[i][TS_FPROFFSET]
#define TS_CKFPR(i) ckfp_state.fpr[i][TS_FPROFFSET]
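/*
 * These are used relative to a thread_struct. For example, reading a
 * task's FPR 5 (illustrative, assuming a struct task_struct *tsk):
 *
 *	u64 val = tsk->thread.TS_FPR(5);
 *
 * expands to tsk->thread.fp_state.fpr[5][TS_FPROFFSET].
 */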

/* FP and VSX 0-31 register set */
struct thread_fp_state {
	u64	fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
	u64	fpscr;		/* Floating point status */
};

/* Complete AltiVec register set including VSCR */
struct thread_vr_state {
	vector128	vr[32] __attribute__((aligned(16)));
	vector128	vscr __attribute__((aligned(16)));
};

struct debug_reg {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	/*
	 * The following help to manage the use of the Debug Control
	 * Registers on the BookE platforms.
	 */
	uint32_t	dbcr0;
	uint32_t	dbcr1;
#ifdef CONFIG_BOOKE
	uint32_t	dbcr2;
#endif
	/*
	 * The stored value of the DBSR register is its value at the last
	 * debug interrupt. It is only ever read by userspace (never
	 * written from it) and describes the reason for the last debug
	 * trap.
	 * -- Torez
	 */
	uint32_t	dbsr;
	/*
	 * The following will contain addresses used by debug applications
	 * to help trace and trap on particular address locations.
	 * The bits in the Debug Control Registers above help define which
	 * of the following registers will contain valid data and/or addresses.
	 */
	unsigned long	iac1;
	unsigned long	iac2;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	unsigned long	iac3;
	unsigned long	iac4;
#endif
	unsigned long	dac1;
	unsigned long	dac2;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	unsigned long	dvc1;
	unsigned long	dvc2;
#endif
#endif
};
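/*
 * A debugger consumes these fields together: the DBCRx bits say which
 * IAC/DAC/DVC slots are armed and dbsr says why the last debug
 * interrupt fired. A sketch, assuming the DBCR0_IAC1 bit definition
 * from <asm/reg_booke.h>, where iac1 is only meaningful while armed:
 *
 *	if (thread->debug.dbcr0 & DBCR0_IAC1)
 *		addr = thread->debug.iac1;
 */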

struct thread_struct {
	unsigned long	ksp;		/* Kernel stack pointer */

#ifdef CONFIG_PPC64
	unsigned long	ksp_vsid;
#endif
	struct pt_regs	*regs;		/* Pointer to saved register state */
	mm_segment_t	addr_limit;	/* for get_fs() validation */
#ifdef CONFIG_BOOKE
	/* BookE base exception scratch space; align on cacheline */
	unsigned long	normsave[8] ____cacheline_aligned;
#endif
#ifdef CONFIG_PPC32
	void		*pgdir;		/* root of page-table tree */
	unsigned long	ksp_limit;	/* if ksp <= ksp_limit stack overflow */
#ifdef CONFIG_PPC_RTAS
	unsigned long	rtas_sp;	/* stack pointer for when in RTAS */
#endif
#endif
#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
	unsigned long	kuap;		/* opened segments for user access */
#endif
#ifdef CONFIG_VMAP_STACK
	unsigned long	srr0;
	unsigned long	srr1;
	unsigned long	dar;
	unsigned long	dsisr;
#ifdef CONFIG_PPC_BOOK3S_32
	unsigned long	r0, r3, r4, r5, r6, r8, r9, r11;
	unsigned long	lr, ctr;
#endif
#endif
	/* Debug Registers */
	struct debug_reg debug;
	struct thread_fp_state	fp_state;
	struct thread_fp_state	*fp_save_area;
	int		fpexc_mode;	/* floating-point exception mode */
	unsigned int	align_ctl;	/* alignment handling control */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	struct perf_event *ptrace_bps[HBP_NUM];
	/*
	 * Helps identify source of single-step exception and subsequent
	 * hw-breakpoint enablement
	 */
	struct perf_event *last_hit_ubp;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
	struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */
	unsigned long	trap_nr;	/* last trap # on this thread */
	u8 load_slb;			/* Ages out SLB preload cache entries */
	u8 load_fp;
#ifdef CONFIG_ALTIVEC
	u8 load_vec;
	struct thread_vr_state vr_state;
	struct thread_vr_state *vr_save_area;
	unsigned long	vrsave;
	int		used_vr;	/* set if process has used altivec */
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	/* VSR status */
	int		used_vsr;	/* set if process has used VSX */
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	unsigned long	evr[32];	/* upper 32-bits of SPE regs */
	u64		acc;		/* Accumulator */
	unsigned long	spefscr;	/* SPE & eFP status */
	unsigned long	spefscr_last;	/* SPEFSCR value on last prctl
					   call or trap return */
	int		used_spe;	/* set if process has used spe */
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	u8	load_tm;
	u64		tm_tfhar;	/* Transaction fail handler addr */
	u64		tm_texasr;	/* Transaction exception & summary */
	u64		tm_tfiar;	/* Transaction fail instr address reg */
	struct pt_regs	ckpt_regs;	/* Checkpointed registers */

	unsigned long	tm_tar;
	unsigned long	tm_ppr;
	unsigned long	tm_dscr;

	/*
	 * Checkpointed FP and VSX 0-31 register set.
	 *
	 * When a transaction is active/signalled/scheduled etc., *regs is
	 * the most recent set of (speculated) GPRs, with ckpt_regs being
	 * the older checkpointed regs to which we roll back if the
	 * transaction aborts.
	 *
	 * ckfp_state and ckvr_state relate to fp_state and vr_state in
	 * the same way.
	 */
	struct thread_fp_state ckfp_state; /* Checkpointed FP state */
	struct thread_vr_state ckvr_state; /* Checkpointed VR state */
	unsigned long	ckvrsave; /* Checkpointed VRSAVE */
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#ifdef CONFIG_PPC_MEM_KEYS
	unsigned long	amr;
	unsigned long	iamr;
	unsigned long	uamor;
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	void*		kvm_shadow_vcpu; /* KVM internal data */
#endif /* CONFIG_KVM_BOOK3S_32_HANDLER */
#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
	struct kvm_vcpu	*kvm_vcpu;
#endif
#ifdef CONFIG_PPC64
	unsigned long	dscr;
	unsigned long	fscr;
	/*
	 * dscr_inherit indicates that the process has explicitly changed
	 * the DSCR value for itself, so the kernel will no longer use the
	 * default CPU DSCR value from the PACA for it during context
	 * switch. Once set, this behaviour is also inherited by all of
	 * the process's children from that point onwards.
	 */
	int		dscr_inherit;
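	/*
	 * A sketch of the resulting policy, not the literal switch code
	 * (default_dscr standing in for the PACA's default value):
	 *
	 *	if (new->thread.dscr_inherit)
	 *		mtspr(SPRN_DSCR, new->thread.dscr);
	 *	else
	 *		mtspr(SPRN_DSCR, default_dscr);
	 */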
	unsigned long	tidr;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	unsigned long	tar;
	unsigned long	ebbrr;
	unsigned long	ebbhr;
	unsigned long	bescr;
	unsigned long	siar;
	unsigned long	sdar;
	unsigned long	sier;
	unsigned long	mmcr2;
	unsigned	mmcr0;

	unsigned	used_ebb;
#endif
};

#define ARCH_MIN_TASKALIGN 16

#define INIT_SP		(sizeof(init_stack) + (unsigned long) &init_stack)
#define INIT_SP_LIMIT	((unsigned long)&init_stack)

#ifdef CONFIG_SPE
#define SPEFSCR_INIT \
	.spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE, \
	.spefscr_last = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE,
#else
#define SPEFSCR_INIT
#endif

#ifdef CONFIG_PPC32
#define INIT_THREAD { \
	.ksp = INIT_SP, \
	.ksp_limit = INIT_SP_LIMIT, \
	.addr_limit = KERNEL_DS, \
	.pgdir = swapper_pg_dir, \
	.fpexc_mode = MSR_FE0 | MSR_FE1, \
	SPEFSCR_INIT \
}
#else
#define INIT_THREAD  { \
	.ksp = INIT_SP, \
	.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
	.addr_limit = KERNEL_DS, \
	.fpexc_mode = 0, \
	.fscr = FSCR_TAR | FSCR_EBB \
}
#endif

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.regs)

unsigned long get_wchan(struct task_struct *p);

#define KSTK_EIP(tsk)  ((tsk)->thread.regs ? (tsk)->thread.regs->nip : 0)
#define KSTK_ESP(tsk)  ((tsk)->thread.regs ? (tsk)->thread.regs->gpr[1] : 0)
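/*
 * GPR1 is the PowerPC stack pointer, and kernel threads have no saved
 * user register state, hence the NULL checks: both report 0 for a
 * kernel thread. Illustrative use:
 *
 *	unsigned long ip = KSTK_EIP(tsk);
 */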

/* Get/set floating-point exception mode */
#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr))
#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val))

extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);

#define GET_ENDIAN(tsk, adr) get_endian((tsk), (adr))
#define SET_ENDIAN(tsk, val) set_endian((tsk), (val))

extern int get_endian(struct task_struct *tsk, unsigned long adr);
extern int set_endian(struct task_struct *tsk, unsigned int val);

#define GET_UNALIGN_CTL(tsk, adr)	get_unalign_ctl((tsk), (adr))
#define SET_UNALIGN_CTL(tsk, val)	set_unalign_ctl((tsk), (val))

extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
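/*
 * These six wrappers are the arch hooks behind the corresponding
 * prctl() commands (PR_{GET,SET}_FPEXC, PR_{GET,SET}_ENDIAN and
 * PR_{GET,SET}_UNALIGN). From userspace, for example:
 *
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
 *
 * ends up in set_fpexc_mode() for the calling task.
 */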

extern void load_fp_state(struct thread_fp_state *fp);
extern void store_fp_state(struct thread_fp_state *fp);
extern void load_vr_state(struct thread_vr_state *vr);
extern void store_vr_state(struct thread_vr_state *vr);

static inline unsigned int __unpack_fe01(unsigned long msr_bits)
{
	return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
}

static inline unsigned long __pack_fe01(unsigned int fpmode)
{
	return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
}
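/*
 * The two-bit fpmode carries MSR[FE0] in bit 1 and MSR[FE1] in bit 0,
 * while MSR_FE0 is 1 << 11 and MSR_FE1 is 1 << 8 in the MSR itself.
 * Worked example for precise mode (fpmode == 3):
 *
 *	__pack_fe01(3) == 0x800 | 0x100 == 0x900
 *	__unpack_fe01(0x900) == 2 | 1 == 3
 */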

#ifdef CONFIG_PPC64
#define cpu_relax()	do { HMT_low(); HMT_medium(); barrier(); } while (0)

#define spin_begin()	HMT_low()

#define spin_cpu_relax()	barrier()

#define spin_end()	HMT_medium()

#define spin_until_cond(cond)					\
do {								\
	if (unlikely(!(cond))) {				\
		spin_begin();					\
		do {						\
			spin_cpu_relax();			\
		} while (!(cond));				\
		spin_end();					\
	}							\
} while (0)
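/*
 * spin_until_cond() keeps the low-priority window narrow: the condition
 * is tested once at normal priority, and only a failed first test drops
 * to low priority for the polling loop. Illustrative use:
 *
 *	spin_until_cond(READ_ONCE(flag));
 */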

#else
#define cpu_relax()	barrier()
#endif

/* Check that a certain kernel stack pointer is valid in task_struct p */
int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes);

/*
 * Prefetch macros.
 */
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

static inline void prefetch(const void *x)
{
	if (unlikely(!x))
		return;

	__asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
}

static inline void prefetchw(const void *x)
{
	if (unlikely(!x))
		return;

	__asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
}
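/*
 * dcbt and dcbtst are the PowerPC data-cache-block-touch hints for
 * loads and stores respectively; the NULL checks skip the pointless
 * touch that e.g. a list walk would otherwise issue for a terminating
 * NULL. Illustrative use:
 *
 *	prefetch(p->next);
 *	do_something(p);
 */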

#define spin_lock_prefetch(x)	prefetchw(x)

#define HAVE_ARCH_PICK_MMAP_LAYOUT

#ifdef CONFIG_PPC64
static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
{
	if (is_32)
		return sp & 0x0ffffffffUL;
	return sp;
}
#else
static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
{
	return sp;
}
#endif
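/*
 * For a 32-bit task the upper word of a 64-bit register carries no
 * architecturally meaningful stack address, so it is masked off, e.g.:
 *
 *	get_clean_sp(0xdeadbeefbfff0000UL, 1) == 0xbfff0000
 */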

/* asm stubs */
extern unsigned long isa300_idle_stop_noloss(unsigned long psscr_val);
extern unsigned long isa300_idle_stop_mayloss(unsigned long psscr_val);
extern unsigned long isa206_idle_insn_mayloss(unsigned long type);
#ifdef CONFIG_PPC_970_NAP
extern void power4_idle_nap(void);
#endif

extern unsigned long cpuidle_disable;
enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};

extern int powersave_nap;	/* set if nap mode can be used in idle loop */

extern void power7_idle_type(unsigned long type);
extern void power9_idle_type(unsigned long stop_psscr_val,
			      unsigned long stop_psscr_mask);

extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to);
extern void cvt_df(double *from, float *to);
extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);

#ifdef CONFIG_PPC64
/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0
#endif

#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PROCESSOR_H */