#ifndef _LINUX_PTRACE_H
#define _LINUX_PTRACE_H
/* ptrace.h */
/* structs and defines to help the user use the ptrace system call. */

/* has the defines to get at the registers. */

#define PTRACE_TRACEME		   0
#define PTRACE_PEEKTEXT		   1
#define PTRACE_PEEKDATA		   2
#define PTRACE_PEEKUSR		   3
#define PTRACE_POKETEXT		   4
#define PTRACE_POKEDATA		   5
#define PTRACE_POKEUSR		   6
#define PTRACE_CONT		   7
#define PTRACE_KILL		   8
#define PTRACE_SINGLESTEP	   9

#define PTRACE_ATTACH		  16
#define PTRACE_DETACH		  17

#define PTRACE_SYSCALL		  24

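/*
 * Illustrative sketch only (not part of this header's interface): a
 * minimal user-space tracer built on the request codes above.  Error
 * handling is omitted and the traced program ("/bin/true") is arbitrary.
 *
 *	pid_t child = fork();
 *	if (child == 0) {
 *		ptrace(PTRACE_TRACEME, 0, NULL, NULL);	// ask to be traced
 *		execl("/bin/true", "true", NULL);	// stops with SIGTRAP
 *	} else {
 *		int status;
 *		waitpid(child, &status, 0);		// wait for the stop
 *		ptrace(PTRACE_CONT, child, NULL, NULL);	// resume the child
 *		waitpid(child, &status, 0);		// reap the final exit
 *	}
 */
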
/* 0x4200-0x4300 are reserved for architecture-independent additions.  */
#define PTRACE_SETOPTIONS	0x4200
#define PTRACE_GETEVENTMSG	0x4201
#define PTRACE_GETSIGINFO	0x4202
#define PTRACE_SETSIGINFO	0x4203

/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD	0x00000001
#define PTRACE_O_TRACEFORK	0x00000002
#define PTRACE_O_TRACEVFORK	0x00000004
#define PTRACE_O_TRACECLONE	0x00000008
#define PTRACE_O_TRACEEXEC	0x00000010
#define PTRACE_O_TRACEVFORKDONE	0x00000020
#define PTRACE_O_TRACEEXIT	0x00000040

#define PTRACE_O_MASK		0x0000007f

/* Wait extended result codes for the above trace options.  */
#define PTRACE_EVENT_FORK	1
#define PTRACE_EVENT_VFORK	2
#define PTRACE_EVENT_CLONE	3
#define PTRACE_EVENT_EXEC	4
#define PTRACE_EVENT_VFORK_DONE	5
#define PTRACE_EVENT_EXIT	6

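/*
 * Illustrative sketch only: a tracer enabling some of the options above
 * and decoding the extended wait status.  Assumes @child is already
 * being traced and is currently stopped.
 *
 *	ptrace(PTRACE_SETOPTIONS, child, 0,
 *	       PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK);
 *	...
 *	waitpid(child, &status, 0);
 *	if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP &&
 *	    (status >> 16) == PTRACE_EVENT_FORK) {
 *		unsigned long new_pid;
 *		ptrace(PTRACE_GETEVENTMSG, child, 0, &new_pid);
 *		// new_pid is the pid of the freshly forked child
 *	}
 */
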
#include <asm/ptrace.h>

#ifdef __KERNEL__
/*
 * Ptrace flags
 *
 * The ownership rule for task->ptrace, which holds the ptrace flags,
 * is simple.  While a task is running it owns its own task->ptrace
 * flags.  When a task is stopped, the ptracer owns task->ptrace.
 */

#define PT_PTRACED	0x00000001
#define PT_DTRACE	0x00000002	/* delayed trace (used on m68k, i386) */
#define PT_TRACESYSGOOD	0x00000004
#define PT_PTRACE_CAP	0x00000008	/* ptracer can follow suid-exec */
#define PT_TRACE_FORK	0x00000010
#define PT_TRACE_VFORK	0x00000020
#define PT_TRACE_CLONE	0x00000040
#define PT_TRACE_EXEC	0x00000080
#define PT_TRACE_VFORK_DONE	0x00000100
#define PT_TRACE_EXIT	0x00000200

#define PT_TRACE_MASK	0x000003f4

/* single stepping state bits (used on ARM and PA-RISC) */
#define PT_SINGLESTEP_BIT	31
#define PT_SINGLESTEP		(1<<PT_SINGLESTEP_BIT)
#define PT_BLOCKSTEP_BIT	30
#define PT_BLOCKSTEP		(1<<PT_BLOCKSTEP_BIT)

#include <linux/compiler.h>		/* For unlikely.  */
#include <linux/sched.h>		/* For struct task_struct.  */


extern long arch_ptrace(struct task_struct *child, long request, long addr, long data);
extern int ptrace_traceme(void);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
extern int ptrace_attach(struct task_struct *tsk);
extern int ptrace_detach(struct task_struct *, unsigned int);
extern void ptrace_disable(struct task_struct *);
extern int ptrace_check_attach(struct task_struct *task, int kill);
extern int ptrace_request(struct task_struct *child, long request, long addr, long data);
extern void ptrace_notify(int exit_code);
extern void __ptrace_link(struct task_struct *child,
			  struct task_struct *new_parent);
extern void __ptrace_unlink(struct task_struct *child);
extern void exit_ptrace(struct task_struct *tracer);
#define PTRACE_MODE_READ   1
#define PTRACE_MODE_ATTACH 2
/* Returns 0 on success, -errno on denial. */
extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
/* Returns true on success, false on denial. */
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
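
/*
 * Illustrative sketch only (not a quote of any particular caller): code
 * that exposes another task's state, e.g. a /proc file read, would
 * typically gate on ptrace_may_access() first.
 *
 *	if (!ptrace_may_access(task, PTRACE_MODE_READ))
 *		return -EPERM;	// caller may not observe this task
 */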

static inline int ptrace_reparented(struct task_struct *child)
{
	return child->real_parent != child->parent;
}

static inline void ptrace_unlink(struct task_struct *child)
{
	if (unlikely(child->ptrace))
		__ptrace_unlink(child);
}

int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data);
int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data);

/**
 * task_ptrace - return %PT_* flags that apply to a task
 * @task:	pointer to &task_struct in question
 *
 * Returns the %PT_* flags that apply to @task.
 */
static inline int task_ptrace(struct task_struct *task)
{
	return task->ptrace;
}

/**
 * ptrace_event - possibly stop for a ptrace event notification
 * @mask:	%PT_* bit to check in @current->ptrace
 * @event:	%PTRACE_EVENT_* value to report if @mask is set
 * @message:	value for %PTRACE_GETEVENTMSG to return
 *
 * This checks the @mask bit to see if ptrace wants stops for this event.
 * If so we stop, reporting @event and @message to the ptrace parent.
 *
 * Returns nonzero if we did a ptrace notification, zero if not.
 *
 * Called without locks.
 */
static inline int ptrace_event(int mask, int event, unsigned long message)
{
	if (mask && likely(!(current->ptrace & mask)))
		return 0;
	current->ptrace_message = message;
	ptrace_notify((event << 8) | SIGTRAP);
	return 1;
}
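
/*
 * Illustrative call site (a sketch; the real callers sit in the
 * tracehook layer, e.g. when a task exits):
 *
 *	ptrace_event(PT_TRACE_EXIT, PTRACE_EVENT_EXIT, exit_code);
 *
 * The tracer then sees a SIGTRAP stop carrying PTRACE_EVENT_EXIT in the
 * upper bits of the wait status and can fetch the message with
 * PTRACE_GETEVENTMSG.
 */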

/**
 * ptrace_init_task - initialize ptrace state for a new child
 * @child:		new child task
 * @ptrace:		true if child should be ptrace'd by parent's tracer
 *
 * This is called immediately after adding @child to its parent's children
 * list.  @ptrace is false in the normal case, and true to ptrace @child.
 *
 * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
 */
static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
{
	INIT_LIST_HEAD(&child->ptrace_entry);
	INIT_LIST_HEAD(&child->ptraced);
	child->parent = child->real_parent;
	child->ptrace = 0;
	if (unlikely(ptrace) && (current->ptrace & PT_PTRACED)) {
		child->ptrace = current->ptrace;
		__ptrace_link(child, current->parent);
	}
}
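
/*
 * Illustrative call site (a sketch; the real caller is copy_process()
 * in kernel/fork.c), where @trace is nonzero when the parent's tracer
 * has asked to auto-attach to the new child:
 *
 *	ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
 */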

/**
 * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped
 * @task:	task in %EXIT_DEAD state
 *
 * Called with write_lock(&tasklist_lock) held.
 */
static inline void ptrace_release_task(struct task_struct *task)
{
	BUG_ON(!list_empty(&task->ptraced));
	ptrace_unlink(task);
	BUG_ON(!list_empty(&task->ptrace_entry));
}

#ifndef force_successful_syscall_return
/*
 * System call handlers that, upon successful completion, need to return a
 * negative value should call force_successful_syscall_return() right before
 * returning.  On architectures where the syscall convention provides for a
 * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly
 * others), this macro can be used to ensure that the error flag will not get
 * set.  On architectures which do not support a separate error flag, the macro
 * is a no-op and the spurious error condition needs to be filtered out by some
 * other means (e.g., in user-level, by passing an extra argument to the
 * syscall handler, or something along those lines).
 */
#define force_successful_syscall_return() do { } while (0)
#endif
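
/*
 * Illustrative sketch only (sys_get_raw_cookie() and compute_cookie()
 * are hypothetical, not existing kernel interfaces): a handler whose
 * successful result can look like an error code.
 *
 *	asmlinkage long sys_get_raw_cookie(void)
 *	{
 *		long cookie = compute_cookie();	// may legitimately be negative
 *		force_successful_syscall_return();
 *		return cookie;	// not mistaken for -errno on error-flag arches
 *	}
 */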

/*
 * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__.
 *
 * These do-nothing inlines are used when the arch does not
 * implement single-step.  The kerneldoc comments are here
 * to document the interface for all arch definitions.
 */

#ifndef arch_has_single_step
/**
 * arch_has_single_step - does this CPU support user-mode single-step?
 *
 * If this is defined, then there must be function declarations or
 * inlines for user_enable_single_step() and user_disable_single_step().
 * arch_has_single_step() should evaluate to nonzero iff the machine
 * supports instruction single-step for user mode.
 * It can be a constant or it can test a CPU feature bit.
 */
#define arch_has_single_step()		(0)

/**
 * user_enable_single_step - single-step in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_single_step() has returned nonzero.
 * Set @task so that when it returns to user mode, it will trap after the
 * next single instruction executes.  If arch_has_block_step() is defined,
 * this must clear the effects of user_enable_block_step() too.
 */
static inline void user_enable_single_step(struct task_struct *task)
{
	BUG();			/* This can never be called.  */
}

/**
 * user_disable_single_step - cancel user-mode single-step
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * Clear @task of the effects of user_enable_single_step() and
 * user_enable_block_step().  This can be called whether or not either
 * of those was ever called on @task, and even if arch_has_single_step()
 * returned zero.
 */
static inline void user_disable_single_step(struct task_struct *task)
{
}
#endif	/* arch_has_single_step */
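
/*
 * Sketch of an architecture-side definition (illustrative only; the
 * real definitions live in each arch's <asm/ptrace.h> and ptrace code):
 *
 *	#define arch_has_single_step()	(1)
 *	void user_enable_single_step(struct task_struct *task);
 *	void user_disable_single_step(struct task_struct *task);
 *
 * On x86, for instance, user_enable_single_step() ends up setting the
 * trap flag (TF) in the task's saved EFLAGS.
 */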

#ifndef arch_has_block_step
/**
 * arch_has_block_step - does this CPU support user-mode block-step?
 *
 * If this is defined, then there must be a function declaration or inline
 * for user_enable_block_step(), and arch_has_single_step() must be defined
 * too.  arch_has_block_step() should evaluate to nonzero iff the machine
 * supports step-until-branch for user mode.  It can be a constant or it
 * can test a CPU feature bit.
 */
#define arch_has_block_step()		(0)

/**
 * user_enable_block_step - step until branch in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_block_step() has returned nonzero,
 * and will never be called when single-instruction stepping is being used.
 * Set @task so that when it returns to user mode, it will trap after the
 * next branch or trap taken.
 */
static inline void user_enable_block_step(struct task_struct *task)
{
	BUG();			/* This can never be called.  */
}
#endif	/* arch_has_block_step */

#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO
extern void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs, siginfo_t *info);
#else
static inline void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs, siginfo_t *info)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
}
#endif

#ifndef arch_ptrace_stop_needed
/**
 * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
 * @code:	current->exit_code value ptrace will stop with
 * @info:	siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with the siglock held, to decide whether or not it's
 * necessary to release the siglock and call arch_ptrace_stop() with the
 * same @code and @info arguments.  It can be defined to a constant if
 * arch_ptrace_stop() is never required, or always is.  On machines where
 * this makes sense, it should be defined to a quick test to optimize out
 * calling arch_ptrace_stop() when it would be superfluous.  For example,
 * if the thread has not been back to user mode since the last stop, the
 * thread state might indicate that nothing needs to be done.
 */
#define arch_ptrace_stop_needed(code, info)	(0)
#endif

#ifndef arch_ptrace_stop
/**
 * arch_ptrace_stop - Do machine-specific work before stopping for ptrace
 * @code:	current->exit_code value ptrace will stop with
 * @info:	siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with no locks held when arch_ptrace_stop_needed() has
 * just returned nonzero.  It is allowed to block, e.g. for user memory
 * access.  The arch can have machine-specific work to be done before
 * ptrace stops.  On ia64, register backing store gets written back to user
 * memory here.  Since this can be costly (requires dropping the siglock),
 * we only do it when the arch requires it for this particular stop, as
 * indicated by arch_ptrace_stop_needed().
 */
#define arch_ptrace_stop(code, info)		do { } while (0)
#endif
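
/*
 * Illustrative sketch of the calling pattern (along the lines of
 * ptrace_stop() in kernel/signal.c):
 *
 *	if (arch_ptrace_stop_needed(exit_code, info)) {
 *		spin_unlock_irq(&current->sighand->siglock);
 *		arch_ptrace_stop(exit_code, info);	// may block
 *		spin_lock_irq(&current->sighand->siglock);
 *	}
 */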

#ifndef arch_ptrace_untrace
/*
 * Do machine-specific work before untracing child.
 *
 * This is called for a normal detach as well as from ptrace_exit()
 * when the tracing task dies.
 *
 * Called with write_lock(&tasklist_lock) held.
 */
#define arch_ptrace_untrace(task)		do { } while (0)
#endif

extern int task_current_syscall(struct task_struct *target, long *callno,
				unsigned long args[6], unsigned int maxargs,
				unsigned long *sp, unsigned long *pc);
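
/*
 * Illustrative use of task_current_syscall() (a sketch along the lines
 * of the /proc/<pid>/syscall reporting):
 *
 *	long nr;
 *	unsigned long args[6], sp, pc;
 *
 *	if (task_current_syscall(task, &nr, args, 6, &sp, &pc) == 0 && nr != -1L)
 *		;	// task is blocked in syscall nr with arguments args[]
 */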

#endif	/* __KERNEL__ */

#endif	/* _LINUX_PTRACE_H */