15a0015d6SChris Zankel /*
25a0015d6SChris Zankel * arch/xtensa/kernel/process.c
35a0015d6SChris Zankel *
45a0015d6SChris Zankel * Xtensa Processor version.
55a0015d6SChris Zankel *
65a0015d6SChris Zankel * This file is subject to the terms and conditions of the GNU General Public
75a0015d6SChris Zankel * License. See the file "COPYING" in the main directory of this archive
85a0015d6SChris Zankel * for more details.
95a0015d6SChris Zankel *
105a0015d6SChris Zankel * Copyright (C) 2001 - 2005 Tensilica Inc.
115a0015d6SChris Zankel *
125a0015d6SChris Zankel * Joe Taylor <[email protected], [email protected]>
135a0015d6SChris Zankel * Chris Zankel <[email protected]>
145a0015d6SChris Zankel * Marc Gauthier <[email protected], [email protected]>
155a0015d6SChris Zankel * Kevin Chea
165a0015d6SChris Zankel */
175a0015d6SChris Zankel
185a0015d6SChris Zankel #include <linux/errno.h>
195a0015d6SChris Zankel #include <linux/sched.h>
20b17b0153SIngo Molnar #include <linux/sched/debug.h>
2129930025SIngo Molnar #include <linux/sched/task.h>
2268db0cf1SIngo Molnar #include <linux/sched/task_stack.h>
235a0015d6SChris Zankel #include <linux/kernel.h>
245a0015d6SChris Zankel #include <linux/mm.h>
255a0015d6SChris Zankel #include <linux/smp.h>
265a0015d6SChris Zankel #include <linux/stddef.h>
275a0015d6SChris Zankel #include <linux/unistd.h>
285a0015d6SChris Zankel #include <linux/ptrace.h>
295a0015d6SChris Zankel #include <linux/elf.h>
30c91e02bdSMax Filippov #include <linux/hw_breakpoint.h>
315a0015d6SChris Zankel #include <linux/init.h>
325a0015d6SChris Zankel #include <linux/prctl.h>
335a0015d6SChris Zankel #include <linux/init_task.h>
345a0015d6SChris Zankel #include <linux/module.h>
355a0015d6SChris Zankel #include <linux/mqueue.h>
3673089cbfSChris Zankel #include <linux/fs.h>
375a0e3ad6STejun Heo #include <linux/slab.h>
3811ad47a0SFrederic Weisbecker #include <linux/rcupdate.h>
395a0015d6SChris Zankel
407c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
415a0015d6SChris Zankel #include <asm/io.h>
425a0015d6SChris Zankel #include <asm/processor.h>
435a0015d6SChris Zankel #include <asm/platform.h>
445a0015d6SChris Zankel #include <asm/mmu.h>
455a0015d6SChris Zankel #include <asm/irq.h>
4660063497SArun Sharma #include <linux/atomic.h>
470013a854SSam Ravnborg #include <asm/asm-offsets.h>
48173d6681SChris Zankel #include <asm/regs.h>
49c91e02bdSMax Filippov #include <asm/hw_breakpoint.h>
50*0e60f0b7SMax Filippov #include <asm/sections.h>
5111e969bcSMax Filippov #include <asm/traps.h>
525a0015d6SChris Zankel
535a0015d6SChris Zankel extern void ret_from_fork(void);
543306a726SMax Filippov extern void ret_from_kernel_thread(void);
555a0015d6SChris Zankel
5647f3fc94SAdrian Bunk void (*pm_power_off)(void) = NULL;
5747f3fc94SAdrian Bunk EXPORT_SYMBOL(pm_power_off);
5847f3fc94SAdrian Bunk
595a0015d6SChris Zankel
60050e9baaSLinus Torvalds #ifdef CONFIG_STACKPROTECTOR
6140d1a07bSMax Filippov #include <linux/stackprotector.h>
6240d1a07bSMax Filippov unsigned long __stack_chk_guard __read_mostly;
6340d1a07bSMax Filippov EXPORT_SYMBOL(__stack_chk_guard);
6440d1a07bSMax Filippov #endif
6540d1a07bSMax Filippov
66c658eac6SChris Zankel #if XTENSA_HAVE_COPROCESSORS
67c658eac6SChris Zankel
/*
 * Flush to memory and then release every coprocessor context owned by any
 * thread on the local CPU.  Must run on the CPU whose ownership table is
 * being cleared (it uses this_cpu_ptr on exc_table).
 */
local_coprocessors_flush_release_all(void)6811e969bcSMax Filippov void local_coprocessors_flush_release_all(void)
69c658eac6SChris Zankel {
7011e969bcSMax Filippov 	struct thread_info **coprocessor_owner;
7111e969bcSMax Filippov 	struct thread_info *unique_owner[XCHAL_CP_MAX];
7211e969bcSMax Filippov 	int n = 0;
7311e969bcSMax Filippov 	int i, j;
7411e969bcSMax Filippov
7511e969bcSMax Filippov 	coprocessor_owner = this_cpu_ptr(&exc_table)->coprocessor_owner;
	/* Enable all coprocessors so that their state can be saved below. */
7611e969bcSMax Filippov 	xtensa_set_sr(XCHAL_CP_MASK, cpenable);
7711e969bcSMax Filippov
7811e969bcSMax Filippov 	for (i = 0; i < XCHAL_CP_MAX; i++) {
7911e969bcSMax Filippov 		struct thread_info *ti = coprocessor_owner[i];
8011e969bcSMax Filippov
8111e969bcSMax Filippov 		if (ti) {
8211e969bcSMax Filippov 			coprocessor_flush(ti, i);
8311e969bcSMax Filippov
			/*
			 * One thread may own several coprocessors; collect each
			 * owner only once so its cpenable is cleared exactly
			 * once in the loop below.
			 */
8411e969bcSMax Filippov 			for (j = 0; j < n; j++)
8511e969bcSMax Filippov 				if (unique_owner[j] == ti)
8611e969bcSMax Filippov 					break;
8711e969bcSMax Filippov 			if (j == n)
8811e969bcSMax Filippov 				unique_owner[n++] = ti;
8911e969bcSMax Filippov
9011e969bcSMax Filippov 			coprocessor_owner[i] = NULL;
9111e969bcSMax Filippov 		}
9211e969bcSMax Filippov 	}
	/*
	 * Only after all flushes are complete may the owners' cpenable be
	 * cleared; the barrier orders the flushed state before the clear.
	 */
9311e969bcSMax Filippov 	for (i = 0; i < n; i++) {
9411e969bcSMax Filippov 		/* pairs with memw (1) in fast_coprocessor and memw in switch_to */
9511e969bcSMax Filippov 		smp_wmb();
9611e969bcSMax Filippov 		unique_owner[i]->cpenable = 0;
9711e969bcSMax Filippov 	}
	/* Disable all coprocessors again now that nothing owns them here. */
9811e969bcSMax Filippov 	xtensa_set_sr(0, cpenable);
9911e969bcSMax Filippov }
10011e969bcSMax Filippov
/*
 * SMP callback: on the owning CPU, drop ownership of every coprocessor
 * held by the thread passed in @info, without saving its state.
 * Called via smp_call_function_single() from coprocessor_release_all().
 */
local_coprocessor_release_all(void * info)10111e969bcSMax Filippov static void local_coprocessor_release_all(void *info)
10211e969bcSMax Filippov {
10311e969bcSMax Filippov 	struct thread_info *ti = info;
10411e969bcSMax Filippov 	struct thread_info **coprocessor_owner;
105c658eac6SChris Zankel 	int i;
106c658eac6SChris Zankel
10711e969bcSMax Filippov 	coprocessor_owner = this_cpu_ptr(&exc_table)->coprocessor_owner;
108c658eac6SChris Zankel
109c658eac6SChris Zankel 	/* Walk through all cp owners and release it for the requested one. */
110c658eac6SChris Zankel
111c658eac6SChris Zankel 	for (i = 0; i < XCHAL_CP_MAX; i++) {
11211e969bcSMax Filippov 		if (coprocessor_owner[i] == ti)
11311e969bcSMax Filippov 			coprocessor_owner[i] = NULL;
114c658eac6SChris Zankel 	}
11511e969bcSMax Filippov 	/* pairs with memw (1) in fast_coprocessor and memw in switch_to */
11611e969bcSMax Filippov 	smp_wmb();
11711e969bcSMax Filippov 	ti->cpenable = 0;
	/* If the thread is the one running here, disable coprocessors now. */
118be38e4f2SMax Filippov 	if (ti == current_thread_info())
119cad6fadeSMax Filippov 		xtensa_set_sr(0, cpenable);
12011e969bcSMax Filippov }
121c658eac6SChris Zankel
/*
 * Release all coprocessors owned by thread @ti, discarding their state.
 * Runs the release on the CPU recorded as the owner (ti->cp_owner_cpu)
 * and waits for it to complete.
 */
coprocessor_release_all(struct thread_info * ti)12211e969bcSMax Filippov void coprocessor_release_all(struct thread_info *ti)
12311e969bcSMax Filippov {
	/* cpenable == 0 means the thread owns no coprocessors: nothing to do. */
12411e969bcSMax Filippov 	if (ti->cpenable) {
12511e969bcSMax Filippov 		/* pairs with memw (2) in fast_coprocessor */
12611e969bcSMax Filippov 		smp_rmb();
12711e969bcSMax Filippov 		smp_call_function_single(ti->cp_owner_cpu,
12811e969bcSMax Filippov 					 local_coprocessor_release_all,
12911e969bcSMax Filippov 					 ti, true);
13011e969bcSMax Filippov 	}
13111e969bcSMax Filippov }
13211e969bcSMax Filippov
/*
 * SMP callback: on the owning CPU, save to memory the state of every
 * coprocessor held by the thread passed in @info.  Ownership is kept.
 * Called via smp_call_function_single() from coprocessor_flush_all().
 */
local_coprocessor_flush_all(void * info)13311e969bcSMax Filippov static void local_coprocessor_flush_all(void *info)
13411e969bcSMax Filippov {
13511e969bcSMax Filippov 	struct thread_info *ti = info;
13611e969bcSMax Filippov 	struct thread_info **coprocessor_owner;
13711e969bcSMax Filippov 	unsigned long old_cpenable;
13811e969bcSMax Filippov 	int i;
13911e969bcSMax Filippov
14011e969bcSMax Filippov 	coprocessor_owner = this_cpu_ptr(&exc_table)->coprocessor_owner;
	/*
	 * Temporarily enable the thread's coprocessors so coprocessor_flush()
	 * can read their registers; the previous cpenable is restored below.
	 */
14111e969bcSMax Filippov 	old_cpenable = xtensa_xsr(ti->cpenable, cpenable);
14211e969bcSMax Filippov
14311e969bcSMax Filippov 	for (i = 0; i < XCHAL_CP_MAX; i++) {
14411e969bcSMax Filippov 		if (coprocessor_owner[i] == ti)
14511e969bcSMax Filippov 			coprocessor_flush(ti, i);
14611e969bcSMax Filippov 	}
14711e969bcSMax Filippov 	xtensa_set_sr(old_cpenable, cpenable);
148c658eac6SChris Zankel }
149c658eac6SChris Zankel
/*
 * Save the coprocessor state of thread @ti to memory without releasing
 * ownership.  Runs the flush on the owning CPU and waits for completion.
 */
coprocessor_flush_all(struct thread_info * ti)150c658eac6SChris Zankel void coprocessor_flush_all(struct thread_info *ti)
151c658eac6SChris Zankel {
	/* cpenable == 0 means no coprocessor state to save. */
15211e969bcSMax Filippov 	if (ti->cpenable) {
15311e969bcSMax Filippov 		/* pairs with memw (2) in fast_coprocessor */
15411e969bcSMax Filippov 		smp_rmb();
15511e969bcSMax Filippov 		smp_call_function_single(ti->cp_owner_cpu,
15611e969bcSMax Filippov 					 local_coprocessor_flush_all,
15711e969bcSMax Filippov 					 ti, true);
158c658eac6SChris Zankel 	}
15911e969bcSMax Filippov }
160c658eac6SChris Zankel
/*
 * SMP callback: save and then release all coprocessors owned by the
 * thread passed in @info.  Flush must precede release, since releasing
 * clears the ownership entries the flush walks.
 */
local_coprocessor_flush_release_all(void * info)16111e969bcSMax Filippov static void local_coprocessor_flush_release_all(void *info)
16211e969bcSMax Filippov {
16311e969bcSMax Filippov 	local_coprocessor_flush_all(info);
16411e969bcSMax Filippov 	local_coprocessor_release_all(info);
16511e969bcSMax Filippov }
16611e969bcSMax Filippov
/*
 * Save to memory and then release all coprocessors owned by thread @ti.
 * Runs on the owning CPU and waits for completion.  Used e.g. by
 * flush_thread() on execve so state survives a failed exec.
 */
coprocessor_flush_release_all(struct thread_info * ti)16711e969bcSMax Filippov void coprocessor_flush_release_all(struct thread_info *ti)
16811e969bcSMax Filippov {
	/* cpenable == 0 means the thread owns no coprocessors: nothing to do. */
16911e969bcSMax Filippov 	if (ti->cpenable) {
17011e969bcSMax Filippov 		/* pairs with memw (2) in fast_coprocessor */
17111e969bcSMax Filippov 		smp_rmb();
17211e969bcSMax Filippov 		smp_call_function_single(ti->cp_owner_cpu,
17311e969bcSMax Filippov 					 local_coprocessor_flush_release_all,
17411e969bcSMax Filippov 					 ti, true);
17511e969bcSMax Filippov 	}
176c658eac6SChris Zankel }
177c658eac6SChris Zankel
178c658eac6SChris Zankel #endif
179c658eac6SChris Zankel
180c658eac6SChris Zankel
1815a0015d6SChris Zankel /*
1825a0015d6SChris Zankel * Powermanagement idle function, if any is provided by the platform.
1835a0015d6SChris Zankel */
/*
 * Idle the CPU via the platform hook.  platform_idle() is expected to
 * return with interrupts enabled; re-disable them here because the
 * generic idle loop requires arch_cpu_idle() to return with irqs off.
 */
arch_cpu_idle(void)184f4e2e9a4SThomas Gleixner void arch_cpu_idle(void)
1855a0015d6SChris Zankel {
1865a0015d6SChris Zankel 	platform_idle();
18789b30987SPeter Zijlstra 	raw_local_irq_disable();
1885a0015d6SChris Zankel }
1895a0015d6SChris Zankel
1905a0015d6SChris Zankel /*
191c658eac6SChris Zankel * This is called when the thread calls exit().
1925a0015d6SChris Zankel */
/*
 * Arch hook for thread exit: drop any coprocessor contexts still owned
 * by the exiting task (state is discarded, not saved).
 */
exit_thread(struct task_struct * tsk)193e6464694SJiri Slaby void exit_thread(struct task_struct *tsk)
1945a0015d6SChris Zankel {
195c658eac6SChris Zankel #if XTENSA_HAVE_COPROCESSORS
196e6464694SJiri Slaby 	coprocessor_release_all(task_thread_info(tsk));
197c658eac6SChris Zankel #endif
1985a0015d6SChris Zankel }
1995a0015d6SChris Zankel
200c658eac6SChris Zankel /*
201c658eac6SChris Zankel * Flush thread state. This is called when a thread does an execve()
202c658eac6SChris Zankel * Note that we flush coprocessor registers for the case execve fails.
203c658eac6SChris Zankel */
/*
 * Reset per-thread state on execve().  Coprocessor state is flushed to
 * memory (not just dropped) so it is still intact if the execve fails,
 * and hardware breakpoints installed via ptrace are removed.
 */
flush_thread(void)2045a0015d6SChris Zankel void flush_thread(void)
2055a0015d6SChris Zankel {
206c658eac6SChris Zankel #if XTENSA_HAVE_COPROCESSORS
207c658eac6SChris Zankel 	struct thread_info *ti = current_thread_info();
20811e969bcSMax Filippov 	coprocessor_flush_release_all(ti);
209c658eac6SChris Zankel #endif
210c91e02bdSMax Filippov 	flush_ptrace_hw_breakpoint(current);
211c658eac6SChris Zankel }
212c658eac6SChris Zankel
213c658eac6SChris Zankel /*
21455ccf3feSSuresh Siddha * this gets called so that we can store coprocessor state into memory and
21555ccf3feSSuresh Siddha * copy the current task into the new thread.
216c658eac6SChris Zankel */
/*
 * Duplicate @src into @dst for fork.  The source's live coprocessor
 * registers are first flushed to its thread memory so the plain struct
 * copy below captures an up-to-date image.  Returns 0 on success.
 */
arch_dup_task_struct(struct task_struct * dst,struct task_struct * src)21755ccf3feSSuresh Siddha int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
218c658eac6SChris Zankel {
219c658eac6SChris Zankel #if XTENSA_HAVE_COPROCESSORS
22055ccf3feSSuresh Siddha 	coprocessor_flush_all(task_thread_info(src));
221c658eac6SChris Zankel #endif
22255ccf3feSSuresh Siddha 	*dst = *src;
22355ccf3feSSuresh Siddha 	return 0;
2245a0015d6SChris Zankel }
2255a0015d6SChris Zankel
2265a0015d6SChris Zankel /*
2275a0015d6SChris Zankel * Copy thread.
2285a0015d6SChris Zankel *
2293306a726SMax Filippov * There are two modes in which this function is called:
2303306a726SMax Filippov * 1) Userspace thread creation,
2313306a726SMax Filippov * regs != NULL, usp_thread_fn is userspace stack pointer.
2323306a726SMax Filippov * It is expected to copy parent regs (in case CLONE_VM is not set
2333306a726SMax Filippov * in the clone_flags) and set up passed usp in the childregs.
2343306a726SMax Filippov * 2) Kernel thread creation,
2353306a726SMax Filippov * regs == NULL, usp_thread_fn is the function to run in the new thread
2363306a726SMax Filippov * and thread_fn_arg is its parameter.
2373306a726SMax Filippov * childregs are not used for the kernel threads.
2383306a726SMax Filippov *
2395a0015d6SChris Zankel * The stack layout for the new thread looks like this:
2405a0015d6SChris Zankel *
2413306a726SMax Filippov * +------------------------+
2425a0015d6SChris Zankel * | childregs |
2435a0015d6SChris Zankel * +------------------------+ <- thread.sp = sp in dummy-frame
2445a0015d6SChris Zankel * | dummy-frame | (saved in dummy-frame spill-area)
2455a0015d6SChris Zankel * +------------------------+
2465a0015d6SChris Zankel *
2473306a726SMax Filippov * We create a dummy frame to return to either ret_from_fork or
2483306a726SMax Filippov * ret_from_kernel_thread:
2493306a726SMax Filippov * a0 points to ret_from_fork/ret_from_kernel_thread (simulating a call4)
2505a0015d6SChris Zankel * sp points to itself (thread.sp)
2513306a726SMax Filippov * a2, a3 are unused for userspace threads,
2523306a726SMax Filippov * a2 points to thread_fn, a3 holds thread_fn arg for kernel threads.
2535a0015d6SChris Zankel *
2545a0015d6SChris Zankel * Note: This is a pristine frame, so we don't need any spill region on top of
2555a0015d6SChris Zankel * childregs.
25684ed3053SMarc Gauthier *
25784ed3053SMarc Gauthier * The fun part: if we're keeping the same VM (i.e. cloning a thread,
25884ed3053SMarc Gauthier * not an entire process), we're normally given a new usp, and we CANNOT share
25984ed3053SMarc Gauthier * any live address register windows. If we just copy those live frames over,
26084ed3053SMarc Gauthier * the two threads (parent and child) will overflow the same frames onto the
26184ed3053SMarc Gauthier * parent stack at different times, likely corrupting the parent stack (esp.
26284ed3053SMarc Gauthier * if the parent returns from functions that called clone() and calls new
26384ed3053SMarc Gauthier * ones, before the child overflows its now old copies of its parent windows).
26484ed3053SMarc Gauthier * One solution is to spill windows to the parent stack, but that's fairly
26584ed3053SMarc Gauthier * involved. Much simpler to just not copy those live frames across.
2665a0015d6SChris Zankel */
2675a0015d6SChris Zankel
/*
 * Set up the new task's kernel stack and registers for fork/clone or
 * kernel-thread creation; see the big comment above for the two modes
 * and the dummy-frame stack layout.  Returns 0 (cannot fail).
 */
copy_thread(struct task_struct * p,const struct kernel_clone_args * args)268c5febea0SEric W. Biederman int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
2695a0015d6SChris Zankel {
270c5febea0SEric W. Biederman 	unsigned long clone_flags = args->flags;
271c5febea0SEric W. Biederman 	unsigned long usp_thread_fn = args->stack;
272c5febea0SEric W. Biederman 	unsigned long tls = args->tls;
2733306a726SMax Filippov 	struct pt_regs *childregs = task_pt_regs(p);
2745a0015d6SChris Zankel
27539070cb8SChris Zankel #if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
27639070cb8SChris Zankel 	struct thread_info *ti;
27739070cb8SChris Zankel #endif
27839070cb8SChris Zankel
2790b537257SMax Filippov #if defined(__XTENSA_WINDOWED_ABI__)
2805a0015d6SChris Zankel 	/* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
281062b1c19SMax Filippov 	SPILL_SLOT(childregs, 1) = (unsigned long)childregs;
282062b1c19SMax Filippov 	SPILL_SLOT(childregs, 0) = 0;
2835a0015d6SChris Zankel
2845a0015d6SChris Zankel 	p->thread.sp = (unsigned long)childregs;
2850b537257SMax Filippov #elif defined(__XTENSA_CALL0_ABI__)
2860b537257SMax Filippov 	/* Reserve 16 bytes for the _switch_to stack frame. */
2870b537257SMax Filippov 	p->thread.sp = (unsigned long)childregs - 16;
2880b537257SMax Filippov #else
2890b537257SMax Filippov #error Unsupported Xtensa ABI
2900b537257SMax Filippov #endif
291c658eac6SChris Zankel
	/* args->fn == NULL: userspace thread creation (mode 1 above). */
2925bd2e97cSEric W. Biederman 	if (!args->fn) {
2933306a726SMax Filippov 		struct pt_regs *regs = current_pt_regs();
		/* No explicit stack given: child reuses the parent's usp. */
2943306a726SMax Filippov 		unsigned long usp = usp_thread_fn ?
2953306a726SMax Filippov 			usp_thread_fn : regs->areg[1];
2965a0015d6SChris Zankel
2973306a726SMax Filippov 		p->thread.ra = MAKE_RA_FOR_CALL(
2983306a726SMax Filippov 				(unsigned long)ret_from_fork, 0x1);
2993306a726SMax Filippov
		/* Child returns 0 from fork: a2 is the syscall return value. */
3003306a726SMax Filippov 		*childregs = *regs;
3015a0015d6SChris Zankel 		childregs->areg[1] = usp;
3023306a726SMax Filippov 		childregs->areg[2] = 0;
3036ebe7da2SChris Zankel
3046ebe7da2SChris Zankel 		/* When sharing memory with the parent thread, the child
3056ebe7da2SChris Zankel 		   usually starts on a pristine stack, so we have to reset
3066ebe7da2SChris Zankel 		   windowbase, windowstart and wmask.
3076ebe7da2SChris Zankel 		   (Note that such a new thread is required to always create
3086ebe7da2SChris Zankel 		   an initial call4 frame)
3096ebe7da2SChris Zankel 		   The exception is vfork, where the new thread continues to
3106ebe7da2SChris Zankel 		   run on the parent's stack until it calls execve. This could
3116ebe7da2SChris Zankel 		   be a call8 or call12, which requires a legal stack frame
3126ebe7da2SChris Zankel 		   of the previous caller for the overflow handlers to work.
3136ebe7da2SChris Zankel 		   (Note that it's always legal to overflow live registers).
3146ebe7da2SChris Zankel 		   In this case, ensure to spill at least the stack pointer
3156ebe7da2SChris Zankel 		   of that frame. */
3166ebe7da2SChris Zankel
31784ed3053SMarc Gauthier 		if (clone_flags & CLONE_VM) {
3186ebe7da2SChris Zankel 			/* check that caller window is live and same stack */
3196ebe7da2SChris Zankel 			int len = childregs->wmask & ~0xf;
3206ebe7da2SChris Zankel 			if (regs->areg[1] == usp && len != 0) {
3216ebe7da2SChris Zankel 				int callinc = (regs->areg[0] >> 30) & 3;
3226ebe7da2SChris Zankel 				int caller_ars = XCHAL_NUM_AREGS - callinc * 4;
				/* spill the caller's sp into its spill slot at usp-12 */
3236ebe7da2SChris Zankel 				put_user(regs->areg[caller_ars+1],
3246ebe7da2SChris Zankel 					 (unsigned __user*)(usp - 12));
3256ebe7da2SChris Zankel 			}
3266ebe7da2SChris Zankel 			childregs->wmask = 1;
3276ebe7da2SChris Zankel 			childregs->windowstart = 1;
3286ebe7da2SChris Zankel 			childregs->windowbase = 0;
32984ed3053SMarc Gauthier 		}
330c50842dfSChris Zankel
3315a0015d6SChris Zankel 		if (clone_flags & CLONE_SETTLS)
332c346b94fSAmanieu d'Antras 			childregs->threadptr = tls;
	/* args->fn != NULL: kernel thread creation (mode 2 above). */
3335a0015d6SChris Zankel 	} else {
3343306a726SMax Filippov 		p->thread.ra = MAKE_RA_FOR_CALL(
3353306a726SMax Filippov 				(unsigned long)ret_from_kernel_thread, 1);
3363306a726SMax Filippov
3370b537257SMax Filippov 		/* pass parameters to ret_from_kernel_thread: */
3380b537257SMax Filippov #if defined(__XTENSA_WINDOWED_ABI__)
3390b537257SMax Filippov 		/*
3400b537257SMax Filippov 		 * a2 = thread_fn, a3 = thread_fn arg.
3410b537257SMax Filippov 		 * Window underflow will load registers from the
3420b537257SMax Filippov 		 * spill slots on the stack on return from _switch_to.
3433306a726SMax Filippov 		 */
3445bd2e97cSEric W. Biederman 		SPILL_SLOT(childregs, 2) = (unsigned long)args->fn;
3455bd2e97cSEric W. Biederman 		SPILL_SLOT(childregs, 3) = (unsigned long)args->fn_arg;
3460b537257SMax Filippov #elif defined(__XTENSA_CALL0_ABI__)
3470b537257SMax Filippov 		/*
3480b537257SMax Filippov 		 * a12 = thread_fn, a13 = thread_fn arg.
3490b537257SMax Filippov 		 * _switch_to epilogue will load registers from the stack.
3500b537257SMax Filippov 		 */
3515bd2e97cSEric W. Biederman 		((unsigned long *)p->thread.sp)[0] = (unsigned long)args->fn;
3525bd2e97cSEric W. Biederman 		((unsigned long *)p->thread.sp)[1] = (unsigned long)args->fn_arg;
3530b537257SMax Filippov #else
3540b537257SMax Filippov #error Unsupported Xtensa ABI
3550b537257SMax Filippov #endif
3563306a726SMax Filippov
3573306a726SMax Filippov 		/* Childregs are only used when we're going to userspace
3583306a726SMax Filippov 		 * in which case start_thread will set them up.
3593306a726SMax Filippov 		 */
3605a0015d6SChris Zankel 	}
361c658eac6SChris Zankel
	/* New thread starts owning no coprocessors. */
362c658eac6SChris Zankel #if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
363c658eac6SChris Zankel 	ti = task_thread_info(p);
364c658eac6SChris Zankel 	ti->cpenable = 0;
365c658eac6SChris Zankel #endif
366c658eac6SChris Zankel
	/* Hardware breakpoints are not inherited across fork. */
367c91e02bdSMax Filippov 	clear_ptrace_hw_breakpoint(p);
368c91e02bdSMax Filippov
3695a0015d6SChris Zankel 	return 0;
3705a0015d6SChris Zankel }
3715a0015d6SChris Zankel
3725a0015d6SChris Zankel
3735a0015d6SChris Zankel /*
3745a0015d6SChris Zankel * These bracket the sleeping functions..
3755a0015d6SChris Zankel */
3765a0015d6SChris Zankel
/*
 * Find the PC at which sleeping task @p is blocked, by walking its
 * kernel stack frames until a PC outside the scheduler functions is
 * found.  Returns that PC, or 0 if the walk leaves the task's stack,
 * hits a NULL PC, or exceeds 16 frames.
 */
__get_wchan(struct task_struct * p)37742a20f86SKees Cook unsigned long __get_wchan(struct task_struct *p)
3785a0015d6SChris Zankel {
3795a0015d6SChris Zankel 	unsigned long sp, pc;
38004fe6fafSAl Viro 	unsigned long stack_page = (unsigned long) task_stack_page(p);
3815a0015d6SChris Zankel 	int count = 0;
3825a0015d6SChris Zankel
	/* Start from the saved switch frame; ra holds only PC bits, so
	   reconstruct a full PC relative to the kernel text base. */
3835a0015d6SChris Zankel 	sp = p->thread.sp;
384*0e60f0b7SMax Filippov 	pc = MAKE_PC_FROM_RA(p->thread.ra, _text);
3855a0015d6SChris Zankel
3865a0015d6SChris Zankel 	do {
		/* Bail out if sp leaves the task's stack or pc is bogus. */
3875a0015d6SChris Zankel 		if (sp < stack_page + sizeof(struct task_struct) ||
3885a0015d6SChris Zankel 		    sp >= (stack_page + THREAD_SIZE) ||
3895a0015d6SChris Zankel 		    pc == 0)
3905a0015d6SChris Zankel 			return 0;
3915a0015d6SChris Zankel 		if (!in_sched_functions(pc))
3925a0015d6SChris Zankel 			return pc;
3935a0015d6SChris Zankel
3945a0015d6SChris Zankel 		/* Stack layout: sp-4: ra, sp-3: sp' */
3955a0015d6SChris Zankel
		/* Unwind one frame via the caller's spill slots. */
396*0e60f0b7SMax Filippov 		pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), _text);
397d90b88fdSMax Filippov 		sp = SPILL_SLOT(sp, 1);
3985a0015d6SChris Zankel 	} while (count++ < 16);
3995a0015d6SChris Zankel 	return 0;
4005a0015d6SChris Zankel }
401