/*
* Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <kernel.h>
#include <ksched.h>
void z_thread_entry_wrapper(k_thread_entry_t thread,
void *arg1,
void *arg2,
void *arg3);
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
char *stack_ptr, k_thread_entry_t entry,
void *p1, void *p2, void *p3)
{
struct __esf *stack_init;
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
const struct soc_esf soc_esf_init = {SOC_ESF_INIT};
#endif
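/*
 * With CONFIG_RISCV_SOC_CONTEXT_SAVE, the stack frame also carries
 * SoC-specific state (typically custom CSRs); SOC_ESF_INIT is expected
 * to provide suitable initial values for those registers, which are
 * copied into the frame below.
 */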
/* Carve the initial stack frame for the thread out of the top of its stack */
stack_init = Z_STACK_PTR_TO_FRAME(struct __esf, stack_ptr);
/*
 * a0-a3 carry the entry point and its three arguments, which
 * z_thread_entry_wrapper expects in the argument registers per the
 * RISC-V calling convention.
 */
stack_init->a0 = (ulong_t)entry;
stack_init->a1 = (ulong_t)p1;
stack_init->a2 = (ulong_t)p2;
stack_init->a3 = (ulong_t)p3;
/*
 * Per the RISC-V architecture, the MSTATUS register (used to globally
 * enable/disable interrupts) and the MEPC register (in which the core
 * saves the program counter at which an interrupt/exception occurs)
 * need to be saved on the stack when an interrupt/exception is taken,
 * and restored prior to returning from it. This allows nested
 * interrupts to be handled.
 *
 * Given that context switching is performed via a system call exception
 * in the RISC-V architecture implementation, initially set:
 * 1) MSTATUS to MSTATUS_DEF_RESTORE in the thread stack so that
 *    interrupts are enabled when the newly created thread is first
 *    scheduled;
 * 2) MEPC to the address of z_thread_entry_wrapper in the thread
 *    stack.
 * Hence, when returning from the interrupt/exception/context switch
 * that scheduled the newly created thread:
 * 1) interrupts will be enabled, as the MSTATUS register will be
 *    restored from the MSTATUS value set in the thread stack;
 * 2) the core will jump to z_thread_entry_wrapper, as the program
 *    counter will be restored from the MEPC value set in the
 *    thread stack.
 */
stack_init->mstatus = MSTATUS_DEF_RESTORE;
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
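/*
 * Per the RISC-V privileged spec, mstatus.FS is a 2-bit field
 * (bits 14:13) tracking FPU state: 0 = Off, 1 = Initial, 2 = Clean,
 * 3 = Dirty. MSTATUS_FS_INIT is taken here to encode the Initial
 * state, so the FPU is enabled (and its context tracked) from the
 * thread's first run if it was created with K_FP_REGS.
 */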
if ((thread->base.user_options & K_FP_REGS) != 0) {
stack_init->mstatus |= MSTATUS_FS_INIT;
}
stack_init->fp_state = 0;
#endif
stack_init->mepc = (ulong_t)z_thread_entry_wrapper;
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
stack_init->soc_context = soc_esf_init;
#endif
thread->callee_saved.sp = (ulong_t)stack_init;
}
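
/*
 * For reference: when the newly created thread runs for the first time,
 * the core "returns" (via MEPC) to z_thread_entry_wrapper with a0-a3
 * still holding entry/p1/p2/p3 from the frame built above. The wrapper
 * is implemented in assembly, but is roughly equivalent to the
 * following C sketch, which simply hands control to the common kernel
 * thread entry routine (illustrative only, not part of this file):
 *
 *	void z_thread_entry_wrapper(k_thread_entry_t entry,
 *				    void *p1, void *p2, void *p3)
 *	{
 *		z_thread_entry(entry, p1, p2, p3);
 *	}
 */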
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
int arch_float_disable(struct k_thread *thread)
{
unsigned int key;
if (thread != _current) {
return -EINVAL;
}
if (arch_is_in_isr()) {
return -EINVAL;
}
/* Ensure a preemptive context switch does not occur */
key = irq_lock();
/* Disable all floating point capabilities for the thread */
thread->base.user_options &= ~K_FP_REGS;
/*
 * Clear the mstatus.FS field to turn the FPU off (csrrc atomically
 * clears the bits selected by the mask).
 */
__asm__ volatile (
"mv t0, %0\n"
"csrrc x0, mstatus, t0\n"
:
: "r" (MSTATUS_FS_MASK)
);
irq_unlock(key);
return 0;
}
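
/*
 * Example usage (illustrative sketch, not part of this file): a thread
 * that has finished its floating point work can release its FPU
 * context through the public k_float_disable() API, which ends up in
 * arch_float_disable() above. fp_work_done() is a hypothetical helper:
 *
 *	void fp_work_done(void)
 *	{
 *		int ret = k_float_disable(k_current_get());
 *
 *		if (ret != 0) {
 *			printk("k_float_disable failed: %d\n", ret);
 *		}
 *	}
 */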
int arch_float_enable(struct k_thread *thread)
{
unsigned int key;
if (thread != _current) {
return -EINVAL;
}
if (arch_is_in_isr()) {
return -EINVAL;
}
/* Ensure a preemptive context switch does not occur */
key = irq_lock();
/* Enable all floating point capabilities for the thread. */
thread->base.user_options |= K_FP_REGS;
/*
 * Set the mstatus.FS field to Initial to enable the FPU (csrrs
 * atomically sets the bits selected by the mask).
 */
__asm__ volatile (
"mv t0, %0\n"
"csrrs x0, mstatus, t0\n"
:
: "r" (MSTATUS_FS_INIT)
);
irq_unlock(key);
return 0;
}
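
/*
 * Example usage (illustrative sketch, assuming this tree also provides
 * the matching k_float_enable() kernel API taking a K_FP_REGS option):
 * a thread can request an FPU context for itself before starting
 * floating point work. fp_work_begin() is a hypothetical helper:
 *
 *	void fp_work_begin(void)
 *	{
 *		int ret = k_float_enable(k_current_get(), K_FP_REGS);
 *
 *		if (ret != 0) {
 *			printk("k_float_enable failed: %d\n", ret);
 *		}
 *	}
 */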
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */