/*
* Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
* Copyright (c) 2020 BayLibre, SAS
*
* SPDX-License-Identifier: Apache-2.0
*/

#include <kernel.h>
#include <ksched.h>
#include <arch/riscv/csr.h>
#include <stdio.h>
#include <core_pmp.h>

#if defined(CONFIG_USERSPACE) && !defined(CONFIG_SMP)
/*
 * Global variable used to know the current mode running.
 * It is not a boolean because it must match the PMP granularity
 * of the arch.
 */
uint32_t is_user_mode;
#endif
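
/*
 * Illustrative use: the assembly interrupt entry path reads this variable
 * to tell whether the interrupted context was running in user mode and a
 * switch to the privileged stack is required, conceptually:
 *
 *	if (is_user_mode) {
 *		switch_to_privileged_stack();
 *	}
 *
 * where switch_to_privileged_stack() is a hypothetical name for that
 * stack swap, not an actual symbol in this port.
 */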

void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
                     char *stack_ptr, k_thread_entry_t entry,
                     void *p1, void *p2, void *p3)
{
        extern void z_riscv_thread_start(void);
        struct __esf *stack_init;

#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
        const struct soc_esf soc_esf_init = {SOC_ESF_INIT};
#endif

        /* Initial stack frame for thread */
        stack_init = (struct __esf *)Z_STACK_PTR_ALIGN(
                Z_STACK_PTR_TO_FRAME(struct __esf, stack_ptr)
        );

        /* Setup the initial stack frame */
        stack_init->a0 = (ulong_t)entry;
        stack_init->a1 = (ulong_t)p1;
        stack_init->a2 = (ulong_t)p2;
        stack_init->a3 = (ulong_t)p3;

#ifdef CONFIG_THREAD_LOCAL_STORAGE
        thread->callee_saved.tp = (ulong_t)thread->tls;
#endif

        /*
         * Following the RISC-V architecture, the MSTATUS register (used to
         * globally enable/disable interrupts), as well as the MEPC register
         * (used by the core to save the value of the program counter at
         * which an interrupt/exception occurs), need to be saved on the
         * stack upon an interrupt/exception and restored prior to returning
         * from the interrupt/exception. This allows nested interrupts to be
         * handled.
         *
         * Given that thread startup happens through the exception exit
         * path, initially set:
         * 1) MSTATUS to MSTATUS_DEF_RESTORE in the thread stack to enable
         *    interrupts when the newly created thread will be scheduled;
         * 2) MEPC to the address of z_thread_entry in the thread stack.
         *
         * Hence, when returning from an interrupt/exception/context-switch
         * after scheduling the newly created thread:
         * 1) interrupts will be enabled, as the MSTATUS register will be
         *    restored from the MSTATUS value set within the thread stack;
         * 2) the core will jump to z_thread_entry, as the program counter
         *    will be restored from the MEPC value set within the thread
         *    stack.
         */
        stack_init->mstatus = MSTATUS_DEF_RESTORE;

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
        /* Shared FP mode: enable FPU of threads with K_FP_REGS. */
        if ((thread->base.user_options & K_FP_REGS) != 0) {
                stack_init->mstatus |= MSTATUS_FS_INIT;
        }
        thread->callee_saved.fcsr = 0;
#elif defined(CONFIG_FPU)
        /* Unshared FP mode: enable FPU of each thread. */
        stack_init->mstatus |= MSTATUS_FS_INIT;
#endif
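
        /*
         * For reference, mstatus.FS is a two-bit field: 0 = Off (FP
         * instructions trap), 1 = Initial, 2 = Clean, 3 = Dirty.
         * MSTATUS_FS_INIT selects the Initial state, so the thread's first
         * FP instruction executes and the hardware marks the state Dirty.
         */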

#if defined(CONFIG_PMP_STACK_GUARD) || defined(CONFIG_USERSPACE)
        /* Clear PMP context if RISC-V PMP is used. */
        z_riscv_pmp_init_thread(thread);
#endif /* CONFIG_PMP_STACK_GUARD || CONFIG_USERSPACE */

#if defined(CONFIG_USERSPACE)
        /* Clear user thread context */
        thread->arch.priv_stack_start = 0;

        /* the unwound stack pointer upon exiting the exception */
        stack_init->sp = (ulong_t)(stack_init + 1);
#endif /* CONFIG_USERSPACE */

        /* Assign thread entry point and mstatus.MPRV mode. */
        if (IS_ENABLED(CONFIG_USERSPACE)
            && (thread->base.user_options & K_USER)) {
                /* User thread */
                stack_init->mepc = (ulong_t)k_thread_user_mode_enter;
        } else {
                /* Supervisor thread */
                stack_init->mepc = (ulong_t)z_thread_entry;

#if defined(CONFIG_PMP_STACK_GUARD)
                /* Set mstatus.MPRV so that the supervisor thread's
                 * machine-mode loads and stores are checked against the
                 * PMP, making the stack guard effective.
                 */
                stack_init->mstatus |= MSTATUS_MPRV;
#endif /* CONFIG_PMP_STACK_GUARD */
        }

#if defined(CONFIG_PMP_STACK_GUARD)
        /* Set up the PMP regions of the thread's PMP stack guard. */
        z_riscv_init_stack_guard(thread);
#endif /* CONFIG_PMP_STACK_GUARD */

#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
        stack_init->soc_context = soc_esf_init;
#endif

        thread->callee_saved.sp = (ulong_t)stack_init;

        /* where to go when returning from z_riscv_switch() */
        thread->callee_saved.ra = (ulong_t)z_riscv_thread_start;

        /* our switch handle is the thread pointer itself */
        thread->switch_handle = thread;
}
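
/*
 * How the pieces above fit together: the first switch to this thread goes
 * through z_riscv_switch(), which installs callee_saved.sp and returns via
 * callee_saved.ra, i.e. into z_riscv_thread_start. That stub (conceptually)
 * feeds the struct __esf built here into the exception exit path, which
 * ends in an mret, so the core lands on mepc with a0..a3 holding the values
 * staged above. For a supervisor thread the net effect is the call:
 *
 *	z_thread_entry(entry, p1, p2, p3);
 */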

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
int arch_float_disable(struct k_thread *thread)
{
        unsigned int key;

        if (thread != _current) {
                return -EINVAL;
        }

        if (arch_is_in_isr()) {
                return -EINVAL;
        }

        /* Ensure a preemptive context switch does not occur */
        key = irq_lock();

        /* Disable all floating point capabilities for the thread */
        thread->base.user_options &= ~K_FP_REGS;

        /* Clear the FS bits to disable the FPU. */
        __asm__ volatile (
                "csrrc x0, mstatus, %0\n"
                :
                : "r" (MSTATUS_FS_MASK)
        );

        irq_unlock(key);

        return 0;
}
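
/*
 * For illustration, application code normally reaches this function through
 * the k_float_disable() kernel API rather than calling it directly:
 *
 *	k_float_disable(k_current_get());
 */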

int arch_float_enable(struct k_thread *thread, unsigned int options)
{
        unsigned int key;

        if (thread != _current) {
                return -EINVAL;
        }

        if (arch_is_in_isr()) {
                return -EINVAL;
        }

        /* Ensure a preemptive context switch does not occur */
        key = irq_lock();

        /* Enable all floating point capabilities for the thread */
        thread->base.user_options |= K_FP_REGS;

        /* Set the FS bits to Initial and clear the fcsr to enable the FPU. */
        __asm__ volatile (
                "csrrs x0, mstatus, %0\n"
                "fscsr x0, x0\n"
                :
                : "r" (MSTATUS_FS_INIT)
        );

        irq_unlock(key);

        return 0;
}
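
/*
 * Likewise, the k_float_enable() kernel API is the usual entry point:
 *
 *	k_float_enable(k_current_get(), K_FP_REGS);
 */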
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

#ifdef CONFIG_USERSPACE
/*
 * User space entry function
 *
 * This function is the entry point to user mode from privileged execution.
 * The conversion is one way: threads that transition to user mode do not
 * transition back, although they re-enter privileged execution temporarily
 * while servicing system calls.
 */
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
                                        void *p1, void *p2, void *p3)
{
        ulong_t top_of_user_stack, top_of_priv_stack;
        ulong_t status;

        /* Set up privileged stack */
#ifdef CONFIG_GEN_PRIV_STACKS
        _current->arch.priv_stack_start =
                (ulong_t)z_priv_stack_find(_current->stack_obj);
#else
        _current->arch.priv_stack_start =
                (ulong_t)(_current->stack_obj) +
                Z_RISCV_STACK_GUARD_SIZE;
#endif /* CONFIG_GEN_PRIV_STACKS */
        top_of_priv_stack = Z_STACK_PTR_ALIGN(_current->arch.priv_stack_start +
                                              CONFIG_PRIVILEGED_STACK_SIZE);

        top_of_user_stack = Z_STACK_PTR_ALIGN(
                                _current->stack_info.start +
                                _current->stack_info.size -
                                _current->stack_info.delta);

        status = csr_read(mstatus);

        /* Set next CPU status to user mode */
        status = INSERT_FIELD(status, MSTATUS_MPP, PRV_U);
        /* Enable IRQs for user mode */
        status = INSERT_FIELD(status, MSTATUS_MPIE, 1);
        /* Disable IRQs for m-mode until the mode switch */
        status = INSERT_FIELD(status, MSTATUS_MIE, 0);

        csr_write(mstatus, status);
        csr_write(mepc, z_thread_entry);

        /* exception stack has to be in mscratch */
        csr_write(mscratch, top_of_priv_stack);

        /* Set up Physical Memory Protection */
#if defined(CONFIG_PMP_STACK_GUARD)
        z_riscv_init_stack_guard(_current);
#endif

        z_riscv_init_user_accesses(_current);
        z_riscv_configure_user_allowed_stack(_current);

#if !defined(CONFIG_SMP)
        is_user_mode = true;
#endif

        register void *a0 __asm__("a0") = user_entry;
        register void *a1 __asm__("a1") = p1;
        register void *a2 __asm__("a2") = p2;
        register void *a3 __asm__("a3") = p3;

        __asm__ volatile (
                "mv sp, %4; mret"
                :
                : "r" (a0), "r" (a1), "r" (a2), "r" (a3), "r" (top_of_user_stack)
                : "memory");

        CODE_UNREACHABLE;
}
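
/*
 * Note on the final mret above: mret atomically moves the hart to the
 * privilege level held in mstatus.MPP (set to PRV_U here), restores
 * mstatus.MIE from MPIE (re-enabling interrupts), and jumps to the address
 * in mepc (z_thread_entry). With a0..a3 pinned to user_entry and p1..p3,
 * the net effect is equivalent to calling, in user mode:
 *
 *	z_thread_entry(user_entry, p1, p2, p3);
 */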
#endif /* CONFIG_USERSPACE */