| /* |
| * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com> |
| * |
| * SPDX-License-Identifier: Apache-2.0 |
| */ |
| |
| #include <zephyr/kernel.h> |
| #include <zephyr/kernel_structs.h> |
| #include <kernel_internal.h> |
| #include <inttypes.h> |
| #include <zephyr/exc_handle.h> |
| #include <zephyr/logging/log.h> |
| LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); |
| |
| #ifdef CONFIG_USERSPACE |
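| /* |
| * Code ranges in which a memory access fault is recoverable: a fault |
| * inside one of these ranges is redirected to its fixup handler |
| * instead of being treated as fatal (see _Fault() below). |
| */ |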
| Z_EXC_DECLARE(z_riscv_user_string_nlen); |
| |
| static const struct z_exc_handle exceptions[] = { |
| Z_EXC_HANDLE(z_riscv_user_string_nlen), |
| }; |
| #endif /* CONFIG_USERSPACE */ |
| |
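| /* |
| * Register dump format: 8 hex digits per register on RV32, 16 on RV64. |
| * NO_REG is a blank field of the same width, used to keep the dump |
| * columns aligned when a register has nothing to print. |
| */ |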
| #if __riscv_xlen == 32 |
| #define PR_REG "%08" PRIxPTR |
| #define NO_REG "        " |
| #elif __riscv_xlen == 64 |
| #define PR_REG "%016" PRIxPTR |
| #define NO_REG "                " |
| #endif |
| |
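| /* |
| * Dump the exception stack frame (when one is available) and hand off |
| * to the common z_fatal_error() handler; this function does not return. |
| */ |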
| FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason, |
| const z_arch_esf_t *esf) |
| { |
| if (esf != NULL) { |
| LOG_ERR(" a0: " PR_REG " t0: " PR_REG, esf->a0, esf->t0); |
| LOG_ERR(" a1: " PR_REG " t1: " PR_REG, esf->a1, esf->t1); |
| LOG_ERR(" a2: " PR_REG " t2: " PR_REG, esf->a2, esf->t2); |
| #if defined(CONFIG_RISCV_ISA_RV32E) |
| LOG_ERR(" a3: " PR_REG, esf->a3); |
| LOG_ERR(" a4: " PR_REG, esf->a4); |
| LOG_ERR(" a5: " PR_REG, esf->a5); |
| #else |
| LOG_ERR(" a3: " PR_REG " t3: " PR_REG, esf->a3, esf->t3); |
| LOG_ERR(" a4: " PR_REG " t4: " PR_REG, esf->a4, esf->t4); |
| LOG_ERR(" a5: " PR_REG " t5: " PR_REG, esf->a5, esf->t5); |
| LOG_ERR(" a6: " PR_REG " t6: " PR_REG, esf->a6, esf->t6); |
| LOG_ERR(" a7: " PR_REG, esf->a7); |
| #endif /* CONFIG_RISCV_ISA_RV32E */ |
| #ifdef CONFIG_USERSPACE |
| LOG_ERR(" sp: " PR_REG, esf->sp); |
| #endif |
| LOG_ERR(" ra: " PR_REG, esf->ra); |
| LOG_ERR(" mepc: " PR_REG, esf->mepc); |
| LOG_ERR("mstatus: " PR_REG, esf->mstatus); |
| LOG_ERR(""); |
| } |
| |
| z_fatal_error(reason, esf); |
| CODE_UNREACHABLE; |
| } |
| |
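| /* Map an mcause exception code to a human-readable description; the |
| * codes follow the RISC-V privileged architecture specification. |
| */ |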
| static const char *cause_str(unsigned long cause) |
| { |
| switch (cause) { |
| case 0: |
| return "Instruction address misaligned"; |
| case 1: |
| return "Instruction Access fault"; |
| case 2: |
| return "Illegal instruction"; |
| case 3: |
| return "Breakpoint"; |
| case 4: |
| return "Load address misaligned"; |
| case 5: |
| return "Load access fault"; |
| case 6: |
| return "Store/AMO address misaligned"; |
| case 7: |
| return "Store/AMO access fault"; |
| case 8: |
| return "Environment call from U-mode"; |
| case 9: |
| return "Environment call from S-mode"; |
| case 11: |
| return "Environment call from M-mode"; |
| case 12: |
| return "Instruction page fault"; |
| case 13: |
| return "Load page fault"; |
| case 15: |
| return "Store/AMO page fault"; |
| default: |
| return "unknown"; |
| } |
| } |
| |
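| /* |
| * Return true if this exception looks like a stack overflow: either the |
| * stack pointer at the time of the fault was inside a PMP stack guard |
| * area, or a user thread's stack pointer left its allotted stack. |
| */ |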
| static bool bad_stack_pointer(z_arch_esf_t *esf) |
| { |
| #ifdef CONFIG_PMP_STACK_GUARD |
| /* |
| * Check whether the kernel stack pointer prior to this exception |
| * (i.e. before the exception stack frame was stored) was in the |
| * stack guard area. |
| */ |
| uintptr_t sp = (uintptr_t)esf + sizeof(z_arch_esf_t); |
| |
| #ifdef CONFIG_USERSPACE |
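| /* Guard area at the start of the thread's privileged stack, if it has one. */ |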
| if (_current->arch.priv_stack_start != 0 && |
| sp >= _current->arch.priv_stack_start && |
| sp < _current->arch.priv_stack_start + Z_RISCV_STACK_GUARD_SIZE) { |
| return true; |
| } |
| |
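| /* Guard area at the base of a user-capable thread's stack object. */ |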
| if (z_stack_is_user_capable(_current->stack_obj) && |
| sp >= _current->stack_info.start - K_THREAD_STACK_RESERVED && |
| sp < _current->stack_info.start - K_THREAD_STACK_RESERVED |
| + Z_RISCV_STACK_GUARD_SIZE) { |
| return true; |
| } |
| #endif /* CONFIG_USERSPACE */ |
| |
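| /* Guard area at the base of the thread's kernel stack. */ |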
| if (sp >= _current->stack_info.start - K_KERNEL_STACK_RESERVED && |
| sp < _current->stack_info.start - K_KERNEL_STACK_RESERVED |
| + Z_RISCV_STACK_GUARD_SIZE) { |
| return true; |
| } |
| #endif /* CONFIG_PMP_STACK_GUARD */ |
| |
| #ifdef CONFIG_USERSPACE |
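| /* |
| * MPP == 0 in mstatus means the exception was taken from U-mode: |
| * check whether the user stack pointer strayed outside its stack. |
| */ |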
| if ((esf->mstatus & MSTATUS_MPP) == 0 && |
| (esf->sp < _current->stack_info.start || |
| esf->sp > _current->stack_info.start + |
| _current->stack_info.size - |
| _current->stack_info.delta)) { |
| /* user stack pointer moved outside of its allowed stack */ |
| return true; |
| } |
| #endif |
| |
| return false; |
| } |
| |
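| /* |
| * Handler for synchronous exceptions, entered from the exception |
| * vector with the exception stack frame. It either resumes execution |
| * at a registered fixup handler or logs the trap cause and escalates |
| * to z_riscv_fatal_error(). |
| */ |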
| void _Fault(z_arch_esf_t *esf) |
| { |
| #ifdef CONFIG_USERSPACE |
| /* |
| * Assess whether a PMP fault should be treated as recoverable: if |
| * mepc falls within one of the registered exception ranges, redirect |
| * execution to the matching fixup handler and resume. |
| */ |
| for (int i = 0; i < ARRAY_SIZE(exceptions); i++) { |
| unsigned long start = (unsigned long)exceptions[i].start; |
| unsigned long end = (unsigned long)exceptions[i].end; |
| |
| if (esf->mepc >= start && esf->mepc < end) { |
| esf->mepc = (unsigned long)exceptions[i].fixup; |
| return; |
| } |
| } |
| #endif /* CONFIG_USERSPACE */ |
| |
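| /* Read the trap cause; mtval (faulting address or instruction bits) is |
| * not available on the OpenISA RV32M1, hence the guard below. |
| */ |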
| unsigned long mcause; |
| |
| __asm__ volatile("csrr %0, mcause" : "=r" (mcause)); |
| |
| #ifndef CONFIG_SOC_OPENISA_RV32M1_RISCV32 |
| unsigned long mtval; |
| __asm__ volatile("csrr %0, mtval" : "=r" (mtval)); |
| #endif |
| |
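| /* Keep only the exception code bits of mcause before decoding it. */ |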
| mcause &= SOC_MCAUSE_EXP_MASK; |
| LOG_ERR(""); |
| LOG_ERR(" mcause: %ld, %s", mcause, cause_str(mcause)); |
| #ifndef CONFIG_SOC_OPENISA_RV32M1_RISCV32 |
| LOG_ERR(" mtval: %lx", mtval); |
| #endif |
| |
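| /* Report stack guard / stack bound violations as a stack check failure |
| * rather than a generic CPU exception. |
| */ |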
| unsigned int reason = K_ERR_CPU_EXCEPTION; |
| |
| if (bad_stack_pointer(esf)) { |
| reason = K_ERR_STACK_CHK_FAIL; |
| } |
| |
| z_riscv_fatal_error(reason, esf); |
| } |
| |
| #ifdef CONFIG_USERSPACE |
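| /* Raise a kernel oops on behalf of the current thread from within a |
| * system call handler, e.g. when syscall argument validation fails. |
| */ |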
| FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr) |
| { |
| ARG_UNUSED(ssf_ptr); |
| |
| user_fault(K_ERR_KERNEL_OOPS); |
| CODE_UNREACHABLE; |
| } |
| |
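| /* |
| * user_fault() syscall implementation: from a user thread, any reason |
| * other than K_ERR_STACK_CHK_FAIL is downgraded to K_ERR_KERNEL_OOPS |
| * so user code cannot report arbitrary fatal error reasons. |
| */ |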
| void z_impl_user_fault(unsigned int reason) |
| { |
| z_arch_esf_t *oops_esf = _current->syscall_frame; |
| |
| if (((_current->base.user_options & K_USER) != 0) && |
| reason != K_ERR_STACK_CHK_FAIL) { |
| reason = K_ERR_KERNEL_OOPS; |
| } |
| z_riscv_fatal_error(reason, oops_esf); |
| } |
| |
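| /* Syscall verification handler: nothing to validate, forward to the |
| * implementation. |
| */ |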
| static void z_vrfy_user_fault(unsigned int reason) |
| { |
| z_impl_user_fault(reason); |
| } |
| |
| #include <syscalls/user_fault_mrsh.c> |
| |
| #endif /* CONFIG_USERSPACE */ |