kernel: convert most thread APIs to system calls

The following thread APIs are converted to system calls: their
implementations are renamed to the _impl_ prefix and handler
functions are added that validate object pointers and arguments on
behalf of user-mode callers:

- k_thread_priority_set()
- k_yield()
- k_wakeup()
- k_is_preempt_thread()
- k_thread_custom_data_set() / k_thread_custom_data_get()
- k_thread_start()
- k_thread_cancel()
- k_thread_suspend()
- k_thread_resume()
- k_thread_abort()

The priority validation logic in ksched.h is factored into a
_VALID_PRIO() predicate so that the k_thread_priority_set() handler
can perform at runtime the same check that _ASSERT_VALID_PRIO()
performs in debug builds.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
diff --git a/arch/arm/core/thread_abort.c b/arch/arm/core/thread_abort.c
index 86b7659..3d398a5 100644
--- a/arch/arm/core/thread_abort.c
+++ b/arch/arm/core/thread_abort.c
@@ -26,7 +26,7 @@
extern void _k_thread_single_abort(struct k_thread *thread);
-void k_thread_abort(k_tid_t thread)
+void _impl_k_thread_abort(k_tid_t thread)
{
unsigned int key;
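ARM selects CONFIG_ARCH_HAS_THREAD_ABORT and carries its own abort routine, so it gets the same _impl_ rename as the portable version further down in kernel/thread_abort.c. The public k_thread_abort() symbol now comes from the syscall generation scripts instead; below is a rough sketch of the kind of inline wrapper they emit (the context-check and invoke function names are illustrative assumptions, not code from this patch):

    static inline void k_thread_abort(k_tid_t thread)
    {
    #ifdef CONFIG_USERSPACE
        /* User-mode callers trap into the kernel with the assigned
         * syscall ID; kernel-mode callers fall through below. */
        if (_arch_is_user_context()) {
            _arch_syscall_invoke1((u32_t)thread, K_SYSCALL_K_THREAD_ABORT);
            return;
        }
    #endif
        _impl_k_thread_abort(thread);
    }
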
diff --git a/include/kernel.h b/include/kernel.h
index 76b3553..394ccbd 100644
--- a/include/kernel.h
+++ b/include/kernel.h
@@ -620,7 +620,7 @@
*
* @return N/A
*/
-extern void k_yield(void);
+__syscall void k_yield(void);
/**
* @brief Wake up a sleeping thread.
@@ -633,7 +633,7 @@
*
* @return N/A
*/
-extern void k_wakeup(k_tid_t thread);
+__syscall void k_wakeup(k_tid_t thread);
/**
* @brief Get thread ID of the current thread.
@@ -653,7 +653,7 @@
* @retval 0 Thread spawning canceled.
* @retval -EINVAL Thread has already started executing.
*/
-extern int k_thread_cancel(k_tid_t thread);
+__syscall int k_thread_cancel(k_tid_t thread);
/**
* @brief Abort a thread.
@@ -669,7 +669,7 @@
*
* @return N/A
*/
-extern void k_thread_abort(k_tid_t thread);
+__syscall void k_thread_abort(k_tid_t thread);
/**
@@ -681,7 +681,7 @@
*
* @param thread thread to start
*/
-extern void k_thread_start(k_tid_t thread);
+__syscall void k_thread_start(k_tid_t thread);
/**
* @cond INTERNAL_HIDDEN
@@ -808,7 +808,7 @@
*
* @return N/A
*/
-extern void k_thread_priority_set(k_tid_t thread, int prio);
+__syscall void k_thread_priority_set(k_tid_t thread, int prio);
/**
* @brief Suspend a thread.
@@ -824,7 +824,7 @@
*
* @return N/A
*/
-extern void k_thread_suspend(k_tid_t thread);
+__syscall void k_thread_suspend(k_tid_t thread);
/**
* @brief Resume a suspended thread.
@@ -838,7 +838,7 @@
*
* @return N/A
*/
-extern void k_thread_resume(k_tid_t thread);
+__syscall void k_thread_resume(k_tid_t thread);
/**
* @brief Set time-slicing period and scope.
@@ -908,7 +908,7 @@
* @return 0 if invoked by an ISR or by a cooperative thread.
* @return Non-zero if invoked by a preemptible thread.
*/
-extern int k_is_preempt_thread(void);
+__syscall int k_is_preempt_thread(void);
/**
* @} end addtogroup isr_apis
@@ -963,7 +963,7 @@
*
* @return N/A
*/
-extern void k_thread_custom_data_set(void *value);
+__syscall void k_thread_custom_data_set(void *value);
/**
* @brief Get current thread's custom data.
@@ -972,7 +972,7 @@
*
* @return Current custom data value.
*/
-extern void *k_thread_custom_data_get(void);
+__syscall void *k_thread_custom_data_get(void);
/**
* @} end addtogroup thread_apis
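On the declaration side, __syscall replaces extern: it tags the prototype for the syscall generation scripts, which assign an ID and emit a user-mode wrapper like the one sketched above, while kernel-mode code still reaches the _impl_ function directly. Application source does not change. For instance, a user-mode entry point can now do the following (illustrative; priority 7 assumes a default preemptible-priority range):

    void worker(void *p1, void *p2, void *p3)
    {
        k_thread_custom_data_set(p1);              /* syscall from user mode */
        k_thread_priority_set(k_current_get(), 7); /* handler validates both args */
        k_yield();                                 /* nothing to validate */
    }
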
diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index 3e6482e..6a30042 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -50,18 +50,22 @@
}
#ifdef CONFIG_MULTITHREADING
-#define _ASSERT_VALID_PRIO(prio, entry_point) do { \
- __ASSERT(((prio) == K_IDLE_PRIO && _is_idle_thread(entry_point)) || \
+#define _VALID_PRIO(prio, entry_point) \
+ (((prio) == K_IDLE_PRIO && _is_idle_thread(entry_point)) || \
(_is_prio_higher_or_equal((prio), \
K_LOWEST_APPLICATION_THREAD_PRIO) && \
_is_prio_lower_or_equal((prio), \
- K_HIGHEST_APPLICATION_THREAD_PRIO)), \
+ K_HIGHEST_APPLICATION_THREAD_PRIO)))
+
+#define _ASSERT_VALID_PRIO(prio, entry_point) do { \
+ __ASSERT(_VALID_PRIO((prio), (entry_point)), \
"invalid priority (%d); allowed range: %d to %d", \
(prio), \
K_LOWEST_APPLICATION_THREAD_PRIO, \
K_HIGHEST_APPLICATION_THREAD_PRIO); \
} while ((0))
#else
+#define _VALID_PRIO(prio, entry_point) ((prio) == -1)
#define _ASSERT_VALID_PRIO(prio, entry_point) __ASSERT((prio) == -1, "")
#endif
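The priority check is pulled out of the __ASSERT() into a standalone _VALID_PRIO() predicate because assertions compile away in release builds, while a syscall handler must reject a bad user-supplied priority unconditionally. The sched.c hunk below reuses it directly, passing NULL for the entry point since a handler cannot know it:

    _SYSCALL_VERIFY(_VALID_PRIO(prio, NULL), ssf);
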
diff --git a/kernel/sched.c b/kernel/sched.c
index 0de0d60..fdb0cef 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -278,7 +278,7 @@
}
#endif
-void k_thread_priority_set(k_tid_t tid, int prio)
+void _impl_k_thread_priority_set(k_tid_t tid, int prio)
{
/*
* Use NULL, since we cannot know what the entry point is (we do not
@@ -294,6 +294,20 @@
_reschedule_threads(key);
}
+#ifdef CONFIG_USERSPACE
+u32_t _handler_k_thread_priority_set(u32_t thread, u32_t prio, u32_t arg3,
+ u32_t arg4, u32_t arg5, u32_t arg6,
+ void *ssf)
+{
+ _SYSCALL_ARG2;
+
+ _SYSCALL_IS_OBJ(thread, K_OBJ_THREAD, 0, ssf);
+ _SYSCALL_VERIFY(_VALID_PRIO(prio, NULL), ssf);
+ _impl_k_thread_priority_set((k_tid_t)thread, prio);
+ return 0;
+}
+#endif
+
/*
* Interrupts must be locked when calling this function.
*
@@ -320,7 +334,7 @@
#endif
}
-void k_yield(void)
+void _impl_k_yield(void)
{
__ASSERT(!_is_in_isr(), "");
@@ -338,6 +352,17 @@
}
}
+#ifdef CONFIG_USERSPACE
+u32_t _handler_k_yield(u32_t arg1, u32_t arg2, u32_t arg3,
+ u32_t arg4, u32_t arg5, u32_t arg6, void *ssf)
+{
+ _SYSCALL_ARG0;
+
+ _impl_k_yield();
+ return 0;
+}
+#endif
+
void _impl_k_sleep(s32_t duration)
{
#ifdef CONFIG_MULTITHREADING
@@ -381,7 +406,7 @@
}
#endif
-void k_wakeup(k_tid_t thread)
+void _impl_k_wakeup(k_tid_t thread)
{
int key = irq_lock();
@@ -405,6 +430,18 @@
}
}
+#ifdef CONFIG_USERSPACE
+u32_t _handler_k_wakeup(u32_t thread, u32_t arg2, u32_t arg3,
+ u32_t arg4, u32_t arg5, u32_t arg6, void *ssf)
+{
+ _SYSCALL_ARG1;
+
+ _SYSCALL_IS_OBJ(thread, K_OBJ_THREAD, 0, ssf);
+ _impl_k_wakeup((k_tid_t)thread);
+ return 0;
+}
+#endif
+
k_tid_t _impl_k_current_get(void)
{
return _current;
@@ -483,7 +520,18 @@
}
#endif /* CONFIG_TIMESLICING */
-int k_is_preempt_thread(void)
+int _impl_k_is_preempt_thread(void)
{
return !_is_in_isr() && _is_preempt(_current);
}
+
+#ifdef CONFIG_USERSPACE
+u32_t _handler_k_is_preempt_thread(u32_t arg1, u32_t arg2, u32_t arg3,
+ u32_t arg4, u32_t arg5, u32_t arg6,
+ void *ssf)
+{
+ _SYSCALL_ARG0;
+
+ return _impl_k_is_preempt_thread();
+}
+#endif
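All the handlers added by this patch share one calling convention: six u32_t register arguments plus ssf, a pointer to the syscall stack frame used for error reporting, with _SYSCALL_ARGn declaring how many of the six are actually meaningful. The checking macros come from syscall_handler.h; the following is a simplified sketch of their behavior, inferred from the surrounding usage rather than taken from this patch:

    /* Fail the syscall by killing the offending caller rather than
     * asserting inside the kernel. */
    #define _SYSCALL_VERIFY(expr, ssf) \
        do { \
            if (!(expr)) { \
                _arch_syscall_oops(ssf); /* does not return */ \
            } \
        } while (0)

    /* Verify that the value names a kernel object of the expected type
     * which the calling thread has been granted access to. */
    #define _SYSCALL_IS_OBJ(ptr, type, init, ssf) \
        _SYSCALL_VERIFY(_k_object_validate((void *)(ptr), (type), (init)) == 0, ssf)
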
diff --git a/kernel/thread.c b/kernel/thread.c
index 0cbace6..a933fc7 100644
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -23,6 +23,7 @@
#include <ksched.h>
#include <wait_q.h>
#include <atomic.h>
+#include <syscall_handler.h>
extern struct _static_thread_data _static_thread_data_list_start[];
extern struct _static_thread_data _static_thread_data_list_end[];
@@ -112,17 +113,38 @@
}
#ifdef CONFIG_THREAD_CUSTOM_DATA
-
-void k_thread_custom_data_set(void *value)
+void _impl_k_thread_custom_data_set(void *value)
{
_current->custom_data = value;
}
-void *k_thread_custom_data_get(void)
+#ifdef CONFIG_USERSPACE
+u32_t _handler_k_thread_custom_data_set(u32_t arg1, u32_t arg2, u32_t arg3,
+ u32_t arg4, u32_t arg5, u32_t arg6,
+ void *ssf)
+{
+ _SYSCALL_ARG1;
+
+ _impl_k_thread_custom_data_set((void *)arg1);
+ return 0;
+}
+#endif
+
+void *_impl_k_thread_custom_data_get(void)
{
return _current->custom_data;
}
+#ifdef CONFIG_USERSPACE
+u32_t _handler_k_thread_custom_data_get(u32_t arg1, u32_t arg2, u32_t arg3,
+ u32_t arg4, u32_t arg5, u32_t arg6,
+ void *ssf)
+{
+ _SYSCALL_ARG0;
+
+ return (u32_t)_impl_k_thread_custom_data_get();
+}
+#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_THREAD_CUSTOM_DATA */
#if defined(CONFIG_THREAD_MONITOR)
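Neither custom-data handler validates an object: both operate only on _current, and the getter simply returns the stored pointer through the u32_t syscall return value. From a user thread the pair works as before (my_state is a hypothetical application type; the storage must be in memory the thread can access):

    struct my_state { int last_err; };
    static struct my_state state;

    void user_entry(void *p1, void *p2, void *p3)
    {
        k_thread_custom_data_set(&state);
        /* ... arbitrarily later in the same thread ... */
        struct my_state *s = k_thread_custom_data_get();
        s->last_err = 0;
    }
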
@@ -214,7 +236,7 @@
}
#ifdef CONFIG_MULTITHREADING
-void k_thread_start(struct k_thread *thread)
+void _impl_k_thread_start(struct k_thread *thread)
{
int key = irq_lock(); /* protect kernel queues */
@@ -235,6 +257,18 @@
irq_unlock(key);
}
+
+#ifdef CONFIG_USERSPACE
+u32_t _handler_k_thread_start(u32_t thread, u32_t arg2, u32_t arg3,
+ u32_t arg4, u32_t arg5, u32_t arg6, void *ssf)
+{
+ _SYSCALL_ARG1;
+
+ _SYSCALL_IS_OBJ(thread, K_OBJ_THREAD, 0, ssf);
+ _impl_k_thread_start((struct k_thread *)thread);
+ return 0;
+}
+#endif
#endif
#ifdef CONFIG_MULTITHREADING
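k_thread_start() pairs with threads created using a K_FOREVER start delay. k_thread_create() itself is not converted by this patch, so creation still happens in supervisor mode, but releasing the thread can now be done from user mode given access to the thread object. A sketch (stack size and priority are arbitrary):

    K_THREAD_STACK_DEFINE(worker_stack, 1024);
    static struct k_thread worker_thread;
    void worker(void *p1, void *p2, void *p3);

    void start_worker(void)
    {
        k_tid_t tid = k_thread_create(&worker_thread, worker_stack,
                                      K_THREAD_STACK_SIZEOF(worker_stack),
                                      worker, NULL, NULL, NULL,
                                      5, K_USER, K_FOREVER);

        k_thread_start(tid); /* routed via _handler_k_thread_start in user mode */
    }
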
@@ -292,7 +326,7 @@
}
#endif
-int k_thread_cancel(k_tid_t tid)
+int _impl_k_thread_cancel(k_tid_t tid)
{
struct k_thread *thread = tid;
@@ -312,6 +346,17 @@
return 0;
}
+#ifdef CONFIG_USERSPACE
+u32_t _handler_k_thread_cancel(u32_t thread, u32_t arg2, u32_t arg3,
+ u32_t arg4, u32_t arg5, u32_t arg6, void *ssf)
+{
+ _SYSCALL_ARG1;
+
+ _SYSCALL_IS_OBJ(thread, K_OBJ_THREAD, 0, ssf);
+ return _impl_k_thread_cancel((struct k_thread *)thread);
+}
+#endif
+
static inline int is_in_any_group(struct _static_thread_data *thread_data,
u32_t groups)
{
@@ -369,7 +414,7 @@
_mark_thread_as_suspended(thread);
}
-void k_thread_suspend(struct k_thread *thread)
+void _impl_k_thread_suspend(struct k_thread *thread)
{
unsigned int key = irq_lock();
@@ -382,6 +427,18 @@
}
}
+#ifdef CONFIG_USERSPACE
+u32_t _handler_k_thread_suspend(u32_t thread, u32_t arg2, u32_t arg3,
+ u32_t arg4, u32_t arg5, u32_t arg6, void *ssf)
+{
+ _SYSCALL_ARG1;
+
+ _SYSCALL_IS_OBJ(thread, K_OBJ_THREAD, 0, ssf);
+ _impl_k_thread_suspend((k_tid_t)thread);
+ return 0;
+}
+#endif
+
void _k_thread_single_resume(struct k_thread *thread)
{
_mark_thread_as_not_suspended(thread);
@@ -391,7 +448,7 @@
}
}
-void k_thread_resume(struct k_thread *thread)
+void _impl_k_thread_resume(struct k_thread *thread)
{
unsigned int key = irq_lock();
@@ -400,6 +457,18 @@
_reschedule_threads(key);
}
+#ifdef CONFIG_USERSPACE
+u32_t _handler_k_thread_resume(u32_t thread, u32_t arg2, u32_t arg3,
+ u32_t arg4, u32_t arg5, u32_t arg6, void *ssf)
+{
+ _SYSCALL_ARG1;
+
+ _SYSCALL_IS_OBJ(thread, K_OBJ_THREAD, 0, ssf);
+ _impl_k_thread_resume((k_tid_t)thread);
+ return 0;
+}
+#endif
+
void _k_thread_single_abort(struct k_thread *thread)
{
if (thread->fn_abort != NULL) {
diff --git a/kernel/thread_abort.c b/kernel/thread_abort.c
index 4ed4456..97170e3 100644
--- a/kernel/thread_abort.c
+++ b/kernel/thread_abort.c
@@ -19,11 +19,12 @@
#include <wait_q.h>
#include <ksched.h>
#include <misc/__assert.h>
+#include <syscall_handler.h>
extern void _k_thread_single_abort(struct k_thread *thread);
#if !defined(CONFIG_ARCH_HAS_THREAD_ABORT)
-void k_thread_abort(k_tid_t thread)
+void _impl_k_thread_abort(k_tid_t thread)
{
unsigned int key;
@@ -44,3 +45,19 @@
_reschedule_threads(key);
}
#endif
+
+#ifdef CONFIG_USERSPACE
+u32_t _handler_k_thread_abort(u32_t thread_p, u32_t arg2, u32_t arg3,
+ u32_t arg4, u32_t arg5, u32_t arg6, void *ssf)
+{
+ struct k_thread *thread = (struct k_thread *)thread_p;
+
+ _SYSCALL_ARG1;
+
+ _SYSCALL_IS_OBJ(thread, K_OBJ_THREAD, 0, ssf);
+ _SYSCALL_VERIFY(!(thread->base.user_options & K_ESSENTIAL), ssf);
+
+ _impl_k_thread_abort(thread);
+ return 0;
+}
+#endif
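
The abort handler is the only one with a policy check beyond object validation: a user thread may not abort a thread flagged K_ESSENTIAL (the main thread, for example), since aborting an essential thread is treated as a fatal system error. With the check in place, a hostile or buggy user thread can only take itself down (illustrative user-mode code):

    void try_abort(k_tid_t victim)
    {
        /* If victim is K_ESSENTIAL, the handler's _SYSCALL_VERIFY() fails
         * and the *caller* is terminated; the target survives. Any other
         * thread object the caller has access to is aborted normally. */
        k_thread_abort(victim);
    }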