zephyr: replace zephyr integer types with C99 types

Replace the legacy Zephyr integer types (u8_t/u16_t/u32_t/u64_t and
s8_t/s16_t/s32_t/s64_t) tree-wide with their C99 <stdint.h>
equivalents. The conversion was done mechanically with:

    git grep -l 'u\(8\|16\|32\|64\)_t' | \
        xargs sed -i "s/u\(8\|16\|32\|64\)_t/uint\1_t/g"
    git grep -l 's\(8\|16\|32\|64\)_t' | \
        xargs sed -i "s/s\(8\|16\|32\|64\)_t/int\1_t/g"

Signed-off-by: Kumar Gala <kumar.gala@linaro.org>
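
The legacy names were plain typedefs of the C99 types, so the rename is
purely textual and behavior-neutral. As a sketch (hypothetical header,
not part of this patch), out-of-tree code that still spells the old
names could keep building with a small compatibility shim:

    /* legacy_int_types.h - hypothetical compatibility shim for
     * out-of-tree code still using the pre-C99 Zephyr type names;
     * mirrors the old typedefs this patch retires from in-tree use.
     */
    #ifndef LEGACY_INT_TYPES_H_
    #define LEGACY_INT_TYPES_H_

    #include <stdint.h>

    typedef uint8_t  u8_t;
    typedef uint16_t u16_t;
    typedef uint32_t u32_t;
    typedef uint64_t u64_t;

    typedef int8_t  s8_t;
    typedef int16_t s16_t;
    typedef int32_t s32_t;
    typedef int64_t s64_t;

    #endif /* LEGACY_INT_TYPES_H_ */
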
diff --git a/kernel/Kconfig b/kernel/Kconfig
index 4245572..ffa6aaf 100644
--- a/kernel/Kconfig
+++ b/kernel/Kconfig
@@ -577,7 +577,7 @@
The k_timeout_t API has changed to become an opaque type
that must be initialized with macros. Older applications
can choose this to continue using the old style of timeouts
- (which were s32_t counts of milliseconds), at the cost of
+ (which were int32_t counts of milliseconds), at the cost of
not being able to use new features.
config TIMEOUT_64BIT
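
As context for the help text above: with CONFIG_LEGACY_TIMEOUT_API, a
timeout is a raw int32_t count of milliseconds, while the current API
wraps it in an opaque k_timeout_t built with macros. A minimal sketch of
the difference, assuming the stock k_sleep() API:

    #include <zephyr.h>

    void wait_100ms(void)
    {
            /* Legacy style (CONFIG_LEGACY_TIMEOUT_API=y): a timeout is
             * a plain millisecond count, formerly typed s32_t:
             *
             *     k_sleep(100);
             */

            /* Current style: k_timeout_t is opaque and must be built
             * with macros such as K_MSEC()/K_SECONDS().
             */
            k_sleep(K_MSEC(100));
    }
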
diff --git a/kernel/device.c b/kernel/device.c
index 53df56d..0e6f2aa 100644
--- a/kernel/device.c
+++ b/kernel/device.c
@@ -24,8 +24,8 @@
extern struct device __device_end[];
#ifdef CONFIG_DEVICE_POWER_MANAGEMENT
-extern u32_t __device_busy_start[];
-extern u32_t __device_busy_end[];
+extern uint32_t __device_busy_start[];
+extern uint32_t __device_busy_end[];
#define DEVICE_BUSY_SIZE (__device_busy_end - __device_busy_start)
#endif
@@ -40,7 +40,7 @@
*
* @param level init level to run.
*/
-void z_sys_init_run_level(s32_t level)
+void z_sys_init_run_level(int32_t level)
{
static const struct init_entry *levels[] = {
__init_PRE_KERNEL_1_start,
@@ -119,7 +119,7 @@
#ifdef CONFIG_DEVICE_POWER_MANAGEMENT
int device_pm_control_nop(struct device *unused_device,
- u32_t unused_ctrl_command,
+ uint32_t unused_ctrl_command,
void *unused_context,
device_pm_cb cb,
void *unused_arg)
diff --git a/kernel/idle.c b/kernel/idle.c
index 701b4b7..5393803 100644
--- a/kernel/idle.c
+++ b/kernel/idle.c
@@ -63,7 +63,7 @@
* @return N/A
*/
#if !SMP_FALLBACK
-static void set_kernel_idle_time_in_ticks(s32_t ticks)
+static void set_kernel_idle_time_in_ticks(int32_t ticks)
{
#ifdef CONFIG_SYS_POWER_MANAGEMENT
_kernel.idle = ticks;
@@ -72,7 +72,7 @@
static void sys_power_save_idle(void)
{
- s32_t ticks = z_get_next_timeout_expiry();
+ int32_t ticks = z_get_next_timeout_expiry();
/* The documented behavior of CONFIG_TICKLESS_IDLE_THRESH is
* that the system should not enter a tickless idle for
@@ -113,7 +113,7 @@
}
#endif
-void z_sys_power_save_idle_exit(s32_t ticks)
+void z_sys_power_save_idle_exit(int32_t ticks)
{
#if defined(CONFIG_SYS_POWER_SLEEP_STATES)
/* Some CPU low power states require notification at the ISR
@@ -146,7 +146,7 @@
#ifdef CONFIG_BOOT_TIME_MEASUREMENT
/* record timestamp when idling begins */
- extern u32_t z_timestamp_idle;
+ extern uint32_t z_timestamp_idle;
z_timestamp_idle = k_cycle_get_32();
#endif
diff --git a/kernel/include/kernel_arch_interface.h b/kernel/include/kernel_arch_interface.h
index 781a40d..1ced9df 100644
--- a/kernel/include/kernel_arch_interface.h
+++ b/kernel/include/kernel_arch_interface.h
@@ -36,7 +36,7 @@
*
* @param usec_to_wait Wait period, in microseconds
*/
-void arch_busy_wait(u32_t usec_to_wait);
+void arch_busy_wait(uint32_t usec_to_wait);
#endif
/** @} */
diff --git a/kernel/include/kernel_internal.h b/kernel/include/kernel_internal.h
index c137d78..c0852d0 100644
--- a/kernel/include/kernel_internal.h
+++ b/kernel/include/kernel_internal.h
@@ -44,7 +44,7 @@
k_thread_stack_t *stack, size_t stack_size,
k_thread_entry_t entry,
void *p1, void *p2, void *p3,
- int prio, u32_t options, const char *name);
+ int prio, uint32_t options, const char *name);
static inline void z_new_thread_init(struct k_thread *thread,
char *stack, size_t stack_size)
@@ -117,15 +117,15 @@
extern void smp_timer_init(void);
-extern void z_early_boot_rand_get(u8_t *buf, size_t length);
+extern void z_early_boot_rand_get(uint8_t *buf, size_t length);
#if CONFIG_STACK_POINTER_RANDOM
extern int z_stack_adjust_initialized;
#endif
#ifdef CONFIG_BOOT_TIME_MEASUREMENT
-extern u32_t z_timestamp_main; /* timestamp when main task starts */
-extern u32_t z_timestamp_idle; /* timestamp when CPU goes idle */
+extern uint32_t z_timestamp_main; /* timestamp when main task starts */
+extern uint32_t z_timestamp_idle; /* timestamp when CPU goes idle */
#endif
extern struct k_thread z_main_thread;
@@ -138,7 +138,7 @@
CONFIG_ISR_STACK_SIZE);
#ifdef CONFIG_GEN_PRIV_STACKS
-extern u8_t *z_priv_stack_find(k_thread_stack_t *stack);
+extern uint8_t *z_priv_stack_find(k_thread_stack_t *stack);
#endif
#ifdef __cplusplus
diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index 4a88bef..2f350fa 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -43,11 +43,11 @@
void z_unpend_thread_no_timeout(struct k_thread *thread);
int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
_wait_q_t *wait_q, k_timeout_t timeout);
-int z_pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, k_timeout_t timeout);
+int z_pend_curr_irqlock(uint32_t key, _wait_q_t *wait_q, k_timeout_t timeout);
void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
k_timeout_t timeout);
void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key);
-void z_reschedule_irqlock(u32_t key);
+void z_reschedule_irqlock(uint32_t key);
struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q);
void z_unpend_thread(struct k_thread *thread);
int z_unpend_all(_wait_q_t *wait_q);
@@ -110,7 +110,7 @@
static inline bool z_is_thread_prevented_from_running(struct k_thread *thread)
{
- u8_t state = thread->base.thread_state;
+ uint8_t state = thread->base.thread_state;
return (state & (_THREAD_PENDING | _THREAD_PRESTART | _THREAD_DEAD |
_THREAD_DUMMY | _THREAD_SUSPENDED)) != 0U;
@@ -133,7 +133,7 @@
return (thread->base.thread_state & _THREAD_PRESTART) == 0U;
}
-static inline bool z_is_thread_state_set(struct k_thread *thread, u32_t state)
+static inline bool z_is_thread_state_set(struct k_thread *thread, uint32_t state)
{
return (thread->base.thread_state & state) != 0U;
}
@@ -170,13 +170,13 @@
thread->base.thread_state &= ~_THREAD_PENDING;
}
-static inline void z_set_thread_states(struct k_thread *thread, u32_t states)
+static inline void z_set_thread_states(struct k_thread *thread, uint32_t states)
{
thread->base.thread_state |= states;
}
static inline void z_reset_thread_states(struct k_thread *thread,
- u32_t states)
+ uint32_t states)
{
thread->base.thread_state &= ~states;
}
diff --git a/kernel/init.c b/kernel/init.c
index fb25ff3..6f97f64 100644
--- a/kernel/init.c
+++ b/kernel/init.c
@@ -52,8 +52,8 @@
/* boot time measurement items */
#ifdef CONFIG_BOOT_TIME_MEASUREMENT
-u32_t __noinit z_timestamp_main; /* timestamp when main task starts */
-u32_t __noinit z_timestamp_idle; /* timestamp when CPU goes idle */
+uint32_t __noinit z_timestamp_main; /* timestamp when main task starts */
+uint32_t __noinit z_timestamp_idle; /* timestamp when CPU goes idle */
#endif
/* init/main and idle threads */
@@ -108,11 +108,11 @@
(void)memset(__bss_start, 0, __bss_end - __bss_start);
#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_ccm), okay)
(void)memset(&__ccm_bss_start, 0,
- ((u32_t) &__ccm_bss_end - (u32_t) &__ccm_bss_start));
+ ((uint32_t) &__ccm_bss_end - (uint32_t) &__ccm_bss_start));
#endif
#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_dtcm), okay)
(void)memset(&__dtcm_bss_start, 0,
- ((u32_t) &__dtcm_bss_end - (u32_t) &__dtcm_bss_start));
+ ((uint32_t) &__dtcm_bss_end - (uint32_t) &__dtcm_bss_start));
#endif
#ifdef CONFIG_CODE_DATA_RELOCATION
extern void bss_zeroing_relocation(void);
@@ -121,7 +121,7 @@
#endif /* CONFIG_CODE_DATA_RELOCATION */
#ifdef CONFIG_COVERAGE_GCOV
(void)memset(&__gcov_bss_start, 0,
- ((u32_t) &__gcov_bss_end - (u32_t) &__gcov_bss_start));
+ ((uint32_t) &__gcov_bss_end - (uint32_t) &__gcov_bss_start));
#endif
}
@@ -169,9 +169,9 @@
* value gets set later in z_cstart().
*/
uintptr_t guard_copy = __stack_chk_guard;
- u8_t *src = (u8_t *)&_app_smem_rom_start;
- u8_t *dst = (u8_t *)&_app_smem_start;
- u32_t count = _app_smem_end - _app_smem_start;
+ uint8_t *src = (uint8_t *)&_app_smem_rom_start;
+ uint8_t *dst = (uint8_t *)&_app_smem_start;
+ uint32_t count = _app_smem_end - _app_smem_start;
guard_copy = __stack_chk_guard;
while (count > 0) {
@@ -373,9 +373,9 @@
#endif /* CONFIG_MULTITHREADING */
#if defined(CONFIG_ENTROPY_HAS_DRIVER) || defined(CONFIG_TEST_RANDOM_GENERATOR)
-void z_early_boot_rand_get(u8_t *buf, size_t length)
+void z_early_boot_rand_get(uint8_t *buf, size_t length)
{
- int n = sizeof(u32_t);
+ int n = sizeof(uint32_t);
#ifdef CONFIG_ENTROPY_HAS_DRIVER
struct device *entropy = device_get_binding(DT_CHOSEN_ZEPHYR_ENTROPY_LABEL);
int rc;
@@ -410,12 +410,12 @@
*/
while (length > 0) {
- u32_t rndbits;
- u8_t *p_rndbits = (u8_t *)&rndbits;
+ uint32_t rndbits;
+ uint8_t *p_rndbits = (uint8_t *)&rndbits;
rndbits = sys_rand32_get();
- if (length < sizeof(u32_t)) {
+ if (length < sizeof(uint32_t)) {
n = length;
}
@@ -467,7 +467,7 @@
#ifdef CONFIG_STACK_CANARIES
uintptr_t stack_guard;
- z_early_boot_rand_get((u8_t *)&stack_guard, sizeof(stack_guard));
+ z_early_boot_rand_get((uint8_t *)&stack_guard, sizeof(stack_guard));
__stack_chk_guard = stack_guard;
__stack_chk_guard <<= 8;
#endif /* CONFIG_STACK_CANARIES */
diff --git a/kernel/kheap.c b/kernel/kheap.c
index 12344ce..8951649 100644
--- a/kernel/kheap.c
+++ b/kernel/kheap.c
@@ -28,7 +28,7 @@
void *k_heap_alloc(struct k_heap *h, size_t bytes, k_timeout_t timeout)
{
- s64_t now, end = z_timeout_end_calc(timeout);
+ int64_t now, end = z_timeout_end_calc(timeout);
void *ret = NULL;
k_spinlock_key_t key = k_spin_lock(&h->lock);
diff --git a/kernel/mailbox.c b/kernel/mailbox.c
index cd9016f..bb20e8f 100644
--- a/kernel/mailbox.c
+++ b/kernel/mailbox.c
@@ -120,7 +120,7 @@
static int mbox_message_match(struct k_mbox_msg *tx_msg,
struct k_mbox_msg *rx_msg)
{
- u32_t temp_info;
+ uint32_t temp_info;
if (((tx_msg->tx_target_thread == (k_tid_t)K_ANY) ||
(tx_msg->tx_target_thread == rx_msg->tx_target_thread)) &&
diff --git a/kernel/mem_domain.c b/kernel/mem_domain.c
index 8534ae7..1458269 100644
--- a/kernel/mem_domain.c
+++ b/kernel/mem_domain.c
@@ -13,17 +13,17 @@
#include <spinlock.h>
static struct k_spinlock lock;
-static u8_t max_partitions;
+static uint8_t max_partitions;
#if (defined(CONFIG_EXECUTE_XOR_WRITE) || \
defined(CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS)) && __ASSERT_ON
static bool sane_partition(const struct k_mem_partition *part,
const struct k_mem_partition *parts,
- u32_t num_parts)
+ uint32_t num_parts)
{
bool exec, write;
- u32_t last;
- u32_t i;
+ uint32_t last;
+ uint32_t i;
last = part->start + part->size - 1;
exec = K_MEM_PARTITION_IS_EXECUTABLE(part->attr);
@@ -38,7 +38,7 @@
for (i = 0U; i < num_parts; i++) {
bool cur_write, cur_exec;
- u32_t cur_last;
+ uint32_t cur_last;
cur_last = parts[i].start + parts[i].size - 1;
@@ -80,7 +80,7 @@
#define sane_partition_domain(...) (true)
#endif
-void k_mem_domain_init(struct k_mem_domain *domain, u8_t num_parts,
+void k_mem_domain_init(struct k_mem_domain *domain, uint8_t num_parts,
struct k_mem_partition *parts[])
{
k_spinlock_key_t key;
@@ -95,7 +95,7 @@
(void)memset(domain->partitions, 0, sizeof(domain->partitions));
if (num_parts != 0U) {
- u32_t i;
+ uint32_t i;
for (i = 0U; i < num_parts; i++) {
__ASSERT(parts[i] != NULL, "");
diff --git a/kernel/mem_slab.c b/kernel/mem_slab.c
index 6c4dc80..7c51689 100644
--- a/kernel/mem_slab.c
+++ b/kernel/mem_slab.c
@@ -31,7 +31,7 @@
*/
static int create_free_list(struct k_mem_slab *slab)
{
- u32_t j;
+ uint32_t j;
char *p;
/* blocks must be word aligned */
@@ -80,7 +80,7 @@
CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
- size_t block_size, u32_t num_blocks)
+ size_t block_size, uint32_t num_blocks)
{
int rc = 0;
diff --git a/kernel/mempool_sys.c b/kernel/mempool_sys.c
index da78107..a8d749b 100644
--- a/kernel/mempool_sys.c
+++ b/kernel/mempool_sys.c
@@ -48,14 +48,14 @@
size_t size, k_timeout_t timeout)
{
int ret;
- u64_t end = 0;
+ uint64_t end = 0;
__ASSERT(!(arch_is_in_isr() && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");
end = z_timeout_end_calc(timeout);
while (true) {
- u32_t level_num, block_num;
+ uint32_t level_num, block_num;
ret = z_sys_mem_pool_block_alloc(&p->base, size,
&level_num, &block_num,
@@ -72,7 +72,7 @@
z_pend_curr_unlocked(&p->wait_q, timeout);
if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
- s64_t remaining = end - z_tick_get();
+ int64_t remaining = end - z_tick_get();
if (remaining <= 0) {
break;
diff --git a/kernel/msg_q.c b/kernel/msg_q.c
index f7351f2..106d46a 100644
--- a/kernel/msg_q.c
+++ b/kernel/msg_q.c
@@ -47,7 +47,7 @@
#endif /* CONFIG_OBJECT_TRACING */
void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size,
- u32_t max_msgs)
+ uint32_t max_msgs)
{
msgq->msg_size = msg_size;
msgq->max_msgs = max_msgs;
@@ -66,7 +66,7 @@
}
int z_impl_k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
- u32_t max_msgs)
+ uint32_t max_msgs)
{
void *buffer;
int ret;
@@ -90,7 +90,7 @@
#ifdef CONFIG_USERSPACE
int z_vrfy_k_msgq_alloc_init(struct k_msgq *q, size_t msg_size,
- u32_t max_msgs)
+ uint32_t max_msgs)
{
Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(q, K_OBJ_MSGQ));
@@ -312,14 +312,14 @@
}
#include <syscalls/k_msgq_purge_mrsh.c>
-static inline u32_t z_vrfy_k_msgq_num_free_get(struct k_msgq *q)
+static inline uint32_t z_vrfy_k_msgq_num_free_get(struct k_msgq *q)
{
Z_OOPS(Z_SYSCALL_OBJ(q, K_OBJ_MSGQ));
return z_impl_k_msgq_num_free_get(q);
}
#include <syscalls/k_msgq_num_free_get_mrsh.c>
-static inline u32_t z_vrfy_k_msgq_num_used_get(struct k_msgq *q)
+static inline uint32_t z_vrfy_k_msgq_num_used_get(struct k_msgq *q)
{
Z_OOPS(Z_SYSCALL_OBJ(q, K_OBJ_MSGQ));
return z_impl_k_msgq_num_used_get(q);
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 3181966..551cbe2 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -93,7 +93,7 @@
#include <syscalls/k_mutex_init_mrsh.c>
#endif
-static s32_t new_prio_for_inheritance(s32_t target, s32_t limit)
+static int32_t new_prio_for_inheritance(int32_t target, int32_t limit)
{
int new_prio = z_is_prio_higher(target, limit) ? target : limit;
@@ -102,7 +102,7 @@
return new_prio;
}
-static bool adjust_owner_prio(struct k_mutex *mutex, s32_t new_prio)
+static bool adjust_owner_prio(struct k_mutex *mutex, int32_t new_prio)
{
if (mutex->owner->base.prio != new_prio) {
diff --git a/kernel/pipes.c b/kernel/pipes.c
index d28d7a3..f841767 100644
--- a/kernel/pipes.c
+++ b/kernel/pipes.c
@@ -639,7 +639,7 @@
sys_dlist_get(&xfer_list);
while ((thread != NULL) && (num_bytes_read < bytes_to_read)) {
desc = (struct k_pipe_desc *)thread->base.swap_data;
- bytes_copied = pipe_xfer((u8_t *)data + num_bytes_read,
+ bytes_copied = pipe_xfer((uint8_t *)data + num_bytes_read,
bytes_to_read - num_bytes_read,
desc->buffer, desc->bytes_to_xfer);
@@ -663,7 +663,7 @@
if ((writer != NULL) && (num_bytes_read < bytes_to_read)) {
desc = (struct k_pipe_desc *)writer->base.swap_data;
- bytes_copied = pipe_xfer((u8_t *)data + num_bytes_read,
+ bytes_copied = pipe_xfer((uint8_t *)data + num_bytes_read,
bytes_to_read - num_bytes_read,
desc->buffer, desc->bytes_to_xfer);
@@ -722,7 +722,7 @@
struct k_pipe_desc pipe_desc;
- pipe_desc.buffer = (u8_t *)data + num_bytes_read;
+ pipe_desc.buffer = (uint8_t *)data + num_bytes_read;
pipe_desc.bytes_to_xfer = bytes_to_read - num_bytes_read;
if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
diff --git a/kernel/poll.c b/kernel/poll.c
index 4fe88ff..9c0ab72 100644
--- a/kernel/poll.c
+++ b/kernel/poll.c
@@ -35,7 +35,7 @@
*/
static struct k_spinlock lock;
-void k_poll_event_init(struct k_poll_event *event, u32_t type,
+void k_poll_event_init(struct k_poll_event *event, uint32_t type,
int mode, void *obj)
{
__ASSERT(mode == K_POLL_MODE_NOTIFY_ONLY,
@@ -53,7 +53,7 @@
}
/* must be called with interrupts locked */
-static inline bool is_condition_met(struct k_poll_event *event, u32_t *state)
+static inline bool is_condition_met(struct k_poll_event *event, uint32_t *state)
{
switch (event->type) {
case K_POLL_TYPE_SEM_AVAILABLE:
@@ -182,7 +182,7 @@
}
}
-static inline void set_event_ready(struct k_poll_event *event, u32_t state)
+static inline void set_event_ready(struct k_poll_event *event, uint32_t state)
{
event->poller = NULL;
event->state |= state;
@@ -197,7 +197,7 @@
for (int ii = 0; ii < num_events; ii++) {
k_spinlock_key_t key;
- u32_t state;
+ uint32_t state;
key = k_spin_lock(&lock);
if (is_condition_met(&events[ii], &state)) {
@@ -217,7 +217,7 @@
return events_registered;
}
-static int k_poll_poller_cb(struct k_poll_event *event, u32_t state)
+static int k_poll_poller_cb(struct k_poll_event *event, uint32_t state)
{
struct k_thread *thread = event->poller->thread;
@@ -307,7 +307,7 @@
int ret;
k_spinlock_key_t key;
struct k_poll_event *events_copy = NULL;
- u32_t bounds;
+ uint32_t bounds;
/* Validate the events buffer and make a copy of it in an
* allocated kernel-side buffer.
@@ -378,7 +378,7 @@
#endif
/* must be called with interrupts locked */
-static int signal_poll_event(struct k_poll_event *event, u32_t state)
+static int signal_poll_event(struct k_poll_event *event, uint32_t state)
{
struct _poller *poller = event->poller;
int retcode = 0;
@@ -399,7 +399,7 @@
return retcode;
}
-void z_handle_obj_poll_events(sys_dlist_t *events, u32_t state)
+void z_handle_obj_poll_events(sys_dlist_t *events, uint32_t state)
{
struct k_poll_event *poll_event;
@@ -521,7 +521,7 @@
k_work_submit_to_queue(work_q, &twork->work);
}
-static int triggered_work_poller_cb(struct k_poll_event *event, u32_t status)
+static int triggered_work_poller_cb(struct k_poll_event *event, uint32_t status)
{
struct _poller *poller = event->poller;
diff --git a/kernel/queue.c b/kernel/queue.c
index 578325a..1564fb7 100644
--- a/kernel/queue.c
+++ b/kernel/queue.c
@@ -33,7 +33,7 @@
{
void *ret;
- if ((node != NULL) && (sys_sfnode_flags_get(node) != (u8_t)0)) {
+ if ((node != NULL) && (sys_sfnode_flags_get(node) != (uint8_t)0)) {
/* If the flag is set, then the enqueue operation for this item
* did a behind-the scenes memory allocation of an alloc_node
* struct, which is what got put in the queue. Free it and pass
@@ -106,7 +106,7 @@
z_ready_thread(thread);
}
-static inline void handle_poll_events(struct k_queue *queue, u32_t state)
+static inline void handle_poll_events(struct k_queue *queue, uint32_t state)
{
#ifdef CONFIG_POLL
z_handle_obj_poll_events(&queue->poll_events, state);
@@ -137,7 +137,7 @@
#include <syscalls/k_queue_cancel_wait_mrsh.c>
#endif
-static s32_t queue_insert(struct k_queue *queue, void *prev, void *data,
+static int32_t queue_insert(struct k_queue *queue, void *prev, void *data,
bool alloc)
{
k_spinlock_key_t key = k_spin_lock(&queue->lock);
@@ -189,14 +189,14 @@
(void)queue_insert(queue, NULL, data, false);
}
-s32_t z_impl_k_queue_alloc_append(struct k_queue *queue, void *data)
+int32_t z_impl_k_queue_alloc_append(struct k_queue *queue, void *data)
{
return queue_insert(queue, sys_sflist_peek_tail(&queue->data_q), data,
true);
}
#ifdef CONFIG_USERSPACE
-static inline s32_t z_vrfy_k_queue_alloc_append(struct k_queue *queue,
+static inline int32_t z_vrfy_k_queue_alloc_append(struct k_queue *queue,
void *data)
{
Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
@@ -205,13 +205,13 @@
#include <syscalls/k_queue_alloc_append_mrsh.c>
#endif
-s32_t z_impl_k_queue_alloc_prepend(struct k_queue *queue, void *data)
+int32_t z_impl_k_queue_alloc_prepend(struct k_queue *queue, void *data)
{
return queue_insert(queue, NULL, data, true);
}
#ifdef CONFIG_USERSPACE
-static inline s32_t z_vrfy_k_queue_alloc_prepend(struct k_queue *queue,
+static inline int32_t z_vrfy_k_queue_alloc_prepend(struct k_queue *queue,
void *data)
{
Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
diff --git a/kernel/sched.c b/kernel/sched.c
index 093dd28..20567cb 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -288,7 +288,7 @@
}
}
-void k_sched_time_slice_set(s32_t slice, int prio)
+void k_sched_time_slice_set(int32_t slice, int prio)
{
LOCKED(&sched_spinlock) {
_current_cpu->slice_ticks = 0;
@@ -506,7 +506,7 @@
}
}
- u32_t mask = _THREAD_DEAD;
+ uint32_t mask = _THREAD_DEAD;
/* If the abort is happening in interrupt context,
* that means that execution will never return to the
@@ -649,7 +649,7 @@
}
#endif
-int z_pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, k_timeout_t timeout)
+int z_pend_curr_irqlock(uint32_t key, _wait_q_t *wait_q, k_timeout_t timeout)
{
pend(_current, wait_q, timeout);
@@ -737,7 +737,7 @@
}
}
-static inline int resched(u32_t key)
+static inline int resched(uint32_t key)
{
#ifdef CONFIG_SMP
_current_cpu->swap_ok = 0;
@@ -755,7 +755,7 @@
}
}
-void z_reschedule_irqlock(u32_t key)
+void z_reschedule_irqlock(uint32_t key)
{
if (resched(key)) {
z_swap_irqlock(key);
@@ -1076,7 +1076,7 @@
Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
Z_OOPS(Z_SYSCALL_VERIFY_MSG(_is_valid_prio(prio, NULL),
"invalid thread priority %d", prio));
- Z_OOPS(Z_SYSCALL_VERIFY_MSG((s8_t)prio >= thread->base.prio,
+ Z_OOPS(Z_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio,
"thread priority may only be downgraded (%d < %d)",
prio, thread->base.prio));
@@ -1142,10 +1142,10 @@
#include <syscalls/k_yield_mrsh.c>
#endif
-static s32_t z_tick_sleep(s32_t ticks)
+static int32_t z_tick_sleep(int32_t ticks)
{
#ifdef CONFIG_MULTITHREADING
- u32_t expected_wakeup_time;
+ uint32_t expected_wakeup_time;
__ASSERT(!arch_is_in_isr(), "");
@@ -1195,7 +1195,7 @@
return 0;
}
-s32_t z_impl_k_sleep(k_timeout_t timeout)
+int32_t z_impl_k_sleep(k_timeout_t timeout)
{
k_ticks_t ticks;
@@ -1203,7 +1203,7 @@
if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
k_thread_suspend(_current);
- return (s32_t) K_TICKS_FOREVER;
+ return (int32_t) K_TICKS_FOREVER;
}
#ifdef CONFIG_LEGACY_TIMEOUT_API
@@ -1217,16 +1217,16 @@
}
#ifdef CONFIG_USERSPACE
-static inline s32_t z_vrfy_k_sleep(k_timeout_t timeout)
+static inline int32_t z_vrfy_k_sleep(k_timeout_t timeout)
{
return z_impl_k_sleep(timeout);
}
#include <syscalls/k_sleep_mrsh.c>
#endif
-s32_t z_impl_k_usleep(int us)
+int32_t z_impl_k_usleep(int us)
{
- s32_t ticks;
+ int32_t ticks;
ticks = k_us_to_ticks_ceil64(us);
ticks = z_tick_sleep(ticks);
@@ -1234,7 +1234,7 @@
}
#ifdef CONFIG_USERSPACE
-static inline s32_t z_vrfy_k_usleep(int us)
+static inline int32_t z_vrfy_k_usleep(int us)
{
return z_impl_k_usleep(us);
}
@@ -1381,7 +1381,7 @@
# endif
-static int cpu_mask_mod(k_tid_t thread, u32_t enable_mask, u32_t disable_mask)
+static int cpu_mask_mod(k_tid_t thread, uint32_t enable_mask, uint32_t disable_mask)
{
int ret = 0;
diff --git a/kernel/stack.c b/kernel/stack.c
index be00a1f..d9c4b53 100644
--- a/kernel/stack.c
+++ b/kernel/stack.c
@@ -42,7 +42,7 @@
#endif /* CONFIG_OBJECT_TRACING */
void k_stack_init(struct k_stack *stack, stack_data_t *buffer,
- u32_t num_entries)
+ uint32_t num_entries)
{
z_waitq_init(&stack->wait_q);
stack->lock = (struct k_spinlock) {};
@@ -53,16 +53,16 @@
z_object_init(stack);
}
-s32_t z_impl_k_stack_alloc_init(struct k_stack *stack, u32_t num_entries)
+int32_t z_impl_k_stack_alloc_init(struct k_stack *stack, uint32_t num_entries)
{
void *buffer;
- s32_t ret;
+ int32_t ret;
buffer = z_thread_malloc(num_entries * sizeof(stack_data_t));
if (buffer != NULL) {
k_stack_init(stack, buffer, num_entries);
stack->flags = K_STACK_FLAG_ALLOC;
- ret = (s32_t)0;
+ ret = (int32_t)0;
} else {
ret = -ENOMEM;
}
@@ -71,8 +71,8 @@
}
#ifdef CONFIG_USERSPACE
-static inline s32_t z_vrfy_k_stack_alloc_init(struct k_stack *stack,
- u32_t num_entries)
+static inline int32_t z_vrfy_k_stack_alloc_init(struct k_stack *stack,
+ uint32_t num_entries)
{
Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(stack, K_OBJ_STACK));
Z_OOPS(Z_SYSCALL_VERIFY(num_entries > 0));
@@ -87,7 +87,7 @@
return -EAGAIN;
}
- if ((stack->flags & K_STACK_FLAG_ALLOC) != (u8_t)0) {
+ if ((stack->flags & K_STACK_FLAG_ALLOC) != (uint8_t)0) {
k_free(stack->base);
stack->base = NULL;
stack->flags &= ~K_STACK_FLAG_ALLOC;
diff --git a/kernel/thread.c b/kernel/thread.c
index a1288ae..5dc1b4e 100644
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -115,19 +115,19 @@
}
#ifdef CONFIG_SYS_CLOCK_EXISTS
-void z_impl_k_busy_wait(u32_t usec_to_wait)
+void z_impl_k_busy_wait(uint32_t usec_to_wait)
{
#if !defined(CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT)
/* use 64-bit math to prevent overflow when multiplying */
- u32_t cycles_to_wait = (u32_t)(
- (u64_t)usec_to_wait *
- (u64_t)sys_clock_hw_cycles_per_sec() /
- (u64_t)USEC_PER_SEC
+ uint32_t cycles_to_wait = (uint32_t)(
+ (uint64_t)usec_to_wait *
+ (uint64_t)sys_clock_hw_cycles_per_sec() /
+ (uint64_t)USEC_PER_SEC
);
- u32_t start_cycles = k_cycle_get_32();
+ uint32_t start_cycles = k_cycle_get_32();
for (;;) {
- u32_t current_cycles = k_cycle_get_32();
+ uint32_t current_cycles = k_cycle_get_32();
/* this handles the rollover on an unsigned 32-bit value */
if ((current_cycles - start_cycles) >= cycles_to_wait) {
@@ -140,7 +140,7 @@
}
#ifdef CONFIG_USERSPACE
-static inline void z_vrfy_k_busy_wait(u32_t usec_to_wait)
+static inline void z_vrfy_k_busy_wait(uint32_t usec_to_wait)
{
z_impl_k_busy_wait(usec_to_wait);
}
@@ -358,13 +358,13 @@
*/
void z_check_stack_sentinel(void)
{
- u32_t *stack;
+ uint32_t *stack;
if ((_current->base.thread_state & _THREAD_DUMMY) != 0) {
return;
}
- stack = (u32_t *)_current->stack_info.start;
+ stack = (uint32_t *)_current->stack_info.start;
if (*stack != STACK_SENTINEL) {
/* Restore it so further checks don't trigger this same error */
*stack = STACK_SENTINEL;
@@ -422,9 +422,9 @@
size_t random_val;
if (!z_stack_adjust_initialized) {
- z_early_boot_rand_get((u8_t *)&random_val, sizeof(random_val));
+ z_early_boot_rand_get((uint8_t *)&random_val, sizeof(random_val));
} else {
- sys_rand_get((u8_t *)&random_val, sizeof(random_val));
+ sys_rand_get((uint8_t *)&random_val, sizeof(random_val));
}
/* Don't need to worry about alignment of the size here,
@@ -457,7 +457,7 @@
k_thread_stack_t *stack, size_t stack_size,
k_thread_entry_t entry,
void *p1, void *p2, void *p3,
- int prio, u32_t options, const char *name)
+ int prio, uint32_t options, const char *name)
{
Z_ASSERT_VALID_PRIO(prio, entry);
@@ -504,7 +504,7 @@
* We periodically check that it's still present and kill the thread
* if it isn't.
*/
- *((u32_t *)new_thread->stack_info.start) = STACK_SENTINEL;
+ *((uint32_t *)new_thread->stack_info.start) = STACK_SENTINEL;
#endif /* CONFIG_STACK_SENTINEL */
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
#ifndef CONFIG_THREAD_USERSPACE_LOCAL_DATA_ARCH_DEFER_SETUP
@@ -575,7 +575,7 @@
k_thread_stack_t *stack,
size_t stack_size, k_thread_entry_t entry,
void *p1, void *p2, void *p3,
- int prio, u32_t options, k_timeout_t delay)
+ int prio, uint32_t options, k_timeout_t delay)
{
__ASSERT(!arch_is_in_isr(), "Threads may not be created in ISRs");
@@ -602,7 +602,7 @@
k_thread_stack_t *stack,
size_t stack_size, k_thread_entry_t entry,
void *p1, void *p2, void *p3,
- int prio, u32_t options, k_timeout_t delay)
+ int prio, uint32_t options, k_timeout_t delay)
{
size_t total_size, stack_obj_size;
struct z_object *stack_object;
@@ -720,12 +720,12 @@
#endif
void z_init_thread_base(struct _thread_base *thread_base, int priority,
- u32_t initial_state, unsigned int options)
+ uint32_t initial_state, unsigned int options)
{
/* k_q_node is initialized upon first insertion in a list */
- thread_base->user_options = (u8_t)options;
- thread_base->thread_state = (u8_t)initial_state;
+ thread_base->user_options = (uint8_t)options;
+ thread_base->thread_state = (uint8_t)initial_state;
thread_base->prio = priority;
@@ -829,15 +829,15 @@
int z_impl_k_thread_stack_space_get(const struct k_thread *thread,
size_t *unused_ptr)
{
- const u8_t *start = (u8_t *)thread->stack_info.start;
+ const uint8_t *start = (uint8_t *)thread->stack_info.start;
size_t size = thread->stack_info.size;
size_t unused = 0;
- const u8_t *checked_stack = start;
+ const uint8_t *checked_stack = start;
/* Take the address of any local variable as a shallow bound for the
* stack pointer. Addresses above it are guaranteed to be
* accessible.
*/
- const u8_t *stack_pointer = (const u8_t *)&start;
+ const uint8_t *stack_pointer = (const uint8_t *)&start;
/* If we are currently running on the stack being analyzed, some
* memory management hardware will generate an exception if we
diff --git a/kernel/timeout.c b/kernel/timeout.c
index 523a7a4..e5f3b05 100644
--- a/kernel/timeout.c
+++ b/kernel/timeout.c
@@ -17,7 +17,7 @@
__i.key == 0; \
k_spin_unlock(lck, __key), __i.key = 1)
-static u64_t curr_tick;
+static uint64_t curr_tick;
static sys_dlist_t timeout_list = SYS_DLIST_STATIC_INIT(&timeout_list);
@@ -64,16 +64,16 @@
sys_dlist_remove(&t->node);
}
-static s32_t elapsed(void)
+static int32_t elapsed(void)
{
return announce_remaining == 0 ? z_clock_elapsed() : 0;
}
-static s32_t next_timeout(void)
+static int32_t next_timeout(void)
{
struct _timeout *to = first();
- s32_t ticks_elapsed = elapsed();
- s32_t ret = to == NULL ? MAX_WAIT : MAX(0, to->dticks - ticks_elapsed);
+ int32_t ticks_elapsed = elapsed();
+ int32_t ret = to == NULL ? MAX_WAIT : MAX(0, to->dticks - ticks_elapsed);
#ifdef CONFIG_TIMESLICING
if (_current_cpu->slice_ticks && _current_cpu->slice_ticks < ret) {
@@ -184,9 +184,9 @@
return ticks;
}
-s32_t z_get_next_timeout_expiry(void)
+int32_t z_get_next_timeout_expiry(void)
{
- s32_t ret = (s32_t) K_TICKS_FOREVER;
+ int32_t ret = (int32_t) K_TICKS_FOREVER;
LOCKED(&timeout_lock) {
ret = next_timeout();
@@ -194,7 +194,7 @@
return ret;
}
-void z_set_timeout_expiry(s32_t ticks, bool idle)
+void z_set_timeout_expiry(int32_t ticks, bool idle)
{
LOCKED(&timeout_lock) {
int next = next_timeout();
@@ -217,7 +217,7 @@
}
}
-void z_clock_announce(s32_t ticks)
+void z_clock_announce(int32_t ticks)
{
#ifdef CONFIG_TIMESLICING
z_time_slice(ticks);
@@ -253,9 +253,9 @@
k_spin_unlock(&timeout_lock, key);
}
-s64_t z_tick_get(void)
+int64_t z_tick_get(void)
{
- u64_t t = 0U;
+ uint64_t t = 0U;
LOCKED(&timeout_lock) {
t = curr_tick + z_clock_elapsed();
@@ -263,22 +263,22 @@
return t;
}
-u32_t z_tick_get_32(void)
+uint32_t z_tick_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
- return (u32_t)z_tick_get();
+ return (uint32_t)z_tick_get();
#else
- return (u32_t)curr_tick;
+ return (uint32_t)curr_tick;
#endif
}
-s64_t z_impl_k_uptime_ticks(void)
+int64_t z_impl_k_uptime_ticks(void)
{
return z_tick_get();
}
#ifdef CONFIG_USERSPACE
-static inline s64_t z_vrfy_k_uptime_ticks(void)
+static inline int64_t z_vrfy_k_uptime_ticks(void)
{
return z_impl_k_uptime_ticks();
}
@@ -290,7 +290,7 @@
* synchronously with the user passing a new timeout value. It should
* not be used iteratively to adjust a timeout.
*/
-u64_t z_timeout_end_calc(k_timeout_t timeout)
+uint64_t z_timeout_end_calc(k_timeout_t timeout)
{
k_ticks_t dt;
diff --git a/kernel/timer.c b/kernel/timer.c
index 350a603..9a9b268 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -186,10 +186,10 @@
#include <syscalls/k_timer_stop_mrsh.c>
#endif
-u32_t z_impl_k_timer_status_get(struct k_timer *timer)
+uint32_t z_impl_k_timer_status_get(struct k_timer *timer)
{
k_spinlock_key_t key = k_spin_lock(&lock);
- u32_t result = timer->status;
+ uint32_t result = timer->status;
timer->status = 0U;
k_spin_unlock(&lock, key);
@@ -198,7 +198,7 @@
}
#ifdef CONFIG_USERSPACE
-static inline u32_t z_vrfy_k_timer_status_get(struct k_timer *timer)
+static inline uint32_t z_vrfy_k_timer_status_get(struct k_timer *timer)
{
Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
return z_impl_k_timer_status_get(timer);
@@ -206,12 +206,12 @@
#include <syscalls/k_timer_status_get_mrsh.c>
#endif
-u32_t z_impl_k_timer_status_sync(struct k_timer *timer)
+uint32_t z_impl_k_timer_status_sync(struct k_timer *timer)
{
__ASSERT(!arch_is_in_isr(), "");
k_spinlock_key_t key = k_spin_lock(&lock);
- u32_t result = timer->status;
+ uint32_t result = timer->status;
if (result == 0U) {
if (!z_is_inactive_timeout(&timer->timeout)) {
@@ -235,7 +235,7 @@
}
#ifdef CONFIG_USERSPACE
-static inline u32_t z_vrfy_k_timer_status_sync(struct k_timer *timer)
+static inline uint32_t z_vrfy_k_timer_status_sync(struct k_timer *timer)
{
Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
return z_impl_k_timer_status_sync(timer);
diff --git a/kernel/userspace.c b/kernel/userspace.c
index 04f3239..941a767 100644
--- a/kernel/userspace.c
+++ b/kernel/userspace.c
@@ -55,7 +55,7 @@
#define MAX_THREAD_BITS (CONFIG_MAX_THREAD_BYTES * 8)
#ifdef CONFIG_DYNAMIC_OBJECTS
-extern u8_t _thread_idx_map[CONFIG_MAX_THREAD_BYTES];
+extern uint8_t _thread_idx_map[CONFIG_MAX_THREAD_BYTES];
#endif
static void clear_perms_cb(struct z_object *ko, void *ctx_ptr);
@@ -100,7 +100,7 @@
*/
BUILD_ASSERT(CONFIG_PRIVILEGED_STACK_SIZE % Z_PRIVILEGE_STACK_ALIGN == 0);
-u8_t *z_priv_stack_find(k_thread_stack_t *stack)
+uint8_t *z_priv_stack_find(k_thread_stack_t *stack)
{
struct z_object *obj = z_object_find(stack);
@@ -117,7 +117,7 @@
struct z_object kobj;
sys_dnode_t obj_list;
struct rbnode node; /* must be immediately before data member */
- u8_t data[]; /* The object itself */
+ uint8_t data[]; /* The object itself */
};
extern struct z_object *z_object_gperf_find(void *obj);
diff --git a/kernel/version.c b/kernel/version.c
index a2b7cf0..799e555 100644
--- a/kernel/version.c
+++ b/kernel/version.c
@@ -15,7 +15,7 @@
*
* @return kernel version
*/
-u32_t sys_kernel_version_get(void)
+uint32_t sys_kernel_version_get(void)
{
return KERNELVERSION;
}
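
Because the rename only swaps typedef aliases for the underlying C99
types, it cannot change any sizes or signedness. A quick compile-time
sanity check, as a sketch assuming a C11 toolchain:

    #include <assert.h>
    #include <stdint.h>

    /* The widths the legacy Zephyr names guaranteed, now spelled in C99. */
    static_assert(sizeof(uint8_t)  == 1, "uint8_t replaces u8_t");
    static_assert(sizeof(uint16_t) == 2, "uint16_t replaces u16_t");
    static_assert(sizeof(uint32_t) == 4, "uint32_t replaces u32_t");
    static_assert(sizeof(uint64_t) == 8, "uint64_t replaces u64_t");
    static_assert((int32_t)-1 < 0, "int32_t is signed, like s32_t");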