kernel: add closing comments to config endifs

Add a closing comment to each endif, stating the configuration
option whose block the endif closes.
This makes the code clearer when the configuration options need
to be adapted.
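
For example, a guard that previously ended with a bare #endif now
names the option it closes (this sketch mirrors the pattern applied
throughout the patch):

    #ifdef CONFIG_USERSPACE
    /* ... userspace-only code ... */
    #endif /* CONFIG_USERSPACE */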

Signed-off-by: Simon Hein <Shein@baumer.com>
diff --git a/kernel/atomic_c.c b/kernel/atomic_c.c
index 1790953..c873192 100644
--- a/kernel/atomic_c.c
+++ b/kernel/atomic_c.c
@@ -56,7 +56,7 @@
 #else
 #define ATOMIC_SYSCALL_HANDLER_TARGET(name)
 #define ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(name)
-#endif
+#endif /* CONFIG_USERSPACE */
 
 /**
  *
@@ -411,4 +411,4 @@
 #include <syscalls/atomic_xor_mrsh.c>
 #include <syscalls/atomic_and_mrsh.c>
 #include <syscalls/atomic_nand_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
diff --git a/kernel/banner.c b/kernel/banner.c
index a9925ea..5274c3e 100644
--- a/kernel/banner.c
+++ b/kernel/banner.c
@@ -14,15 +14,15 @@
 #define BANNER_POSTFIX " (delayed boot " DELAY_STR "ms)"
 #else
 #define BANNER_POSTFIX ""
-#endif
+#endif /* defined(CONFIG_BOOT_DELAY) && (CONFIG_BOOT_DELAY > 0) */
 
 #ifndef BANNER_VERSION
 #ifdef BUILD_VERSION
 #define BANNER_VERSION STRINGIFY(BUILD_VERSION)
 #else
 #define BANNER_VERSION KERNEL_VERSION_STRING
-#endif
-#endif
+#endif /* BUILD_VERSION */
+#endif /* !BANNER_VERSION */
 
 void boot_banner(void)
 {
diff --git a/kernel/condvar.c b/kernel/condvar.c
index 1aa26e9..9d8ca7a 100644
--- a/kernel/condvar.c
+++ b/kernel/condvar.c
@@ -14,7 +14,7 @@
 
 #ifdef CONFIG_OBJ_CORE_CONDVAR
 static struct k_obj_type obj_type_condvar;
-#endif
+#endif /* CONFIG_OBJ_CORE_CONDVAR */
 
 static struct k_spinlock lock;
 
@@ -25,7 +25,7 @@
 
 #ifdef CONFIG_OBJ_CORE_CONDVAR
 	k_obj_core_init_and_link(K_OBJ_CORE(condvar), &obj_type_condvar);
-#endif
+#endif /* CONFIG_OBJ_CORE_CONDVAR */
 
 	SYS_PORT_TRACING_OBJ_INIT(k_condvar, condvar, 0);
 
@@ -39,7 +39,7 @@
 	return z_impl_k_condvar_init(condvar);
 }
 #include <syscalls/k_condvar_init_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 int z_impl_k_condvar_signal(struct k_condvar *condvar)
 {
@@ -71,7 +71,7 @@
 	return z_impl_k_condvar_signal(condvar);
 }
 #include <syscalls/k_condvar_signal_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 int z_impl_k_condvar_broadcast(struct k_condvar *condvar)
 {
@@ -104,7 +104,7 @@
 	return z_impl_k_condvar_broadcast(condvar);
 }
 #include <syscalls/k_condvar_broadcast_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 int z_impl_k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
 			  k_timeout_t timeout)
@@ -133,7 +133,7 @@
 	return z_impl_k_condvar_wait(condvar, mutex, timeout);
 }
 #include <syscalls/k_condvar_wait_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 #ifdef CONFIG_OBJ_CORE_CONDVAR
 static int init_condvar_obj_core_list(void)
@@ -155,4 +155,4 @@
 
 SYS_INIT(init_condvar_obj_core_list, PRE_KERNEL_1,
 	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
-#endif
+#endif /* CONFIG_OBJ_CORE_CONDVAR */
diff --git a/kernel/cpu_mask.c b/kernel/cpu_mask.c
index 4efeb29..4f42228 100644
--- a/kernel/cpu_mask.c
+++ b/kernel/cpu_mask.c
@@ -13,7 +13,7 @@
 # ifdef CONFIG_SMP
 /* Right now we use a two byte for this mask */
 BUILD_ASSERT(CONFIG_MP_MAX_NUM_CPUS <= 16, "Too many CPUs for mask word");
-# endif
+# endif /* CONFIG_SMP */
 
 
 static int cpu_mask_mod(k_tid_t thread, uint32_t enable_mask, uint32_t disable_mask)
@@ -23,7 +23,7 @@
 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
 	__ASSERT(z_is_thread_prevented_from_running(thread),
 		 "Running threads cannot change CPU pin");
-#endif
+#endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
 
 	K_SPINLOCK(&_sched_spinlock) {
 		if (z_is_thread_prevented_from_running(thread)) {
@@ -39,7 +39,7 @@
 
 		__ASSERT((m == 0) || ((m & (m - 1)) == 0),
 			 "Only one CPU allowed in mask when PIN_ONLY");
-#endif
+#endif /* defined(CONFIG_ASSERT) && defined(CONFIG_SCHED_CPU_MASK_PIN_ONLY) */
 
 	return ret;
 }
diff --git a/kernel/dynamic.c b/kernel/dynamic.c
index b9d34cb..66cd998 100644
--- a/kernel/dynamic.c
+++ b/kernel/dynamic.c
@@ -20,7 +20,7 @@
 #define BA_SIZE CONFIG_DYNAMIC_THREAD_POOL_SIZE
 #else
 #define BA_SIZE 1
-#endif
+#endif /* CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0 */
 
 struct dyn_cb_data {
 	k_tid_t tid;
@@ -71,7 +71,7 @@
 		 * enabled we can't proceed.
 		 */
 		return NULL;
-#endif
+#endif /* CONFIG_DYNAMIC_OBJECTS */
 	}
 
 	return z_thread_stack_alloc_dyn(Z_KERNEL_STACK_OBJ_ALIGN,
@@ -106,7 +106,7 @@
 	return z_impl_k_thread_stack_alloc(size, flags);
 }
 #include <syscalls/k_thread_stack_alloc_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 static void dyn_cb(const struct k_thread *thread, void *user_data)
 {
@@ -154,7 +154,7 @@
 		}
 #else
 		k_free(stack);
-#endif
+#endif /* CONFIG_USERSPACE */
 	} else {
 		LOG_DBG("Invalid stack %p", stack);
 		return -EINVAL;
@@ -169,4 +169,4 @@
 	return z_impl_k_thread_stack_free(stack);
 }
 #include <syscalls/k_thread_stack_free_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
diff --git a/kernel/events.c b/kernel/events.c
index 8cb90dc..082f418 100644
--- a/kernel/events.c
+++ b/kernel/events.c
@@ -47,7 +47,7 @@
 
 #ifdef CONFIG_OBJ_CORE_EVENT
 static struct k_obj_type obj_type_event;
-#endif
+#endif /* CONFIG_OBJ_CORE_EVENT */
 
 void z_impl_k_event_init(struct k_event *event)
 {
@@ -62,7 +62,7 @@
 
 #ifdef CONFIG_OBJ_CORE_EVENT
 	k_obj_core_init_and_link(K_OBJ_CORE(event), &obj_type_event);
-#endif
+#endif /* CONFIG_OBJ_CORE_EVENT */
 }
 
 #ifdef CONFIG_USERSPACE
@@ -72,7 +72,7 @@
 	z_impl_k_event_init(event);
 }
 #include <syscalls/k_event_init_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 /**
  * @brief determine if desired set of events been satisfied
@@ -191,7 +191,7 @@
 	return z_impl_k_event_post(event, events);
 }
 #include <syscalls/k_event_post_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 uint32_t z_impl_k_event_set(struct k_event *event, uint32_t events)
 {
@@ -205,7 +205,7 @@
 	return z_impl_k_event_set(event, events);
 }
 #include <syscalls/k_event_set_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 uint32_t z_impl_k_event_set_masked(struct k_event *event, uint32_t events,
 			       uint32_t events_mask)
@@ -221,7 +221,7 @@
 	return z_impl_k_event_set_masked(event, events, events_mask);
 }
 #include <syscalls/k_event_set_masked_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 uint32_t z_impl_k_event_clear(struct k_event *event, uint32_t events)
 {
@@ -235,7 +235,7 @@
 	return z_impl_k_event_clear(event, events);
 }
 #include <syscalls/k_event_clear_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 static uint32_t k_event_wait_internal(struct k_event *event, uint32_t events,
 				      unsigned int options, k_timeout_t timeout)
@@ -321,7 +321,7 @@
 	return z_impl_k_event_wait(event, events, reset, timeout);
 }
 #include <syscalls/k_event_wait_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 /**
  * Wait for all of the specified events
@@ -343,7 +343,7 @@
 	return z_impl_k_event_wait_all(event, events, reset, timeout);
 }
 #include <syscalls/k_event_wait_all_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 #ifdef CONFIG_OBJ_CORE_EVENT
 static int init_event_obj_core_list(void)
@@ -364,4 +364,4 @@
 
 SYS_INIT(init_event_obj_core_list, PRE_KERNEL_1,
 	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
-#endif
+#endif /* CONFIG_OBJ_CORE_EVENT */
diff --git a/kernel/fatal.c b/kernel/fatal.c
index dae2eb6..caee224 100644
--- a/kernel/fatal.c
+++ b/kernel/fatal.c
@@ -108,7 +108,7 @@
 	if ((esf != NULL) && arch_is_in_nested_exception(esf)) {
 		LOG_ERR("Fault during interrupt handling\n");
 	}
-#endif
+#endif /* CONFIG_ARCH_HAS_NESTED_EXCEPTION_DETECTION */
 
 	LOG_ERR("Current thread: %p (%s)", thread,
 		thread_name_get(thread));
diff --git a/kernel/idle.c b/kernel/idle.c
index ae39f71..bef193a 100644
--- a/kernel/idle.c
+++ b/kernel/idle.c
@@ -30,7 +30,7 @@
 #endif	/* CONFIG_PM */
 #ifdef CONFIG_SYS_CLOCK_EXISTS
 	sys_clock_idle_exit();
-#endif
+#endif /* CONFIG_SYS_CLOCK_EXISTS */
 }
 
 void idle(void *unused1, void *unused2, void *unused3)
@@ -87,7 +87,7 @@
 		}
 #else
 		k_cpu_idle();
-#endif
+#endif /* CONFIG_PM */
 
 #if !defined(CONFIG_PREEMPT_ENABLED)
 # if !defined(CONFIG_USE_SWITCH) || defined(CONFIG_SPARC)
@@ -103,8 +103,8 @@
 		if (_kernel.ready_q.cache != _current) {
 			z_swap_unlocked();
 		}
-# endif
-#endif
+# endif /* !defined(CONFIG_USE_SWITCH) || defined(CONFIG_SPARC) */
+#endif /* !defined(CONFIG_PREEMPT_ENABLED) */
 	}
 }
 
diff --git a/kernel/include/kernel_arch_interface.h b/kernel/include/kernel_arch_interface.h
index a20c7a7..18944dc 100644
--- a/kernel/include/kernel_arch_interface.h
+++ b/kernel/include/kernel_arch_interface.h
@@ -37,7 +37,7 @@
  * @param usec_to_wait Wait period, in microseconds
  */
 void arch_busy_wait(uint32_t usec_to_wait);
-#endif
+#endif /* CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT */
 
 /** @} */
 
@@ -154,7 +154,7 @@
  */
 static ALWAYS_INLINE void
 arch_thread_return_value_set(struct k_thread *thread, unsigned int value);
-#endif /* CONFIG_USE_SWITCH i*/
+#endif /* CONFIG_USE_SWITCH */
 
 #ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
 /**
diff --git a/kernel/include/kernel_internal.h b/kernel/include/kernel_internal.h
index 34ff4c0..a5e69b8 100644
--- a/kernel/include/kernel_internal.h
+++ b/kernel/include/kernel_internal.h
@@ -43,7 +43,7 @@
 {
 	/* Do nothing */
 }
-#endif
+#endif /* CONFIG_XIP */
 
 #ifdef CONFIG_LINKER_USE_BOOT_SECTION
 void z_bss_zero_boot(void);
@@ -52,7 +52,7 @@
 {
 	/* Do nothing */
 }
-#endif
+#endif /* CONFIG_LINKER_USE_BOOT_SECTION */
 
 #ifdef CONFIG_LINKER_USE_PINNED_SECTION
 void z_bss_zero_pinned(void);
@@ -61,7 +61,7 @@
 {
 	/* Do nothing */
 }
-#endif
+#endif /* CONFIG_LINKER_USE_PINNED_SECTION */
 
 FUNC_NORETURN void z_cstart(void);
 
@@ -135,27 +135,27 @@
 extern void z_smp_init(void);
 #ifdef CONFIG_SYS_CLOCK_EXISTS
 extern void smp_timer_init(void);
-#endif
-#endif
+#endif /* CONFIG_SYS_CLOCK_EXISTS */
+#endif /* CONFIG_SMP */
 
 extern void z_early_rand_get(uint8_t *buf, size_t length);
 
 #if CONFIG_STACK_POINTER_RANDOM
 extern int z_stack_adjust_initialized;
-#endif
+#endif /* CONFIG_STACK_POINTER_RANDOM */
 
 extern struct k_thread z_main_thread;
 
 
 #ifdef CONFIG_MULTITHREADING
 extern struct k_thread z_idle_threads[CONFIG_MP_MAX_NUM_CPUS];
-#endif
+#endif /* CONFIG_MULTITHREADING */
 K_KERNEL_PINNED_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS,
 				    CONFIG_ISR_STACK_SIZE);
 
 #ifdef CONFIG_GEN_PRIV_STACKS
 extern uint8_t *z_priv_stack_find(k_thread_stack_t *stack);
-#endif
+#endif /* CONFIG_GEN_PRIV_STACKS */
 
 /* Calculate stack usage. */
 int z_stack_space_get(const uint8_t *stack_start, size_t size, size_t *unused_ptr);
@@ -189,7 +189,7 @@
  * and synchronously communicate with gdb on host.
  */
 extern int z_gdb_main_loop(struct gdb_ctx *ctx);
-#endif
+#endif /* CONFIG_GDBSTUB */
 
 #ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
 void z_thread_mark_switched_in(void);
@@ -263,7 +263,7 @@
  */
 void pm_system_resume(void);
 
-#endif
+#endif /* CONFIG_PM */
 
 #ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
 /**
@@ -287,7 +287,7 @@
 int z_thread_stats_reset(struct k_obj_core *obj_core);
 int z_thread_stats_disable(struct k_obj_core *obj_core);
 int z_thread_stats_enable(struct k_obj_core *obj_core);
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS_THREAD */
 
 #ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
 int z_cpu_stats_raw(struct k_obj_core *obj_core, void *stats);
@@ -295,7 +295,7 @@
 
 int z_kernel_stats_raw(struct k_obj_core *obj_core, void *stats);
 int z_kernel_stats_query(struct k_obj_core *obj_core, void *stats);
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */
 
 #ifdef __cplusplus
 }
diff --git a/kernel/include/kernel_offsets.h b/kernel/include/kernel_offsets.h
index 5644dbb..41b310a 100644
--- a/kernel/include/kernel_offsets.h
+++ b/kernel/include/kernel_offsets.h
@@ -34,23 +34,23 @@
 
 #if defined(CONFIG_FPU_SHARING)
 GEN_OFFSET_SYM(_cpu_t, fp_ctx);
-#endif
+#endif /* CONFIG_FPU_SHARING */
 
 #ifdef CONFIG_PM
 GEN_OFFSET_SYM(_kernel_t, idle);
-#endif
+#endif /* CONFIG_PM */
 
 #ifndef CONFIG_SCHED_CPU_MASK_PIN_ONLY
 GEN_OFFSET_SYM(_kernel_t, ready_q);
-#endif
+#endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
 
 #ifndef CONFIG_SMP
 GEN_OFFSET_SYM(_ready_q_t, cache);
-#endif
+#endif /* CONFIG_SMP */
 
 #ifdef CONFIG_FPU_SHARING
 GEN_OFFSET_SYM(_kernel_t, current_fp);
-#endif
+#endif /* CONFIG_FPU_SHARING */
 
 GEN_OFFSET_SYM(_thread_base_t, user_options);
 
@@ -60,15 +60,15 @@
 
 #ifdef CONFIG_USE_SWITCH
 GEN_OFFSET_SYM(_thread_t, switch_handle);
-#endif
+#endif /* CONFIG_USE_SWITCH */
 
 #ifdef CONFIG_THREAD_STACK_INFO
 GEN_OFFSET_SYM(_thread_t, stack_info);
-#endif
+#endif /* CONFIG_THREAD_STACK_INFO */
 
 #ifdef CONFIG_THREAD_LOCAL_STORAGE
 GEN_OFFSET_SYM(_thread_t, tls);
-#endif
+#endif /* CONFIG_THREAD_LOCAL_STORAGE */
 
 GEN_ABSOLUTE_SYM(__z_interrupt_stack_SIZEOF, sizeof(z_interrupt_stacks[0]));
 
@@ -76,12 +76,12 @@
 #ifdef CONFIG_DEVICE_DEPS
 GEN_ABSOLUTE_SYM(_DEVICE_STRUCT_HANDLES_OFFSET,
 		 offsetof(struct device, deps));
-#endif
+#endif /* CONFIG_DEVICE_DEPS */
 
 #ifdef CONFIG_PM_DEVICE
 GEN_ABSOLUTE_SYM(_DEVICE_STRUCT_PM_OFFSET,
 		 offsetof(struct device, pm));
-#endif
+#endif /* CONFIG_PM_DEVICE */
 
 /* member offsets in the pm_device structure. Used in image post-processing */
 
diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index 5ef720b..74e4bc9 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -34,7 +34,7 @@
 #else
 #define Z_VALID_PRIO(prio, entry_point) ((prio) == -1)
 #define Z_ASSERT_VALID_PRIO(prio, entry_point) __ASSERT((prio) == -1, "")
-#endif
+#endif /* CONFIG_MULTITHREADING */
 
 void z_sched_init(void);
 void z_move_thread_to_end_of_prio_q(struct k_thread *thread);
@@ -80,7 +80,7 @@
 	return thread->base.is_idle;
 #else
 	return thread == &z_idle_threads[0];
-#endif
+#endif /* CONFIG_SMP */
 #else
 	return false;
 #endif /* CONFIG_MULTITHREADING */
@@ -417,7 +417,7 @@
 #ifdef CONFIG_SCHED_THREAD_USAGE
 	z_sched_usage_stop();
 	z_sched_usage_start(thread);
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE */
 }
 
 #endif /* ZEPHYR_KERNEL_INCLUDE_KSCHED_H_ */
diff --git a/kernel/include/kswap.h b/kernel/include/kswap.h
index 862969a..d3638b6 100644
--- a/kernel/include/kswap.h
+++ b/kernel/include/kswap.h
@@ -15,7 +15,7 @@
 extern void z_check_stack_sentinel(void);
 #else
 #define z_check_stack_sentinel() /**/
-#endif
+#endif /* CONFIG_STACK_SENTINEL */
 
 extern struct k_spinlock _sched_spinlock;
 
@@ -63,7 +63,7 @@
 	 * non-null.
 	 */
 	barrier_dmem_fence_full();
-#endif
+#endif /* CONFIG_SMP */
 }
 
 /* New style context switching.  arch_switch() is a lower level
@@ -99,8 +99,8 @@
 	__ASSERT(arch_irq_unlocked(key) ||
 		 _current->base.thread_state & (_THREAD_DUMMY | _THREAD_DEAD),
 		 "Context switching while holding lock!");
-# endif
-#endif
+# endif /* CONFIG_ARM64 */
+#endif /* CONFIG_SPIN_VALIDATE */
 
 	old_thread = _current;
 
@@ -131,18 +131,18 @@
 		if (!is_spinlock) {
 			z_smp_release_global_lock(new_thread);
 		}
-#endif
+#endif /* CONFIG_SMP */
 		z_thread_mark_switched_out();
 		z_sched_switch_spin(new_thread);
 		_current_cpu->current = new_thread;
 
 #ifdef CONFIG_TIMESLICING
 		z_reset_time_slice(new_thread);
-#endif
+#endif /* CONFIG_TIMESLICING */
 
 #ifdef CONFIG_SPIN_VALIDATE
 		z_spin_lock_set_owner(&_sched_spinlock);
-#endif
+#endif /* CONFIG_SPIN_VALIDATE */
 
 		arch_cohere_stacks(old_thread, NULL, new_thread);
 
@@ -152,7 +152,7 @@
 		 * time.  See z_sched_switch_spin().
 		 */
 		z_requeue_current(old_thread);
-#endif
+#endif /* CONFIG_SMP */
 		void *newsh = new_thread->switch_handle;
 
 		if (IS_ENABLED(CONFIG_SMP)) {
@@ -241,24 +241,24 @@
 	dummy_thread->base.thread_state = _THREAD_DUMMY;
 #ifdef CONFIG_SCHED_CPU_MASK
 	dummy_thread->base.cpu_mask = -1;
-#endif
+#endif /* CONFIG_SCHED_CPU_MASK */
 	dummy_thread->base.user_options = K_ESSENTIAL;
 #ifdef CONFIG_THREAD_STACK_INFO
 	dummy_thread->stack_info.start = 0U;
 	dummy_thread->stack_info.size = 0U;
-#endif
+#endif /* CONFIG_THREAD_STACK_INFO */
 #ifdef CONFIG_USERSPACE
 	dummy_thread->mem_domain_info.mem_domain = &k_mem_domain_default;
-#endif
+#endif /* CONFIG_USERSPACE */
 #if (K_HEAP_MEM_POOL_SIZE > 0)
 	k_thread_system_pool_assign(dummy_thread);
 #else
 	dummy_thread->resource_pool = NULL;
-#endif
+#endif /* K_HEAP_MEM_POOL_SIZE */
 
 #ifdef CONFIG_TIMESLICE_PER_THREAD
 	dummy_thread->base.slice_ticks = 0;
-#endif
+#endif /* CONFIG_TIMESLICE_PER_THREAD */
 
 	_current_cpu->current = dummy_thread;
 }
diff --git a/kernel/include/kthread.h b/kernel/include/kthread.h
index 96a41d3..423e698 100644
--- a/kernel/include/kthread.h
+++ b/kernel/include/kthread.h
@@ -17,7 +17,7 @@
  * thread->next_thread (until NULL)
  */
 extern struct k_spinlock z_thread_monitor_lock;
-#endif
+#endif /* CONFIG_THREAD_MONITOR */
 
 /* clean up when a thread is aborted */
 
@@ -42,8 +42,8 @@
 #else
 	ARG_UNUSED(delay);
 	k_thread_start(thread);
-#endif
+#endif /* CONFIG_SYS_CLOCK_EXISTS */
 }
-#endif
+#endif /* CONFIG_MULTITHREADING */
 
 #endif /* ZEPHYR_KERNEL_INCLUDE_THREAD_H_ */
diff --git a/kernel/include/mmu.h b/kernel/include/mmu.h
index 8879556..4650b65 100644
--- a/kernel/include/mmu.h
+++ b/kernel/include/mmu.h
@@ -54,7 +54,7 @@
 #define Z_FREE_VM_START	Z_BOOT_PHYS_TO_VIRT(Z_PHYS_RAM_END)
 #else
 #define Z_FREE_VM_START	Z_KERNEL_VIRT_END
-#endif
+#endif /* CONFIG_ARCH_MAPS_ALL_RAM */
 
 /*
  * Macros and data structures for physical page frame accounting,
@@ -121,7 +121,7 @@
 } __aligned(4);
 #else
 } __packed;
-#endif
+#endif /* CONFIG_XTENSA */
 
 static inline bool z_page_frame_is_pinned(struct z_page_frame *pf)
 {
@@ -237,7 +237,7 @@
 				     CONFIG_MMU_PAGE_SIZE))
 #else
 #define Z_VM_RESERVED	0
-#endif
+#endif /* CONFIG_DEMAND_PAGING */
 
 #ifdef CONFIG_DEMAND_PAGING
 /*
diff --git a/kernel/include/priority_q.h b/kernel/include/priority_q.h
index 61496aa..e9ea0bf 100644
--- a/kernel/include/priority_q.h
+++ b/kernel/include/priority_q.h
@@ -16,7 +16,7 @@
 #  define _priq_run_best	_priq_dumb_mask_best
 # else
 #  define _priq_run_best	z_priq_dumb_best
-# endif
+# endif /* CONFIG_SCHED_CPU_MASK */
 /* Scalable Scheduling */
 #elif defined(CONFIG_SCHED_SCALABLE)
 #define _priq_run_add		z_priq_rb_add
diff --git a/kernel/include/timeout_q.h b/kernel/include/timeout_q.h
index ad9564f..a62242a 100644
--- a/kernel/include/timeout_q.h
+++ b/kernel/include/timeout_q.h
@@ -73,7 +73,7 @@
 	ARG_UNUSED(ticks);
 }
 
-#endif
+#endif /* CONFIG_SYS_CLOCK_EXISTS */
 
 #ifdef __cplusplus
 }
diff --git a/kernel/init.c b/kernel/init.c
index 81705e8..506e3ef 100644
--- a/kernel/init.c
+++ b/kernel/init.c
@@ -85,7 +85,7 @@
 					      pos->thread);
 		}
 	}
-#endif
+#endif /* CONFIG_USERSPACE */
 
 	/*
 	 * Non-legacy static threads may be started immediately or
@@ -128,12 +128,12 @@
 	INIT_LEVEL_APPLICATION,
 #ifdef CONFIG_SMP
 	INIT_LEVEL_SMP,
-#endif
+#endif /* CONFIG_SMP */
 };
 
 #ifdef CONFIG_SMP
 extern const struct init_entry __init_SMP_start[];
-#endif
+#endif /* CONFIG_SMP */
 
 /*
  * storage space for the interrupt stack
@@ -173,8 +173,8 @@
 	.disable = NULL,
 	.enable  = NULL,
 };
-#endif
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */
+#endif /* CONFIG_OBJ_CORE_SYSTEM */
 
 /* LCOV_EXCL_START
  *
@@ -245,7 +245,7 @@
 #ifdef CONFIG_COVERAGE_GCOV
 	z_early_memset(&__gcov_bss_start, 0,
 		       ((uintptr_t) &__gcov_bss_end - (uintptr_t) &__gcov_bss_start));
-#endif
+#endif /* CONFIG_COVERAGE_GCOV */
 }
 
 #ifdef CONFIG_LINKER_USE_BOOT_SECTION
@@ -279,7 +279,7 @@
 __boot_func
 #else
 __pinned_func
-#endif
+#endif /* CONFIG_LINKER_USE_BOOT_SECTION */
 void z_bss_zero_pinned(void)
 {
 	z_early_memset(&lnkr_pinned_bss_start, 0,
@@ -293,7 +293,7 @@
 extern __thread volatile uintptr_t __stack_chk_guard;
 #else
 extern volatile uintptr_t __stack_chk_guard;
-#endif
+#endif /* CONFIG_STACK_CANARIES_TLS */
 #endif /* CONFIG_STACK_CANARIES */
 
 /* LCOV_EXCL_STOP */
@@ -322,7 +322,7 @@
 		__init_APPLICATION_start,
 #ifdef CONFIG_SMP
 		__init_SMP_start,
-#endif
+#endif /* CONFIG_SMP */
 		/* End marker */
 		__init_end,
 	};
@@ -391,13 +391,13 @@
 	z_sys_init_run_level(INIT_LEVEL_POST_KERNEL);
 #if CONFIG_STACK_POINTER_RANDOM
 	z_stack_adjust_initialized = 1;
-#endif
+#endif /* CONFIG_STACK_POINTER_RANDOM */
 	boot_banner();
 
 #if defined(CONFIG_CPP)
 	void z_cpp_init_static(void);
 	z_cpp_init_static();
-#endif
+#endif /* CONFIG_CPP */
 
 	/* Final init level before app starts */
 	z_sys_init_run_level(INIT_LEVEL_APPLICATION);
@@ -406,14 +406,14 @@
 
 #ifdef CONFIG_KERNEL_COHERENCE
 	__ASSERT_NO_MSG(arch_mem_coherent(&_kernel));
-#endif
+#endif /* CONFIG_KERNEL_COHERENCE */
 
 #ifdef CONFIG_SMP
 	if (!IS_ENABLED(CONFIG_SMP_BOOT_DELAY)) {
 		z_smp_init();
 	}
 	z_sys_init_run_level(INIT_LEVEL_SMP);
-#endif
+#endif /* CONFIG_SMP */
 
 #ifdef CONFIG_MMU
 	z_mem_manage_boot_finish();
@@ -429,7 +429,7 @@
 #ifdef CONFIG_COVERAGE_DUMP
 	/* Dump coverage data once the main() has exited. */
 	gcov_coverage_dump();
-#endif
+#endif /* CONFIG_COVERAGE_DUMP */
 } /* LCOV_EXCL_LINE ... because we just dumped final coverage data */
 
 #if defined(CONFIG_MULTITHREADING)
@@ -446,7 +446,7 @@
 	snprintk(tname, 8, "idle %02d", i);
 #else
 	char *tname = "idle";
-#endif
+#endif /* CONFIG_MP_MAX_NUM_CPUS */
 
 #else
 	char *tname = NULL;
@@ -460,7 +460,7 @@
 
 #ifdef CONFIG_SMP
 	thread->base.is_idle = 1U;
-#endif
+#endif /* CONFIG_SMP */
 }
 
 void z_init_cpu(int id)
@@ -524,7 +524,7 @@
 	 *   to work as intended
 	 */
 	_kernel.ready_q.cache = &z_main_thread;
-#endif
+#endif /* CONFIG_SMP */
 	stack_ptr = z_setup_new_thread(&z_main_thread, z_main_stack,
 				       CONFIG_MAIN_STACK_SIZE, bg_thread_main,
 				       NULL, NULL, NULL,
@@ -551,7 +551,7 @@
 	 * will never be rescheduled in.
 	 */
 	z_swap_unlocked();
-#endif
+#endif /* CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN */
 	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
 }
 #endif /* CONFIG_MULTITHREADING */
@@ -573,7 +573,7 @@
 			buf += rc;
 		}
 	}
-#endif
+#endif /* CONFIG_ENTROPY_HAS_DRIVER */
 
 	while (length > 0) {
 		uint32_t val;
@@ -621,7 +621,7 @@
 	struct k_thread dummy_thread;
 
 	z_dummy_thread_init(&dummy_thread);
-#endif
+#endif /* CONFIG_MULTITHREADING */
 	/* do any necessary initialization of static devices */
 	z_device_state_init();
 
@@ -640,7 +640,7 @@
 #ifdef CONFIG_TIMING_FUNCTIONS_NEED_AT_BOOT
 	timing_init();
 	timing_start();
-#endif
+#endif /* CONFIG_TIMING_FUNCTIONS_NEED_AT_BOOT */
 
 #ifdef CONFIG_MULTITHREADING
 	switch_to_main_thread(prepare_multithreading());
@@ -661,7 +661,7 @@
 	while (true) {
 	}
 	/* LCOV_EXCL_STOP */
-#endif
+#endif /* ARCH_SWITCH_TO_MAIN_NO_MULTITHREADING */
 #endif /* CONFIG_MULTITHREADING */
 
 	/*
@@ -683,7 +683,7 @@
 
 #ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
 	k_obj_type_stats_init(&obj_type_cpu, &cpu_stats_desc);
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */
 
 	return 0;
 }
@@ -697,13 +697,13 @@
 
 #ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
 	k_obj_type_stats_init(&obj_type_kernel, &kernel_stats_desc);
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */
 
 	k_obj_core_init_and_link(K_OBJ_CORE(&_kernel), &obj_type_kernel);
 #ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
 	k_obj_core_stats_register(K_OBJ_CORE(&_kernel), _kernel.usage,
 				  sizeof(_kernel.usage));
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */
 
 	return 0;
 }
@@ -713,4 +713,4 @@
 
 SYS_INIT(init_kernel_obj_core_list, PRE_KERNEL_1,
 	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
-#endif
+#endif /* CONFIG_OBJ_CORE_SYSTEM */
diff --git a/kernel/irq_offload.c b/kernel/irq_offload.c
index e27c210..f6db81c 100644
--- a/kernel/irq_offload.c
+++ b/kernel/irq_offload.c
@@ -19,5 +19,5 @@
 	k_sem_take(&offload_sem, K_FOREVER);
 	arch_irq_offload(routine, parameter);
 	k_sem_give(&offload_sem);
-#endif
+#endif /* CONFIG_IRQ_OFFLOAD_NESTED */
 }
diff --git a/kernel/mailbox.c b/kernel/mailbox.c
index 8e796a4..0d69076 100644
--- a/kernel/mailbox.c
+++ b/kernel/mailbox.c
@@ -22,7 +22,7 @@
 
 #ifdef CONFIG_OBJ_CORE_MAILBOX
 static struct k_obj_type  obj_type_mailbox;
-#endif
+#endif /* CONFIG_OBJ_CORE_MAILBOX */
 
 #if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
 
@@ -97,7 +97,7 @@
 
 #ifdef CONFIG_OBJ_CORE_MAILBOX
 	k_obj_core_init_and_link(K_OBJ_CORE(mbox), &obj_type_mailbox);
-#endif
+#endif /* CONFIG_OBJ_CORE_MAILBOX */
 
 	SYS_PORT_TRACING_OBJ_INIT(k_mbox, mbox);
 }
@@ -189,7 +189,7 @@
 		}
 		return;
 	}
-#endif
+#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS */
 
 	/* synchronous send: wake up sending thread */
 	arch_thread_return_value_set(sending_thread, 0);
@@ -256,7 +256,7 @@
 				z_reschedule(&mbox->lock, key);
 				return 0;
 			}
-#endif
+#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS */
 			SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mbox, message_put, mbox, timeout);
 
 			/*
@@ -286,7 +286,7 @@
 		k_spin_unlock(&mbox->lock, key);
 		return 0;
 	}
-#endif
+#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS */
 	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mbox, message_put, mbox, timeout);
 
 	/* synchronous send: sender waits on tx queue for receiver or timeout */
@@ -335,7 +335,7 @@
 	(void)mbox_message_put(mbox, &async->tx_msg, K_FOREVER);
 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, async_put, mbox, sem);
 }
-#endif
+#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS */
 
 void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer)
 {
@@ -463,4 +463,4 @@
 
 SYS_INIT(init_mailbox_obj_core_list, PRE_KERNEL_1,
 	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
-#endif
+#endif /* CONFIG_OBJ_CORE_MAILBOX */
diff --git a/kernel/mem_domain.c b/kernel/mem_domain.c
index fd8f053..5c41c14 100644
--- a/kernel/mem_domain.c
+++ b/kernel/mem_domain.c
@@ -43,7 +43,7 @@
 			part->start);
 		return false;
 	}
-#endif
+#endif /* CONFIG_EXECUTE_XOR_WRITE */
 
 	if (part->size == 0U) {
 		LOG_ERR("zero sized partition at %p with base 0x%lx",
@@ -124,7 +124,7 @@
 		ret = -ENOMEM;
 		goto unlock_out;
 	}
-#endif
+#endif /* CONFIG_ARCH_MEM_DOMAIN_DATA */
 	if (num_parts != 0U) {
 		uint32_t i;
 
@@ -145,7 +145,7 @@
 			CHECKIF(ret2 != 0) {
 				ret = ret2;
 			}
-#endif
+#endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */
 		}
 	}
 
@@ -200,7 +200,7 @@
 
 #ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
 	ret = arch_mem_domain_partition_add(domain, p_idx);
-#endif
+#endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */
 
 unlock_out:
 	k_spin_unlock(&z_mem_domain_lock, key);
@@ -242,7 +242,7 @@
 
 #ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
 	ret = arch_mem_domain_partition_remove(domain, p_idx);
-#endif
+#endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */
 
 	/* A zero-sized partition denotes it's a free partition */
 	domain->partitions[p_idx].size = 0U;
@@ -271,7 +271,7 @@
 
 #ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
 	ret = arch_mem_domain_thread_add(thread);
-#endif
+#endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */
 
 	return ret;
 }
@@ -287,7 +287,7 @@
 
 #ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
 	ret = arch_mem_domain_thread_remove(thread);
-#endif
+#endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */
 
 	return ret;
 }
diff --git a/kernel/mem_slab.c b/kernel/mem_slab.c
index 3be1066..609063e 100644
--- a/kernel/mem_slab.c
+++ b/kernel/mem_slab.c
@@ -55,7 +55,7 @@
 	ptr->max_allocated_bytes = slab->info.max_used * slab->info.block_size;
 #else
 	ptr->max_allocated_bytes = 0;
-#endif
+#endif /* CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION */
 	k_spin_unlock(&slab->lock, key);
 
 	return 0;
@@ -73,7 +73,7 @@
 
 #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
 	slab->info.max_used = slab->info.num_used;
-#endif
+#endif /* CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION */
 
 	k_spin_unlock(&slab->lock, key);
 
@@ -89,8 +89,8 @@
 	.disable = NULL,
 	.enable = NULL,
 };
-#endif
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS_MEM_SLAB */
+#endif /* CONFIG_OBJ_CORE_MEM_SLAB */
 
 /**
  * @brief Initialize kernel memory slab subsystem.
@@ -141,8 +141,8 @@
 			offsetof(struct k_mem_slab, obj_core));
 #ifdef CONFIG_OBJ_CORE_STATS_MEM_SLAB
 	k_obj_type_stats_init(&obj_type_mem_slab, &mem_slab_stats_desc);
-#endif
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS_MEM_SLAB */
+#endif /* CONFIG_OBJ_CORE_MEM_SLAB */
 
 	/* Initialize statically defined mem_slabs */
 
@@ -158,8 +158,8 @@
 #ifdef CONFIG_OBJ_CORE_STATS_MEM_SLAB
 		k_obj_core_stats_register(K_OBJ_CORE(slab), &slab->info,
 					  sizeof(struct k_mem_slab_info));
-#endif
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS_MEM_SLAB */
+#endif /* CONFIG_OBJ_CORE_MEM_SLAB */
 	}
 
 out:
@@ -182,7 +182,7 @@
 
 #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
 	slab->info.max_used = 0U;
-#endif
+#endif /* CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION */
 
 	rc = create_free_list(slab);
 	if (rc < 0) {
@@ -191,11 +191,11 @@
 
 #ifdef CONFIG_OBJ_CORE_MEM_SLAB
 	k_obj_core_init_and_link(K_OBJ_CORE(slab), &obj_type_mem_slab);
-#endif
+#endif /* CONFIG_OBJ_CORE_MEM_SLAB */
 #ifdef CONFIG_OBJ_CORE_STATS_MEM_SLAB
 	k_obj_core_stats_register(K_OBJ_CORE(slab), &slab->info,
 				  sizeof(struct k_mem_slab_info));
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS_MEM_SLAB */
 
 	z_waitq_init(&slab->wait_q);
 	k_object_init(slab);
@@ -221,7 +221,7 @@
 #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
 		slab->info.max_used = MAX(slab->info.num_used,
 					  slab->info.max_used);
-#endif
+#endif /* CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION */
 
 		result = 0;
 	} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT) ||
@@ -298,7 +298,7 @@
 				     slab->info.block_size;
 #else
 	stats->max_allocated_bytes = 0;
-#endif
+#endif /* CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION */
 
 	k_spin_unlock(&slab->lock, key);
 
@@ -320,4 +320,4 @@
 
 	return 0;
 }
-#endif
+#endif /* CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION */
diff --git a/kernel/mempool.c b/kernel/mempool.c
index b3943b5..13e9c7a 100644
--- a/kernel/mempool.c
+++ b/kernel/mempool.c
@@ -119,7 +119,7 @@
 }
 #else
 #define _SYSTEM_HEAP	NULL
-#endif
+#endif /* K_HEAP_MEM_POOL_SIZE */
 
 void *z_thread_aligned_alloc(size_t align, size_t size)
 {
diff --git a/kernel/mmu.c b/kernel/mmu.c
index eda0c50..6cb444f 100644
--- a/kernel/mmu.c
+++ b/kernel/mmu.c
@@ -24,7 +24,7 @@
 
 #ifdef CONFIG_DEMAND_PAGING
 #include <zephyr/kernel/mm/demand_paging.h>
-#endif
+#endif /* CONFIG_DEMAND_PAGING */
 
 /*
  * General terminology:
@@ -76,7 +76,7 @@
 #define COLOR(x)	printk(_CONCAT(ANSI_, x))
 #else
 #define COLOR(x)	do { } while (false)
-#endif
+#endif /* COLOR_PAGE_FRAMES */
 
 /* LCOV_EXCL_START */
 static void page_frame_dump(struct z_page_frame *pf)
@@ -729,7 +729,7 @@
 	}
 #else
 	ret = z_free_page_count;
-#endif
+#endif /* CONFIG_DEMAND_PAGING */
 	k_spin_unlock(&z_mm_lock, key);
 
 	return ret * (size_t)CONFIG_MMU_PAGE_SIZE;
@@ -767,7 +767,7 @@
 
 #ifndef CONFIG_KERNEL_DIRECT_MAP
 	__ASSERT(!(flags & K_MEM_DIRECT_MAP), "The direct-map is not enabled");
-#endif
+#endif /* CONFIG_KERNEL_DIRECT_MAP */
 	addr_offset = k_mem_region_align(&aligned_phys, &aligned_size,
 					 phys, size,
 					 CONFIG_MMU_PAGE_SIZE);
@@ -959,12 +959,12 @@
 	 * boot process. Will be un-pinned once boot process completes.
 	 */
 	mark_linker_section_pinned(lnkr_boot_start, lnkr_boot_end, true);
-#endif
+#endif /* CONFIG_LINKER_USE_BOOT_SECTION */
 
 #ifdef CONFIG_LINKER_USE_PINNED_SECTION
 	/* Pin the page frames correspondng to the pinned symbols */
 	mark_linker_section_pinned(lnkr_pinned_start, lnkr_pinned_end, true);
-#endif
+#endif /* CONFIG_LINKER_USE_PINNED_SECTION */
 
 	/* Any remaining pages that aren't mapped, reserved, or pinned get
 	 * added to the free pages list
@@ -979,10 +979,10 @@
 #ifdef CONFIG_DEMAND_PAGING
 #ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
 	z_paging_histogram_init();
-#endif
+#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
 	k_mem_paging_backing_store_init();
 	k_mem_paging_eviction_init();
-#endif
+#endif /* CONFIG_DEMAND_PAGING */
 #if __ASSERT_ON
 	page_frames_initialized = true;
 #endif
@@ -996,7 +996,7 @@
 	 * memory to be cleared.
 	 */
 	z_bss_zero();
-#endif
+#endif /* CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */
 }
 
 void z_mem_manage_boot_finish(void)
@@ -1006,7 +1006,7 @@
 	 * as they don't need to be in memory all the time anymore.
 	 */
 	mark_linker_section_pinned(lnkr_boot_start, lnkr_boot_end, false);
-#endif
+#endif /* CONFIG_LINKER_USE_BOOT_SECTION */
 }
 
 #ifdef CONFIG_DEMAND_PAGING
@@ -1016,7 +1016,7 @@
 extern struct k_mem_paging_histogram_t z_paging_histogram_eviction;
 extern struct k_mem_paging_histogram_t z_paging_histogram_backing_store_page_in;
 extern struct k_mem_paging_histogram_t z_paging_histogram_backing_store_page_out;
-#endif
+#endif /* CONFIG_DEMAND_PAGING_STATS */
 
 static inline void do_backing_store_page_in(uintptr_t location)
 {
@@ -1162,7 +1162,7 @@
 	__ASSERT(!z_page_frame_is_busy(pf), "page frame 0x%lx is already busy",
 		 phys);
 	pf->flags |= Z_PAGE_FRAME_BUSY;
-#endif
+#endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */
 	/* Update dirty parameter, since we set to true if it wasn't backed
 	 * even if otherwise clean
 	 */
@@ -1320,7 +1320,7 @@
 	}
 #else
 	ARG_UNUSED(faulting_thread);
-#endif
+#endif /* CONFIG_DEMAND_PAGING_THREAD_STATS */
 
 #ifndef CONFIG_DEMAND_PAGING_ALLOW_IRQ
 	if (k_is_in_isr()) {
@@ -1328,7 +1328,7 @@
 
 #ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
 		faulting_thread->paging_stats.pagefaults.in_isr++;
-#endif
+#endif /* CONFIG_DEMAND_PAGING_THREAD_STATS */
 	}
 #endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */
 #endif /* CONFIG_DEMAND_PAGING_STATS */
diff --git a/kernel/msg_q.c b/kernel/msg_q.c
index e11e9a2..b315cde 100644
--- a/kernel/msg_q.c
+++ b/kernel/msg_q.c
@@ -27,7 +27,7 @@
 
 #ifdef CONFIG_OBJ_CORE_MSGQ
 static struct k_obj_type obj_type_msgq;
-#endif
+#endif /* CONFIG_OBJ_CORE_MSGQ */
 
 #ifdef CONFIG_POLL
 static inline void handle_poll_events(struct k_msgq *msgq, uint32_t state)
@@ -55,7 +55,7 @@
 
 #ifdef CONFIG_OBJ_CORE_MSGQ
 	k_obj_core_init_and_link(K_OBJ_CORE(msgq), &obj_type_msgq);
-#endif
+#endif /* CONFIG_OBJ_CORE_MSGQ */
 
 	SYS_PORT_TRACING_OBJ_INIT(k_msgq, msgq);
 
@@ -98,7 +98,7 @@
 	return z_impl_k_msgq_alloc_init(msgq, msg_size, max_msgs);
 }
 #include <syscalls/k_msgq_alloc_init_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 int k_msgq_cleanup(struct k_msgq *msgq)
 {
@@ -193,7 +193,7 @@
 	return z_impl_k_msgq_put(msgq, data, timeout);
 }
 #include <syscalls/k_msgq_put_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 void z_impl_k_msgq_get_attrs(struct k_msgq *msgq, struct k_msgq_attrs *attrs)
 {
@@ -211,7 +211,7 @@
 	z_impl_k_msgq_get_attrs(msgq, attrs);
 }
 #include <syscalls/k_msgq_get_attrs_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout)
 {
@@ -291,7 +291,7 @@
 	return z_impl_k_msgq_get(msgq, data, timeout);
 }
 #include <syscalls/k_msgq_get_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 int z_impl_k_msgq_peek(struct k_msgq *msgq, void *data)
 {
@@ -325,7 +325,7 @@
 	return z_impl_k_msgq_peek(msgq, data);
 }
 #include <syscalls/k_msgq_peek_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 int z_impl_k_msgq_peek_at(struct k_msgq *msgq, void *data, uint32_t idx)
 {
@@ -371,7 +371,7 @@
 	return z_impl_k_msgq_peek_at(msgq, data, idx);
 }
 #include <syscalls/k_msgq_peek_at_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 void z_impl_k_msgq_purge(struct k_msgq *msgq)
 {
@@ -416,7 +416,7 @@
 }
 #include <syscalls/k_msgq_num_used_get_mrsh.c>
 
-#endif
+#endif /* CONFIG_USERSPACE */
 
 #ifdef CONFIG_OBJ_CORE_MSGQ
 static int init_msgq_obj_core_list(void)
@@ -438,4 +438,4 @@
 SYS_INIT(init_msgq_obj_core_list, PRE_KERNEL_1,
 	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
 
-#endif
+#endif /* CONFIG_OBJ_CORE_MSGQ */
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 328a75c..e5ead48 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -49,7 +49,7 @@
 
 #ifdef CONFIG_OBJ_CORE_MUTEX
 static struct k_obj_type obj_type_mutex;
-#endif
+#endif /* CONFIG_OBJ_CORE_MUTEX */
 
 int z_impl_k_mutex_init(struct k_mutex *mutex)
 {
@@ -62,7 +62,7 @@
 
 #ifdef CONFIG_OBJ_CORE_MUTEX
 	k_obj_core_init_and_link(K_OBJ_CORE(mutex), &obj_type_mutex);
-#endif
+#endif /* CONFIG_OBJ_CORE_MUTEX */
 
 	SYS_PORT_TRACING_OBJ_INIT(k_mutex, mutex, 0);
 
@@ -76,7 +76,7 @@
 	return z_impl_k_mutex_init(mutex);
 }
 #include <syscalls/k_mutex_init_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 static int32_t new_prio_for_inheritance(int32_t target, int32_t limit)
 {
@@ -205,7 +205,7 @@
 	return z_impl_k_mutex_lock(mutex, timeout);
 }
 #include <syscalls/k_mutex_lock_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 int z_impl_k_mutex_unlock(struct k_mutex *mutex)
 {
@@ -289,7 +289,7 @@
 	return z_impl_k_mutex_unlock(mutex);
 }
 #include <syscalls/k_mutex_unlock_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 #ifdef CONFIG_OBJ_CORE_MUTEX
 static int init_mutex_obj_core_list(void)
@@ -310,4 +310,4 @@
 
 SYS_INIT(init_mutex_obj_core_list, PRE_KERNEL_1,
 	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
-#endif
+#endif /* CONFIG_OBJ_CORE_MUTEX */
diff --git a/kernel/obj_core.c b/kernel/obj_core.c
index 8ac4f73..ebd8843 100644
--- a/kernel/obj_core.c
+++ b/kernel/obj_core.c
@@ -28,7 +28,7 @@
 	obj_core->type = type;
 #ifdef CONFIG_OBJ_CORE_STATS
 	obj_core->stats = NULL;
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS */
 }
 
 void k_obj_core_link(struct k_obj_core *obj_core)
@@ -324,4 +324,4 @@
 
 	return rv;
 }
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS */
diff --git a/kernel/pipes.c b/kernel/pipes.c
index 355377c..de5c665 100644
--- a/kernel/pipes.c
+++ b/kernel/pipes.c
@@ -33,7 +33,7 @@
 			     k_timeout_t timeout);
 #ifdef CONFIG_OBJ_CORE_PIPE
 static struct k_obj_type obj_type_pipe;
-#endif
+#endif /* CONFIG_OBJ_CORE_PIPE */
 
 
 void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size)
@@ -52,12 +52,12 @@
 
 #if defined(CONFIG_POLL)
 	sys_dlist_init(&pipe->poll_events);
-#endif
+#endif /* CONFIG_POLL */
 	k_object_init(pipe);
 
 #ifdef CONFIG_OBJ_CORE_PIPE
 	k_obj_core_init_and_link(K_OBJ_CORE(pipe), &obj_type_pipe);
-#endif
+#endif /* CONFIG_OBJ_CORE_PIPE */
 }
 
 int z_impl_k_pipe_alloc_init(struct k_pipe *pipe, size_t size)
@@ -94,7 +94,7 @@
 	return z_impl_k_pipe_alloc_init(pipe, size);
 }
 #include <syscalls/k_pipe_alloc_init_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 static inline void handle_poll_events(struct k_pipe *pipe)
 {
@@ -102,7 +102,7 @@
 	z_handle_obj_poll_events(&pipe->poll_events, K_POLL_STATE_PIPE_DATA_AVAILABLE);
 #else
 	ARG_UNUSED(pipe);
-#endif
+#endif /* CONFIG_POLL */
 }
 
 void z_impl_k_pipe_flush(struct k_pipe *pipe)
@@ -127,7 +127,7 @@
 	z_impl_k_pipe_flush(pipe);
 }
 #include <syscalls/k_pipe_flush_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 void z_impl_k_pipe_buffer_flush(struct k_pipe *pipe)
 {
@@ -154,7 +154,7 @@
 
 	z_impl_k_pipe_buffer_flush(pipe);
 }
-#endif
+#endif /* CONFIG_USERSPACE */
 
 int k_pipe_cleanup(struct k_pipe *pipe)
 {
@@ -526,7 +526,7 @@
 				 timeout);
 }
 #include <syscalls/k_pipe_put_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 static int pipe_get_internal(k_spinlock_key_t key, struct k_pipe *pipe,
 			     void *data, size_t bytes_to_read,
@@ -734,7 +734,7 @@
 				timeout);
 }
 #include <syscalls/k_pipe_get_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 size_t z_impl_k_pipe_read_avail(struct k_pipe *pipe)
 {
@@ -771,7 +771,7 @@
 	return z_impl_k_pipe_read_avail(pipe);
 }
 #include <syscalls/k_pipe_read_avail_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 size_t z_impl_k_pipe_write_avail(struct k_pipe *pipe)
 {
@@ -808,7 +808,7 @@
 	return z_impl_k_pipe_write_avail(pipe);
 }
 #include <syscalls/k_pipe_write_avail_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 #ifdef CONFIG_OBJ_CORE_PIPE
 static int init_pipe_obj_core_list(void)
@@ -829,4 +829,4 @@
 
 SYS_INIT(init_pipe_obj_core_list, PRE_KERNEL_1,
 	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
-#endif
+#endif /* CONFIG_OBJ_CORE_PIPE */
diff --git a/kernel/poll.c b/kernel/poll.c
index d983af7..185aba6 100644
--- a/kernel/poll.c
+++ b/kernel/poll.c
@@ -93,7 +93,7 @@
 			*state = K_POLL_STATE_PIPE_DATA_AVAILABLE;
 			return true;
 		}
-#endif
+#endif /* CONFIG_PIPES */
 	case K_POLL_TYPE_IGNORE:
 		break;
 	default:
@@ -159,7 +159,7 @@
 		__ASSERT(event->pipe != NULL, "invalid pipe\n");
 		add_event(&event->pipe->poll_events, event, poller);
 		break;
-#endif
+#endif /* CONFIG_PIPES */
 	case K_POLL_TYPE_IGNORE:
 		/* nothing to do */
 		break;
@@ -200,7 +200,7 @@
 		__ASSERT(event->pipe != NULL, "invalid pipe\n");
 		remove_event = true;
 		break;
-#endif
+#endif /* CONFIG_PIPES */
 	case K_POLL_TYPE_IGNORE:
 		/* nothing to do */
 		break;
@@ -417,7 +417,7 @@
 		case K_POLL_TYPE_PIPE_DATA_AVAILABLE:
 			K_OOPS(K_SYSCALL_OBJ(e->pipe, K_OBJ_PIPE));
 			break;
-#endif
+#endif /* CONFIG_PIPES */
 		default:
 			ret = -EINVAL;
 			goto out_free;
@@ -435,7 +435,7 @@
 	K_OOPS(1);
 }
 #include <syscalls/k_poll_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 /* must be called with interrupts locked */
 static int signal_poll_event(struct k_poll_event *event, uint32_t state)
@@ -494,7 +494,7 @@
 	z_impl_k_poll_signal_init(sig);
 }
 #include <syscalls/k_poll_signal_init_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 void z_impl_k_poll_signal_reset(struct k_poll_signal *sig)
 {
@@ -522,7 +522,7 @@
 	z_impl_k_poll_signal_check(sig, signaled, result);
 }
 #include <syscalls/k_poll_signal_check_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 int z_impl_k_poll_signal_raise(struct k_poll_signal *sig, int result)
 {
@@ -565,7 +565,7 @@
 }
 #include <syscalls/k_poll_signal_reset_mrsh.c>
 
-#endif
+#endif /* CONFIG_USERSPACE */
 
 static void triggered_work_handler(struct k_work *work)
 {
diff --git a/kernel/queue.c b/kernel/queue.c
index 04aaa14..b99bfb0 100644
--- a/kernel/queue.c
+++ b/kernel/queue.c
@@ -76,7 +76,7 @@
 	z_impl_k_queue_init(queue);
 }
 #include <syscalls/k_queue_init_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 static void prepare_thread_to_run(struct k_thread *thread, void *data)
 {
@@ -91,7 +91,7 @@
 #else
 	ARG_UNUSED(queue);
 	ARG_UNUSED(state);
-#endif
+#endif /* CONFIG_POLL */
 }
 
 void z_impl_k_queue_cancel_wait(struct k_queue *queue)
@@ -118,7 +118,7 @@
 	z_impl_k_queue_cancel_wait(queue);
 }
 #include <syscalls/k_queue_cancel_wait_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 static int32_t queue_insert(struct k_queue *queue, void *prev, void *data,
 			    bool alloc, bool is_append)
@@ -221,7 +221,7 @@
 	return z_impl_k_queue_alloc_append(queue, data);
 }
 #include <syscalls/k_queue_alloc_append_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 int32_t z_impl_k_queue_alloc_prepend(struct k_queue *queue, void *data)
 {
@@ -242,7 +242,7 @@
 	return z_impl_k_queue_alloc_prepend(queue, data);
 }
 #include <syscalls/k_queue_alloc_prepend_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 int k_queue_append_list(struct k_queue *queue, void *head, void *tail)
 {
@@ -454,7 +454,7 @@
 
 SYS_INIT(init_fifo_obj_core_list, PRE_KERNEL_1,
 	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
-#endif
+#endif /* CONFIG_OBJ_CORE_FIFO */
 
 #ifdef CONFIG_OBJ_CORE_LIFO
 struct k_obj_type _obj_type_lifo;
@@ -477,4 +477,4 @@
 
 SYS_INIT(init_lifo_obj_core_list, PRE_KERNEL_1,
 	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
-#endif
+#endif /* CONFIG_OBJ_CORE_LIFO */
diff --git a/kernel/sched.c b/kernel/sched.c
index 5797404..bcc80d6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -50,7 +50,7 @@
 #else
 	ARG_UNUSED(thread);
 	return 0;
-#endif
+#endif /* CONFIG_NUM_METAIRQ_PRIORITIES */
 }
 
 #if CONFIG_ASSERT
@@ -58,7 +58,7 @@
 {
 	return (thread->base.thread_state & _THREAD_DUMMY) != 0U;
 }
-#endif
+#endif /* CONFIG_ASSERT */
 
 /*
  * Return value same as e.g. memcmp
@@ -98,7 +98,7 @@
 		 */
 		return (int32_t) (d2 - d1);
 	}
-#endif
+#endif /* CONFIG_SCHED_DEADLINE */
 	return 0;
 }
 
@@ -154,7 +154,7 @@
 	}
 	return NULL;
 }
-#endif
+#endif /* CONFIG_SCHED_CPU_MASK */
 
 #if defined(CONFIG_SCHED_DUMB) || defined(CONFIG_WAITQ_DUMB)
 static ALWAYS_INLINE void z_priq_dumb_add(sys_dlist_t *pq,
@@ -174,7 +174,7 @@
 
 	sys_dlist_append(pq, &thread->base.qnode_dlist);
 }
-#endif
+#endif /* CONFIG_SCHED_DUMB || CONFIG_WAITQ_DUMB */
 
 static ALWAYS_INLINE void *thread_runq(struct k_thread *thread)
 {
@@ -193,7 +193,7 @@
 #else
 	ARG_UNUSED(thread);
 	return &_kernel.ready_q.runq;
-#endif
+#endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
 }
 
 static ALWAYS_INLINE void *curr_cpu_runq(void)
@@ -202,7 +202,7 @@
 	return &arch_curr_cpu()->ready_q.runq;
 #else
 	return &_kernel.ready_q.runq;
-#endif
+#endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
 }
 
 static ALWAYS_INLINE void runq_add(struct k_thread *thread)
@@ -239,7 +239,7 @@
 		/* add current to end of queue means "yield" */
 		_current_cpu->swap_ok = true;
 	}
-#endif
+#endif /* CONFIG_SMP */
 }
 
 static ALWAYS_INLINE void dequeue_thread(struct k_thread *thread)
@@ -266,7 +266,7 @@
 			arch_sched_ipi();
 		}
 	}
-#endif
+#endif /* CONFIG_SMP && CONFIG_SCHED_IPI_SUPPORTED */
 }
 
 #ifdef CONFIG_SMP
@@ -296,7 +296,7 @@
 	return (thread->base.thread_state &
 		(_THREAD_ABORTING | _THREAD_SUSPENDING)) != 0U;
 }
-#endif
+#endif /* CONFIG_SMP */
 
 /* Clear the halting bits (_THREAD_ABORTING and _THREAD_SUSPENDING) */
 static inline void clear_halting(struct k_thread *thread)
@@ -311,7 +311,7 @@
 		halt_thread(_current, is_aborting(_current) ?
 				      _THREAD_DEAD : _THREAD_SUSPENDED);
 	}
-#endif
+#endif /* CONFIG_SMP */
 
 	struct k_thread *thread = runq_best();
 
@@ -332,6 +332,9 @@
 		}
 	}
 #endif
+/* CONFIG_NUM_METAIRQ_PRIORITIES > 0 &&
+ * CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES
+ */
 
 #ifndef CONFIG_SMP
 	/* In uniprocessor mode, we can leave the current thread in
@@ -386,7 +389,7 @@
 
 	_current_cpu->swap_ok = false;
 	return thread;
-#endif
+#endif /* CONFIG_SMP */
 }
 
 static void move_thread_to_end_of_prio_q(struct k_thread *thread)
@@ -404,7 +407,7 @@
 	if (arch_num_cpus() > 1) {
 		_kernel.pending_ipi = true;
 	}
-#endif
+#endif /* CONFIG_SMP && CONFIG_SCHED_IPI_SUPPORTED */
 }
 
 #ifdef CONFIG_TIMESLICING
@@ -421,7 +424,7 @@
  * a noop condition in z_time_slice().
  */
 static struct k_thread *pending_current;
-#endif
+#endif /* CONFIG_SWAP_NONATOMIC */
 
 static inline int slice_time(struct k_thread *thread)
 {
@@ -433,7 +436,7 @@
 	}
 #else
 	ARG_UNUSED(thread);
-#endif
+#endif /* CONFIG_TIMESLICE_PER_THREAD */
 	return ret;
 }
 
@@ -447,7 +450,7 @@
 
 #ifdef CONFIG_TIMESLICE_PER_THREAD
 	ret |= thread->base.slice_ticks != 0;
-#endif
+#endif /* CONFIG_TIMESLICE_PER_THREAD */
 
 	return ret;
 }
@@ -498,7 +501,7 @@
 		thread->base.slice_data = data;
 	}
 }
-#endif
+#endif /* CONFIG_TIMESLICE_PER_THREAD */
 
 /* Called out of each timer interrupt */
 void z_time_slice(void)
@@ -513,7 +516,7 @@
 		return;
 	}
 	pending_current = NULL;
-#endif
+#endif /* CONFIG_SWAP_NONATOMIC */
 
 	if (slice_expired[_current_cpu->id] && sliceable(curr)) {
 #ifdef CONFIG_TIMESLICE_PER_THREAD
@@ -522,7 +525,7 @@
 			curr->base.slice_expired(curr, curr->base.slice_data);
 			key = k_spin_lock(&_sched_spinlock);
 		}
-#endif
+#endif /* CONFIG_TIMESLICE_PER_THREAD */
 		if (!z_is_thread_prevented_from_running(curr)) {
 			move_thread_to_end_of_prio_q(curr);
 		}
@@ -530,7 +533,7 @@
 	}
 	k_spin_unlock(&_sched_spinlock, key);
 }
-#endif
+#endif /* CONFIG_TIMESLICING */
 
 /* Track cooperative threads preempted by metairqs so we can return to
  * them specifically.  Called at the moment a new thread has been
@@ -551,6 +554,9 @@
 #else
 	ARG_UNUSED(thread);
 #endif
+/* CONFIG_NUM_METAIRQ_PRIORITIES > 0 &&
+ * CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES
+ */
 }
 
 static void update_cache(int preempt_ok)
@@ -563,7 +569,7 @@
 		if (thread != _current) {
 			z_reset_time_slice(thread);
 		}
-#endif
+#endif /* CONFIG_TIMESLICING */
 		update_metairq_preempt(thread);
 		_kernel.ready_q.cache = thread;
 	} else {
@@ -578,7 +584,7 @@
 	 * reason the scheduler will make the same decision anyway.
 	 */
 	_current_cpu->swap_ok = preempt_ok;
-#endif
+#endif /* CONFIG_SMP */
 }
 
 static bool thread_active_elsewhere(struct k_thread *thread)
@@ -598,7 +604,7 @@
 			return true;
 		}
 	}
-#endif
+#endif /* CONFIG_SMP */
 	ARG_UNUSED(thread);
 	return false;
 }
@@ -607,7 +613,7 @@
 {
 #ifdef CONFIG_KERNEL_COHERENCE
 	__ASSERT_NO_MSG(arch_mem_coherent(thread));
-#endif
+#endif /* CONFIG_KERNEL_COHERENCE */
 
 	/* If thread is queued already, do not try and added it to the
 	 * run queue again
@@ -693,7 +699,7 @@
 		 */
 #ifdef CONFIG_SCHED_IPI_SUPPORTED
 		arch_sched_ipi();
-#endif
+#endif /* CONFIG_SCHED_IPI_SUPPORTED */
 	}
 
 	if (is_halting(thread) && (thread != _current)) {
@@ -719,7 +725,7 @@
 		}
 		return; /* lock has been released */
 	}
-#endif
+#endif /* CONFIG_SMP */
 	halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED);
 	if ((thread == _current) && !arch_is_in_isr()) {
 		z_swap(&_sched_spinlock, key);
@@ -757,7 +763,7 @@
 	z_impl_k_thread_suspend(thread);
 }
 #include <syscalls/k_thread_suspend_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 void z_impl_k_thread_resume(struct k_thread *thread)
 {
@@ -786,7 +792,7 @@
 	z_impl_k_thread_resume(thread);
 }
 #include <syscalls/k_thread_resume_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 static _wait_q_t *pended_on_thread(struct k_thread *thread)
 {
@@ -829,7 +835,7 @@
 {
 #ifdef CONFIG_KERNEL_COHERENCE
 	__ASSERT_NO_MSG(wait_q == NULL || arch_mem_coherent(wait_q));
-#endif
+#endif /* CONFIG_KERNEL_COHERENCE */
 	add_to_waitq_locked(thread, wait_q);
 	add_thread_timeout(thread, timeout);
 }
@@ -873,7 +879,7 @@
 		if (do_nothing) {
 			continue;
 		}
-#endif
+#endif /* CONFIG_EVENTS */
 
 		if (!killed) {
 			/* The thread is not being killed */
@@ -899,14 +905,14 @@
 
 	z_sched_wake_thread(thread, true);
 }
-#endif
+#endif /* CONFIG_SYS_CLOCK_EXISTS */
 
 int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
 	       _wait_q_t *wait_q, k_timeout_t timeout)
 {
 #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
 	pending_current = _current;
-#endif
+#endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
 	__ASSERT_NO_MSG(sizeof(_sched_spinlock) == 0 || lock != &_sched_spinlock);
 
 	/* We do a "lock swap" prior to calling z_swap(), such that
@@ -994,7 +1000,7 @@
 {
 #ifdef CONFIG_SMP
 	_current_cpu->swap_ok = 0;
-#endif
+#endif /* CONFIG_SMP */
 
 	return arch_irq_unlocked(key) && !arch_is_in_isr();
 }
@@ -1014,7 +1020,7 @@
 	/* Check if the next ready thread is the same as the current thread */
 	new_thread = _kernel.ready_q.cache;
 	return new_thread != _current;
-#endif
+#endif /* CONFIG_SMP */
 }
 
 void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
@@ -1079,7 +1085,7 @@
 	return ret;
 #else
 	return _kernel.ready_q.cache;
-#endif
+#endif /* CONFIG_SMP */
 }
 
 #ifdef CONFIG_USE_SWITCH
@@ -1148,7 +1154,7 @@
 
 #ifdef CONFIG_TIMESLICING
 			z_reset_time_slice(new_thread);
-#endif
+#endif /* CONFIG_TIMESLICING */
 
 #ifdef CONFIG_SPIN_VALIDATE
 			/* Changed _current!  Update the spinlock
@@ -1157,7 +1163,7 @@
 			 * release the lock.
 			 */
 			z_spin_lock_set_owner(&_sched_spinlock);
-#endif
+#endif /* CONFIG_SPIN_VALIDATE */
 
 			/* A queued (runnable) old/current thread
 			 * needs to be added back to the run queue
@@ -1183,9 +1189,9 @@
 	_current->switch_handle = interrupted;
 	set_current(_kernel.ready_q.cache);
 	return _current->switch_handle;
-#endif
+#endif /* CONFIG_SMP */
 }
-#endif
+#endif /* CONFIG_USE_SWITCH */
 
 int z_unpend_all(_wait_q_t *wait_q)
 {
@@ -1226,7 +1232,7 @@
 	}
 #else
 	init_ready_q(&_kernel.ready_q);
-#endif
+#endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
 }
 
 int z_impl_k_thread_priority_get(k_tid_t thread)
@@ -1241,7 +1247,7 @@
 	return z_impl_k_thread_priority_get(thread);
 }
 #include <syscalls/k_thread_priority_get_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 void z_impl_k_thread_priority_set(k_tid_t thread, int prio)
 {
@@ -1270,11 +1276,11 @@
 	K_OOPS(K_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio,
 				    "thread priority may only be downgraded (%d < %d)",
 				    prio, thread->base.prio));
-#endif
+#endif /* CONFIG_USERSPACE_THREAD_MAY_RAISE_PRIORITY */
 	z_impl_k_thread_priority_set(thread, prio);
 }
 #include <syscalls/k_thread_priority_set_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 #ifdef CONFIG_SCHED_DEADLINE
 void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline)
@@ -1312,8 +1318,8 @@
 	z_impl_k_thread_deadline_set((k_tid_t)thread, deadline);
 }
 #include <syscalls/k_thread_deadline_set_mrsh.c>
-#endif
-#endif
+#endif /* CONFIG_USERSPACE */
+#endif /* CONFIG_SCHED_DEADLINE */
 
 bool k_can_yield(void)
 {
@@ -1344,7 +1350,7 @@
 	z_impl_k_yield();
 }
 #include <syscalls/k_yield_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 static int32_t z_tick_sleep(k_ticks_t ticks)
 {
@@ -1360,7 +1366,7 @@
 		k_yield();
 		return 0;
 	}
-#endif
+#endif /* CONFIG_MULTITHREADING */
 
 	if (Z_TICK_ABS(ticks) <= 0) {
 		expected_wakeup_ticks = ticks + sys_clock_tick_get_32();
@@ -1374,7 +1380,7 @@
 
 #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
 	pending_current = _current;
-#endif
+#endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
 	unready_thread(_current);
 	z_add_thread_timeout(_current, timeout);
 	z_mark_thread_as_suspended(_current);
@@ -1390,7 +1396,7 @@
 #else
 	/* busy wait to be time coherent since subsystems may depend on it */
 	z_impl_k_busy_wait(k_ticks_to_us_ceil32(expected_wakeup_ticks));
-#endif
+#endif /* CONFIG_MULTITHREADING */
 
 	return 0;
 }
@@ -1410,7 +1416,7 @@
 #else
 		/* In Single Thread, just wait for an interrupt saving power */
 		k_cpu_idle();
-#endif
+#endif /* CONFIG_MULTITHREADING */
 		SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, (int32_t) K_TICKS_FOREVER);
 
 		return (int32_t) K_TICKS_FOREVER;
@@ -1433,7 +1439,7 @@
 	return z_impl_k_sleep(timeout);
 }
 #include <syscalls/k_sleep_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 int32_t z_impl_k_usleep(int us)
 {
@@ -1457,7 +1463,7 @@
 	return z_impl_k_usleep(us);
 }
 #include <syscalls/k_usleep_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 void z_impl_k_wakeup(k_tid_t thread)
 {
@@ -1491,7 +1497,7 @@
 
 #ifdef CONFIG_TRACE_SCHED_IPI
 extern void z_trace_sched_ipi(void);
-#endif
+#endif /* CONFIG_TRACE_SCHED_IPI */
 
 #ifdef CONFIG_SMP
 void z_sched_ipi(void)
@@ -1501,15 +1507,15 @@
 	 */
 #ifdef CONFIG_TRACE_SCHED_IPI
 	z_trace_sched_ipi();
-#endif
+#endif /* CONFIG_TRACE_SCHED_IPI */
 
 #ifdef CONFIG_TIMESLICING
 	if (sliceable(_current)) {
 		z_time_slice();
 	}
-#endif
+#endif /* CONFIG_TIMESLICING */
 }
-#endif
+#endif /* CONFIG_SMP */
 
 #ifdef CONFIG_USERSPACE
 static inline void z_vrfy_k_wakeup(k_tid_t thread)
@@ -1518,7 +1524,7 @@
 	z_impl_k_wakeup(thread);
 }
 #include <syscalls/k_wakeup_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 k_tid_t z_impl_k_sched_current_thread_query(void)
 {
@@ -1528,13 +1534,13 @@
 	 * local interrupts when reading it.
 	 */
 	unsigned int k = arch_irq_lock();
-#endif
+#endif /* CONFIG_SMP */
 
 	k_tid_t ret = _current_cpu->current;
 
 #ifdef CONFIG_SMP
 	arch_irq_unlock(k);
-#endif
+#endif /* CONFIG_SMP */
 	return ret;
 }
 
@@ -1544,7 +1550,7 @@
 	return z_impl_k_sched_current_thread_query();
 }
 #include <syscalls/k_sched_current_thread_query_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 int z_impl_k_is_preempt_thread(void)
 {
@@ -1557,7 +1563,7 @@
 	return z_impl_k_is_preempt_thread();
 }
 #include <syscalls/k_is_preempt_thread_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 static inline void unpend_all(_wait_q_t *wait_q)
 {
@@ -1573,7 +1579,7 @@
 
 #ifdef CONFIG_THREAD_ABORT_HOOK
 extern void thread_abort_hook(struct k_thread *thread);
-#endif
+#endif /* CONFIG_THREAD_ABORT_HOOK */
 
 /**
  * @brief Dequeues the specified thread
@@ -1604,7 +1610,7 @@
 		}
 #ifdef CONFIG_SMP
 		unpend_all(&thread->halt_queue);
-#endif
+#endif /* CONFIG_SMP */
 		update_cache(1);
 
 		if (new_state == _THREAD_SUSPENDED) {
@@ -1613,28 +1619,28 @@
 
 #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
 		arch_float_disable(thread);
-#endif
+#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
 
 		SYS_PORT_TRACING_FUNC(k_thread, sched_abort, thread);
 
 		z_thread_monitor_exit(thread);
 #ifdef CONFIG_THREAD_ABORT_HOOK
 		thread_abort_hook(thread);
-#endif
+#endif /* CONFIG_THREAD_ABORT_HOOK */
 
 #ifdef CONFIG_OBJ_CORE_THREAD
 #ifdef CONFIG_OBJ_CORE_STATS_THREAD
 		k_obj_core_stats_deregister(K_OBJ_CORE(thread));
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS_THREAD */
 		k_obj_core_unlink(K_OBJ_CORE(thread));
-#endif
+#endif /* CONFIG_OBJ_CORE_THREAD */
 
 #ifdef CONFIG_USERSPACE
 		z_mem_domain_exit_thread(thread);
 		k_thread_perms_all_clear(thread);
 		k_object_uninit(thread->stack_obj);
 		k_object_uninit(thread);
-#endif
+#endif /* CONFIG_USERSPACE */
 	}
 }
 
@@ -1666,7 +1672,7 @@
 
 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, abort, thread);
 }
-#endif
+#endif /* !CONFIG_ARCH_HAS_THREAD_ABORT */
 
 int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
 {
@@ -1723,7 +1729,7 @@
 	default:
 #ifdef CONFIG_LOG
 		k_object_dump_error(ret, thread, ko, K_OBJ_THREAD);
-#endif
+#endif /* CONFIG_LOG */
 		K_OOPS(K_SYSCALL_VERIFY_MSG(ret, "access denied"));
 	}
 	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
diff --git a/kernel/sem.c b/kernel/sem.c
index 2f8de51..819abd3 100644
--- a/kernel/sem.c
+++ b/kernel/sem.c
@@ -40,7 +40,7 @@
 
 #ifdef CONFIG_OBJ_CORE_SEM
 static struct k_obj_type obj_type_sem;
-#endif
+#endif /* CONFIG_OBJ_CORE_SEM */
 
 int z_impl_k_sem_init(struct k_sem *sem, unsigned int initial_count,
 		      unsigned int limit)
@@ -62,12 +62,12 @@
 	z_waitq_init(&sem->wait_q);
 #if defined(CONFIG_POLL)
 	sys_dlist_init(&sem->poll_events);
-#endif
+#endif /* CONFIG_POLL */
 	k_object_init(sem);
 
 #ifdef CONFIG_OBJ_CORE_SEM
 	k_obj_core_init_and_link(K_OBJ_CORE(sem), &obj_type_sem);
-#endif
+#endif /* CONFIG_OBJ_CORE_SEM */
 
 	return 0;
 }
@@ -80,7 +80,7 @@
 	return z_impl_k_sem_init(sem, initial_count, limit);
 }
 #include <syscalls/k_sem_init_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 static inline bool handle_poll_events(struct k_sem *sem)
 {
@@ -90,7 +90,7 @@
 #else
 	ARG_UNUSED(sem);
 	return false;
-#endif
+#endif /* CONFIG_POLL */
 }
 
 void z_impl_k_sem_give(struct k_sem *sem)
@@ -127,7 +127,7 @@
 	z_impl_k_sem_give(sem);
 }
 #include <syscalls/k_sem_give_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 int z_impl_k_sem_take(struct k_sem *sem, k_timeout_t timeout)
 {
@@ -207,7 +207,7 @@
 }
 #include <syscalls/k_sem_count_get_mrsh.c>
 
-#endif
+#endif /* CONFIG_USERSPACE */
 
 #ifdef CONFIG_OBJ_CORE_SEM
 static int init_sem_obj_core_list(void)
@@ -228,4 +228,4 @@
 
 SYS_INIT(init_sem_obj_core_list, PRE_KERNEL_1,
 	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
-#endif
+#endif /* CONFIG_OBJ_CORE_SEM */
diff --git a/kernel/smp.c b/kernel/smp.c
index 6b5a12a..6e6cad26 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -49,7 +49,7 @@
 #ifdef CONFIG_SYS_CLOCK_EXISTS
 	/** True if smp_timer_init() needs to be called. */
 	bool reinit_timer;
-#endif
+#endif /* CONFIG_SYS_CLOCK_EXISTS */
 } cpu_start_fn;
 
 static struct k_spinlock cpu_start_lock;
@@ -130,7 +130,7 @@
 	if ((arg == NULL) || csc.reinit_timer) {
 		smp_timer_init();
 	}
-#endif
+#endif /* CONFIG_SYS_CLOCK_EXISTS */
 
 	/* Do additional initialization steps if needed. */
 	if (csc.fn != NULL) {
@@ -177,7 +177,7 @@
 
 #ifdef CONFIG_SYS_CLOCK_EXISTS
 	cpu_start_fn.reinit_timer = true;
-#endif
+#endif /* CONFIG_SYS_CLOCK_EXISTS */
 
 	/* We are only starting one CPU so we do not need to synchronize
 	 * across all CPUs using the start_flag. So just set it to 1.
@@ -206,7 +206,7 @@
 	cpu_start_fn.reinit_timer = reinit_timer;
 #else
 	ARG_UNUSED(reinit_timer);
-#endif
+#endif /* CONFIG_SYS_CLOCK_EXISTS */
 
 	/* We are only starting one CPU so we do not need to synchronize
 	 * across all CPUs using the start_flag. So just set it to 1.
diff --git a/kernel/stack.c b/kernel/stack.c
index 6ada39c..822abfc 100644
--- a/kernel/stack.c
+++ b/kernel/stack.c
@@ -21,7 +21,7 @@
 
 #ifdef CONFIG_OBJ_CORE_STACK
 static struct k_obj_type obj_type_stack;
-#endif
+#endif /* CONFIG_OBJ_CORE_STACK */
 
 void k_stack_init(struct k_stack *stack, stack_data_t *buffer,
 		  uint32_t num_entries)
@@ -36,7 +36,7 @@
 
 #ifdef CONFIG_OBJ_CORE_STACK
 	k_obj_core_init_and_link(K_OBJ_CORE(stack), &obj_type_stack);
-#endif
+#endif /* CONFIG_OBJ_CORE_STACK */
 }
 
 int32_t z_impl_k_stack_alloc_init(struct k_stack *stack, uint32_t num_entries)
@@ -69,7 +69,7 @@
 	return z_impl_k_stack_alloc_init(stack, num_entries);
 }
 #include <syscalls/k_stack_alloc_init_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 int k_stack_cleanup(struct k_stack *stack)
 {
@@ -137,7 +137,7 @@
 	return z_impl_k_stack_push(stack, data);
 }
 #include <syscalls/k_stack_push_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 int z_impl_k_stack_pop(struct k_stack *stack, stack_data_t *data,
 		       k_timeout_t timeout)
@@ -192,7 +192,7 @@
 	return z_impl_k_stack_pop(stack, data, timeout);
 }
 #include <syscalls/k_stack_pop_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 #ifdef CONFIG_OBJ_CORE_STACK
 static int init_stack_obj_core_list(void)
@@ -213,4 +213,4 @@
 
 SYS_INIT(init_stack_obj_core_list, PRE_KERNEL_1,
 	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
-#endif
+#endif /* CONFIG_OBJ_CORE_STACK */
diff --git a/kernel/thread.c b/kernel/thread.c
index dc512a4..729be27 100644
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -47,7 +47,7 @@
 	.disable = z_thread_stats_disable,
 	.enable  = z_thread_stats_enable,
 };
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS_THREAD */
 
 static int init_thread_obj_core_list(void)
 {
@@ -56,18 +56,18 @@
 #ifdef CONFIG_OBJ_CORE_THREAD
 	z_obj_type_init(&obj_type_thread, K_OBJ_TYPE_THREAD_ID,
 			offsetof(struct k_thread, obj_core));
-#endif
+#endif /* CONFIG_OBJ_CORE_THREAD */
 
 #ifdef CONFIG_OBJ_CORE_STATS_THREAD
 	k_obj_type_stats_init(&obj_type_thread, &thread_stats_desc);
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS_THREAD */
 
 	return 0;
 }
 
 SYS_INIT(init_thread_obj_core_list, PRE_KERNEL_1,
 	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
-#endif
+#endif /* CONFIG_OBJ_CORE_THREAD */
 
 
 #define _FOREACH_STATIC_THREAD(thread_data)              \
@@ -91,7 +91,7 @@
 	z_impl_k_thread_custom_data_set(data);
 }
 #include <syscalls/k_thread_custom_data_set_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 void *z_impl_k_thread_custom_data_get(void)
 {
@@ -338,8 +338,8 @@
 	return z_impl_k_thread_start(thread);
 }
 #include <syscalls/k_thread_start_mrsh.c>
-#endif
-#endif
+#endif /* CONFIG_USERSPACE */
+#endif /* CONFIG_MULTITHREADING */
 
 
 #if CONFIG_STACK_POINTER_RANDOM
@@ -388,7 +388,7 @@
 		stack_buf_start = Z_THREAD_STACK_BUFFER(stack);
 		stack_buf_size = stack_obj_size - K_THREAD_STACK_RESERVED;
 	} else
-#endif
+#endif /* CONFIG_USERSPACE */
 	{
 		/* Object cannot host a user mode thread */
 		stack_obj_size = Z_KERNEL_STACK_SIZE_ADJUST(stack_size);
@@ -417,7 +417,7 @@
 
 #ifdef CONFIG_INIT_STACKS
 	memset(stack_buf_start, 0xaa, stack_buf_size);
-#endif
+#endif /* CONFIG_INIT_STACKS */
 #ifdef CONFIG_STACK_SENTINEL
 	/* Put the stack sentinel at the lowest 4 bytes of the stack area.
 	 * We periodically check that it's still present and kill the thread
@@ -436,10 +436,10 @@
 	delta += tls_size;
 	new_thread->userspace_local_data =
 		(struct _thread_userspace_local_data *)(stack_ptr - delta);
-#endif
+#endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */
 #if CONFIG_STACK_POINTER_RANDOM
 	delta += random_offset(stack_buf_size);
-#endif
+#endif /* CONFIG_STACK_POINTER_RANDOM */
 	delta = ROUND_UP(delta, ARCH_STACK_PTR_ALIGN);
 #ifdef CONFIG_THREAD_STACK_INFO
 	/* Initial values. Arches which implement MPU guards that "borrow"
@@ -452,7 +452,7 @@
 	new_thread->stack_info.start = (uintptr_t)stack_buf_start;
 	new_thread->stack_info.size = stack_buf_size;
 	new_thread->stack_info.delta = delta;
-#endif
+#endif /* CONFIG_THREAD_STACK_INFO */
 	stack_ptr -= delta;
 
 	return stack_ptr;
@@ -479,8 +479,8 @@
 	k_obj_core_stats_register(K_OBJ_CORE(new_thread),
 				  &new_thread->base.usage,
 				  sizeof(new_thread->base.usage));
-#endif
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS_THREAD */
+#endif /* CONFIG_OBJ_CORE_THREAD */
 
 #ifdef CONFIG_USERSPACE
 	__ASSERT((options & K_USER) == 0U || z_stack_is_user_capable(stack),
@@ -493,7 +493,7 @@
 
 	/* Any given thread has access to itself */
 	k_object_access_grant(new_thread, new_thread);
-#endif
+#endif /* CONFIG_USERSPACE */
 	z_waitq_init(&new_thread->join_queue);
 
 	/* Initialize various struct k_thread members */
@@ -513,7 +513,7 @@
 	__ASSERT_NO_MSG(!arch_mem_coherent(stack));
 #endif  /* CONFIG_DYNAMIC_THREAD */
 
-#endif
+#endif /* CONFIG_KERNEL_COHERENCE */
 
 	arch_new_thread(new_thread, stack, stack_ptr, entry, p1, p2, p3);
 
@@ -527,14 +527,14 @@
 	 */
 	__ASSERT(new_thread->switch_handle != NULL,
 		 "arch layer failed to initialize switch_handle");
-#endif
+#endif /* CONFIG_USE_SWITCH */
 #ifdef CONFIG_THREAD_CUSTOM_DATA
 	/* Initialize custom data field (value is opaque to kernel) */
 	new_thread->custom_data = NULL;
-#endif
+#endif /* CONFIG_THREAD_CUSTOM_DATA */
 #ifdef CONFIG_EVENTS
 	new_thread->no_wake_on_timeout = false;
-#endif
+#endif /* CONFIG_EVENTS */
 #ifdef CONFIG_THREAD_MONITOR
 	new_thread->entry.pEntry = entry;
 	new_thread->entry.parameter1 = p1;
@@ -546,7 +546,7 @@
 	new_thread->next_thread = _kernel.threads;
 	_kernel.threads = new_thread;
 	k_spin_unlock(&z_thread_monitor_lock, key);
-#endif
+#endif /* CONFIG_THREAD_MONITOR */
 #ifdef CONFIG_THREAD_NAME
 	if (name != NULL) {
 		strncpy(new_thread->name, name,
@@ -556,42 +556,42 @@
 	} else {
 		new_thread->name[0] = '\0';
 	}
-#endif
+#endif /* CONFIG_THREAD_NAME */
 #ifdef CONFIG_SCHED_CPU_MASK
 	if (IS_ENABLED(CONFIG_SCHED_CPU_MASK_PIN_ONLY)) {
 		new_thread->base.cpu_mask = 1; /* must specify only one cpu */
 	} else {
 		new_thread->base.cpu_mask = -1; /* allow all cpus */
 	}
-#endif
+#endif /* CONFIG_SCHED_CPU_MASK */
 #ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
 	/* _current may be null if the dummy thread is not used */
 	if (!_current) {
 		new_thread->resource_pool = NULL;
 		return stack_ptr;
 	}
-#endif
+#endif /* CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN */
 #ifdef CONFIG_USERSPACE
 	z_mem_domain_init_thread(new_thread);
 
 	if ((options & K_INHERIT_PERMS) != 0U) {
 		k_thread_perms_inherit(_current, new_thread);
 	}
-#endif
+#endif /* CONFIG_USERSPACE */
 #ifdef CONFIG_SCHED_DEADLINE
 	new_thread->base.prio_deadline = 0;
-#endif
+#endif /* CONFIG_SCHED_DEADLINE */
 	new_thread->resource_pool = _current->resource_pool;
 
 #ifdef CONFIG_SMP
 	z_waitq_init(&new_thread->halt_queue);
-#endif
+#endif /* CONFIG_SMP */
 
 #ifdef CONFIG_SCHED_THREAD_USAGE
 	new_thread->base.usage = (struct k_cycle_stats) {};
 	new_thread->base.usage.track_usage =
 		CONFIG_SCHED_THREAD_USAGE_AUTO_ENABLE;
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE */
 
 	SYS_PORT_TRACING_OBJ_FUNC(k_thread, create, new_thread);
 
@@ -661,7 +661,7 @@
 	stack_obj_size = stack_object->data.stack_data->size;
 #else
 	stack_obj_size = stack_object->data.stack_size;
-#endif
+#endif /* CONFIG_GEN_PRIV_STACKS */
 	K_OOPS(K_SYSCALL_VERIFY_MSG(total_size <= stack_obj_size,
 				    "stack size %zu is too big, max is %zu",
 				    total_size, stack_obj_size));
@@ -707,12 +707,12 @@
 
 #ifdef CONFIG_SMP
 	thread_base->is_idle = 0;
-#endif
+#endif /* CONFIG_SMP */
 
 #ifdef CONFIG_TIMESLICE_PER_THREAD
 	thread_base->slice_ticks = 0;
 	thread_base->slice_expired = NULL;
-#endif
+#endif /* CONFIG_TIMESLICE_PER_THREAD */
 
 	/* swap_data does not need to be initialized */
 
@@ -731,30 +731,30 @@
 	_current->entry.parameter1 = p1;
 	_current->entry.parameter2 = p2;
 	_current->entry.parameter3 = p3;
-#endif
+#endif /* CONFIG_THREAD_MONITOR */
 #ifdef CONFIG_USERSPACE
 	__ASSERT(z_stack_is_user_capable(_current->stack_obj),
 		 "dropping to user mode with kernel-only stack object");
 #ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
 	memset(_current->userspace_local_data, 0,
 	       sizeof(struct _thread_userspace_local_data));
-#endif
+#endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */
 #ifdef CONFIG_THREAD_LOCAL_STORAGE
 	arch_tls_stack_setup(_current,
 			     (char *)(_current->stack_info.start +
 				      _current->stack_info.size));
-#endif
+#endif /* CONFIG_THREAD_LOCAL_STORAGE */
 	arch_user_mode_enter(entry, p1, p2, p3);
 #else
 	/* XXX In this case we do not reset the stack */
 	z_thread_entry(entry, p1, p2, p3);
-#endif
+#endif /* CONFIG_USERSPACE */
 }
 
 #if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
 #ifdef CONFIG_STACK_GROWS_UP
 #error "Unsupported configuration for stack analysis"
-#endif
+#endif /* CONFIG_STACK_GROWS_UP */
 
 int z_stack_space_get(const uint8_t *stack_start, size_t size, size_t *unused_ptr)
 {
@@ -858,25 +858,25 @@
 	return z_impl_k_thread_timeout_expires_ticks(thread);
 }
 #include <syscalls/k_thread_timeout_expires_ticks_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 #ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
 void z_thread_mark_switched_in(void)
 {
 #if defined(CONFIG_SCHED_THREAD_USAGE) && !defined(CONFIG_USE_SWITCH)
 	z_sched_usage_start(_current);
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE && !CONFIG_USE_SWITCH */
 
 #ifdef CONFIG_TRACING
 	SYS_PORT_TRACING_FUNC(k_thread, switched_in);
-#endif
+#endif /* CONFIG_TRACING */
 }
 
 void z_thread_mark_switched_out(void)
 {
 #if defined(CONFIG_SCHED_THREAD_USAGE) && !defined(CONFIG_USE_SWITCH)
 	z_sched_usage_stop();
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE && !CONFIG_USE_SWITCH */
 
 #ifdef CONFIG_TRACING
 #ifdef CONFIG_THREAD_LOCAL_STORAGE
@@ -884,9 +884,9 @@
 	if (!_current_cpu->current ||
 	    (_current_cpu->current->base.thread_state & _THREAD_DUMMY) != 0)
 		return;
-#endif
+#endif /* CONFIG_THREAD_LOCAL_STORAGE */
 	SYS_PORT_TRACING_FUNC(k_thread, switched_out);
-#endif
+#endif /* CONFIG_TRACING */
 }
 #endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */
 
@@ -901,7 +901,7 @@
 	z_sched_thread_usage(thread, stats);
 #else
 	*stats = (k_thread_runtime_stats_t) {};
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE */
 
 	return 0;
 }
@@ -910,7 +910,7 @@
 {
 #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
 	k_thread_runtime_stats_t  tmp_stats;
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */
 
 	if (stats == NULL) {
 		return -EINVAL;
@@ -932,10 +932,10 @@
 		stats->current_cycles   += tmp_stats.current_cycles;
 		stats->peak_cycles      += tmp_stats.peak_cycles;
 		stats->average_cycles   += tmp_stats.average_cycles;
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
 		stats->idle_cycles      += tmp_stats.idle_cycles;
 	}
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */
 
 	return 0;
 }
diff --git a/kernel/timeout.c b/kernel/timeout.c
index b667aba..3d5cb51 100644
--- a/kernel/timeout.c
+++ b/kernel/timeout.c
@@ -105,7 +105,7 @@
 
 #ifdef CONFIG_KERNEL_COHERENCE
 	__ASSERT_NO_MSG(arch_mem_coherent(to));
-#endif
+#endif /* CONFIG_KERNEL_COHERENCE */
 
 	__ASSERT(!sys_dnode_is_linked(&to->node), "");
 	to->fn = fn;
@@ -255,7 +255,7 @@
 
 #ifdef CONFIG_TIMESLICING
 	z_time_slice();
-#endif
+#endif /* CONFIG_TIMESLICING */
 }
 
 int64_t sys_clock_tick_get(void)
@@ -274,7 +274,7 @@
 	return (uint32_t)sys_clock_tick_get();
 #else
 	return (uint32_t)curr_tick;
-#endif
+#endif /* CONFIG_TICKLESS_KERNEL */
 }
 
 int64_t z_impl_k_uptime_ticks(void)
@@ -288,7 +288,7 @@
 	return z_impl_k_uptime_ticks();
 }
 #include <syscalls/k_uptime_ticks_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 k_timepoint_t sys_timepoint_calc(k_timeout_t timeout)
 {
@@ -337,4 +337,4 @@
 {
 	z_impl_sys_clock_tick_set(tick);
 }
-#endif
+#endif /* CONFIG_ZTEST */
diff --git a/kernel/timer.c b/kernel/timer.c
index 48cc69b..8eafbb4 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -17,7 +17,7 @@
 
 #ifdef CONFIG_OBJ_CORE_TIMER
 static struct k_obj_type obj_type_timer;
-#endif
+#endif /* CONFIG_OBJ_CORE_TIMER */
 
 /**
  * @brief Handle expiration of a kernel timer object.
@@ -72,7 +72,7 @@
 		 * down" behavior on timeout addition).
 		 */
 		next = K_TIMEOUT_ABS_TICKS(k_uptime_ticks() + 1 + next.ticks);
-#endif
+#endif /* CONFIG_TIMEOUT_64BIT */
 		z_add_timeout(&timer->timeout, z_timer_expiration_handler,
 			      next);
 	}
@@ -132,7 +132,7 @@
 
 #ifdef CONFIG_OBJ_CORE_TIMER
 	k_obj_core_init_and_link(K_OBJ_CORE(timer), &obj_type_timer);
-#endif
+#endif /* CONFIG_OBJ_CORE_TIMER */
 }
 
 
@@ -189,7 +189,7 @@
 	z_impl_k_timer_start(timer, duration, period);
 }
 #include <syscalls/k_timer_start_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 void z_impl_k_timer_stop(struct k_timer *timer)
 {
@@ -222,7 +222,7 @@
 	z_impl_k_timer_stop(timer);
 }
 #include <syscalls/k_timer_stop_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 uint32_t z_impl_k_timer_status_get(struct k_timer *timer)
 {
@@ -242,7 +242,7 @@
 	return z_impl_k_timer_status_get(timer);
 }
 #include <syscalls/k_timer_status_get_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 
 uint32_t z_impl_k_timer_status_sync(struct k_timer *timer)
 {
@@ -342,7 +342,7 @@
 }
 #include <syscalls/k_timer_user_data_set_mrsh.c>
 
-#endif
+#endif /* CONFIG_USERSPACE */
 
 #ifdef CONFIG_OBJ_CORE_TIMER
 static int init_timer_obj_core_list(void)
@@ -362,4 +362,4 @@
 }
 SYS_INIT(init_timer_obj_core_list, PRE_KERNEL_1,
 	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
-#endif
+#endif /* CONFIG_OBJ_CORE_TIMER */
diff --git a/kernel/usage.c b/kernel/usage.c
index de497e2..7ec5e30 100644
--- a/kernel/usage.c
+++ b/kernel/usage.c
@@ -14,7 +14,7 @@
 /* Need one of these for this to work */
 #if !defined(CONFIG_USE_SWITCH) && !defined(CONFIG_INSTRUMENT_THREAD_SWITCHING)
 #error "No data backend configured for CONFIG_SCHED_THREAD_USAGE"
-#endif
+#endif /* !CONFIG_USE_SWITCH && !CONFIG_INSTRUMENT_THREAD_SWITCHING */
 
 static struct k_spinlock usage_lock;
 
@@ -26,7 +26,7 @@
 	now = (uint32_t)timing_counter_get();
 #else
 	now = k_cycle_get_32();
-#endif
+#endif /* CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS */
 
 	/* Edge case: we use a zero as a null ("stop() already called") */
 	return (now == 0) ? 1 : now;
@@ -51,12 +51,12 @@
 	} else {
 		cpu->usage->current = 0;
 		cpu->usage->num_windows++;
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
 	}
 }
 #else
 #define sched_cpu_update_usage(cpu, cycles)   do { } while (0)
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */
 
 static void sched_thread_update_usage(struct k_thread *thread, uint32_t cycles)
 {
@@ -68,7 +68,7 @@
 	if (thread->base.usage.longest < thread->base.usage.current) {
 		thread->base.usage.longest = thread->base.usage.current;
 	}
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
 }
 
 void z_sched_usage_start(struct k_thread *thread)
@@ -93,7 +93,7 @@
 	 */
 
 	_current_cpu->usage0 = usage_now();
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
 }
 
 void z_sched_usage_stop(void)
@@ -159,7 +159,7 @@
 		stats->average_cycles = stats->total_cycles /
 					cpu->usage->num_windows;
 	}
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
 
 	stats->idle_cycles =
 		_kernel.cpus[cpu_id].idle_thread->base.usage.total;
@@ -168,7 +168,7 @@
 
 	k_spin_unlock(&usage_lock, key);
 }
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */
 
 void z_sched_thread_usage(struct k_thread *thread,
 			  struct k_thread_runtime_stats *stats)
@@ -215,11 +215,11 @@
 		stats->average_cycles = stats->total_cycles /
 					thread->base.usage.num_windows;
 	}
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
 
 #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
 	stats->idle_cycles = 0;
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */
 	stats->execution_cycles = thread->base.usage.total;
 
 	k_spin_unlock(&usage_lock, key);
@@ -273,7 +273,7 @@
 
 	return 0;
 }
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
 
 #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
 void k_sys_runtime_stats_enable(void)
@@ -303,7 +303,7 @@
 #ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
 		_kernel.cpus[i].usage->num_windows++;
 		_kernel.cpus[i].usage->current = 0;
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
 	}
 
 	k_spin_unlock(&usage_lock, key);
@@ -342,7 +342,7 @@
 
 	k_spin_unlock(&usage_lock, key);
 }
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */
 
 #ifdef CONFIG_OBJ_CORE_STATS_THREAD
 int z_thread_stats_raw(struct k_obj_core *obj_core, void *stats)
@@ -382,7 +382,7 @@
 	stats->current = 0ULL;
 	stats->longest = 0ULL;
 	stats->num_windows = (thread->base.usage.track_usage) ?  1U : 0U;
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
 
 	if (thread != _current_cpu->current) {
 
@@ -423,7 +423,7 @@
 	return k_thread_runtime_stats_disable(thread);
 #else
 	return -ENOTSUP;
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
 }
 
 int z_thread_stats_enable(struct k_obj_core *obj_core)
@@ -436,9 +436,9 @@
 	return k_thread_runtime_stats_enable(thread);
 #else
 	return -ENOTSUP;
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
 }
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS_THREAD */
 
 #ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
 int z_cpu_stats_raw(struct k_obj_core *obj_core, void *stats)
@@ -462,7 +462,7 @@
 
 	return 0;
 }
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */
 
 #ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
 int z_kernel_stats_raw(struct k_obj_core *obj_core, void *stats)
@@ -483,4 +483,4 @@
 
 	return k_thread_runtime_stats_all_get(stats);
 }
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */
diff --git a/kernel/userspace.c b/kernel/userspace.c
index ecdc134..c87421d 100644
--- a/kernel/userspace.c
+++ b/kernel/userspace.c
@@ -25,7 +25,7 @@
 
 #ifdef Z_LIBC_PARTITION_EXISTS
 K_APPMEM_PARTITION_DEFINE(z_libc_partition);
-#endif
+#endif /* Z_LIBC_PARTITION_EXISTS */
 
 /* TODO: Find a better place to put this. Since we pull the entire
  * lib..__modules__crypto__mbedtls.a  globals into app shared memory
@@ -33,7 +33,7 @@
  */
 #ifdef CONFIG_MBEDTLS
 K_APPMEM_PARTITION_DEFINE(k_mbedtls_partition);
-#endif
+#endif /* CONFIG_MBEDTLS */
 
 #include <zephyr/logging/log.h>
 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
@@ -68,14 +68,14 @@
 #define STACK_ELEMENT_DATA_SIZE(size) Z_THREAD_STACK_SIZE_ADJUST(size)
 #endif /* CONFIG_GEN_PRIV_STACKS */
 
-#endif
+#endif /* CONFIG_DYNAMIC_OBJECTS */
 static struct k_spinlock obj_lock;         /* kobj struct data */
 
 #define MAX_THREAD_BITS		(CONFIG_MAX_THREAD_BYTES * 8)
 
 #ifdef CONFIG_DYNAMIC_OBJECTS
 extern uint8_t _thread_idx_map[CONFIG_MAX_THREAD_BYTES];
-#endif
+#endif /* CONFIG_DYNAMIC_OBJECTS */
 
 static void clear_perms_cb(struct k_object *ko, void *ctx_ptr);
 
@@ -102,7 +102,7 @@
 #else
 	ARG_UNUSED(otype);
 	ret = NULL;
-#endif
+#endif /* CONFIG_LOG */
 	return ret;
 }
 
@@ -147,7 +147,7 @@
 #define DYN_OBJ_DATA_ALIGN_K_THREAD	(ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT)
 #else
 #define DYN_OBJ_DATA_ALIGN_K_THREAD	(sizeof(void *))
-#endif
+#endif /* ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT */
 
 #ifdef CONFIG_DYNAMIC_THREAD_STACK_SIZE
 #ifndef CONFIG_MPU_STACK_GUARD
@@ -211,7 +211,7 @@
 		ret = ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT;
 #else
 		ret = __alignof(struct dyn_obj);
-#endif
+#endif /* ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT */
 		break;
 	default:
 		ret = __alignof(struct dyn_obj);
@@ -349,11 +349,11 @@
 			  Z_THREAD_STACK_OBJ_ALIGN(size));
 #else
 		dyn->kobj.name = dyn->data;
-#endif
+#endif /* CONFIG_ARM_MPU || CONFIG_ARC_MPU */
 #else
 		dyn->kobj.name = dyn->data;
 		dyn->kobj.data.stack_size = adjusted_size;
-#endif
+#endif /* CONFIG_GEN_PRIV_STACKS */
 	} else {
 		dyn->data = z_thread_aligned_alloc(align, obj_size_get(otype) + size);
 		if (dyn->data == NULL) {
@@ -561,7 +561,7 @@
 	case K_OBJ_PIPE:
 		k_pipe_cleanup((struct k_pipe *)ko->name);
 		break;
-#endif
+#endif /* CONFIG_PIPES */
 	case K_OBJ_MSGQ:
 		k_msgq_cleanup((struct k_msgq *)ko->name);
 		break;
@@ -577,7 +577,7 @@
 	k_free(dyn->data);
 	k_free(dyn);
 out:
-#endif
+#endif /* CONFIG_DYNAMIC_OBJECTS */
 	k_spin_unlock(&obj_lock, key);
 }
 
diff --git a/kernel/userspace_handler.c b/kernel/userspace_handler.c
index a1cf9f9..a26c508 100644
--- a/kernel/userspace_handler.c
+++ b/kernel/userspace_handler.c
@@ -25,7 +25,7 @@
 	if (ret != 0) {
 #ifdef CONFIG_LOG
 		k_object_dump_error(ret, obj, ko, otype);
-#endif
+#endif /* CONFIG_LOG */
 		return NULL;
 	}
 
diff --git a/kernel/work.c b/kernel/work.c
index 269e734..cf73bdb 100644
--- a/kernel/work.c
+++ b/kernel/work.c
@@ -471,7 +471,7 @@
 	__ASSERT_NO_MSG(sync != NULL);
 #ifdef CONFIG_KERNEL_COHERENCE
 	__ASSERT_NO_MSG(arch_mem_coherent(sync));
-#endif
+#endif /* CONFIG_KERNEL_COHERENCE */
 
 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush, work);
 
@@ -583,7 +583,7 @@
 	__ASSERT_NO_MSG(!k_is_in_isr());
 #ifdef CONFIG_KERNEL_COHERENCE
 	__ASSERT_NO_MSG(arch_mem_coherent(sync));
-#endif
+#endif /* CONFIG_KERNEL_COHERENCE */
 
 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_sync, work, sync);
 
@@ -1066,7 +1066,7 @@
 	__ASSERT_NO_MSG(!k_is_in_isr());
 #ifdef CONFIG_KERNEL_COHERENCE
 	__ASSERT_NO_MSG(arch_mem_coherent(sync));
-#endif
+#endif /* CONFIG_KERNEL_COHERENCE */
 
 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_delayable_sync, dwork, sync);
 
@@ -1098,7 +1098,7 @@
 	__ASSERT_NO_MSG(!k_is_in_isr());
 #ifdef CONFIG_KERNEL_COHERENCE
 	__ASSERT_NO_MSG(arch_mem_coherent(sync));
-#endif
+#endif /* CONFIG_KERNEL_COHERENCE */
 
 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush_delayable, dwork, sync);