all: Update reserved function names

Update reserved function names starting with one underscore, replacing
them as follows:
   '_k_' with 'z_'
   '_K_' with 'Z_'
   '_handler_' with 'z_handl_'
   '_Cstart' with 'z_cstart'
   '_Swap' with 'z_swap'

This renaming is done on both the global function names and the static
function names in kernel/include and include/. Other static function
names in kernel/ are renamed by removing the leading underscore. Other
function names starting with a single underscore but not matching any
prefix listed above are renamed by replacing that underscore with a
'z_' or 'Z_' prefix.
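
As an illustration, the mapping above could be driven by a small Python
helper in the spirit of the generator scripts touched by this commit
(a hypothetical sketch, not the tooling actually used for the rename):

   import re

   # Ordered prefix map from the list above. The r'\b' anchors keep the
   # patterns from firing in the middle of an identifier ('_' is a word
   # character), while still rewriting compounds such as
   # '_Swap_irqlock' -> 'z_swap_irqlock'.
   RENAMES = [
       (re.compile(r'\b_k_'), 'z_'),
       (re.compile(r'\b_K_'), 'Z_'),
       (re.compile(r'\b_handler_'), 'z_handl_'),
       (re.compile(r'\b_Cstart'), 'z_cstart'),
       (re.compile(r'\b_Swap'), 'z_swap'),
   ]

   def rename_identifiers(source):
       """Apply each rename pattern, in order, to a source string."""
       for pattern, replacement in RENAMES:
           source = pattern.sub(replacement, source)
       return source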

Function names starting with two or three leading underscores are not
automatically renamed, since applying the same scheme to them would
collide with the renamed single-underscore variants (for example,
'__swap' would clash with the 'z_swap' generated from '_Swap').
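
Concretely, the guard such an automated pass needs is just a check on
the number of leading underscores (again a hypothetical sketch):

   def is_auto_renamable(name):
       # '_irq_spurious' qualifies; '__swap' and '___foo' keep their
       # leading underscores and must be handled by hand.
       return name.startswith('_') and not name.startswith('__')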

Various generator scripts have also been updated, as well as perf,
linker and usb files. These include:
   drivers/serial/uart_handlers.c
   include/linker/kobject-text.ld
   kernel/include/syscall_handler.h
   scripts/gen_kobject_list.py
   scripts/gen_syscall_header.py

Signed-off-by: Patrik Flykt <patrik.flykt@intel.com>
diff --git a/arch/arc/core/cache.c b/arch/arc/core/cache.c
index dd14bf0..23914a5 100644
--- a/arch/arc/core/cache.c
+++ b/arch/arc/core/cache.c
@@ -51,7 +51,7 @@
 
 static bool dcache_available(void)
 {
-	unsigned long val = _arc_v2_aux_reg_read(_ARC_V2_D_CACHE_BUILD);
+	unsigned long val = z_arc_v2_aux_reg_read(_ARC_V2_D_CACHE_BUILD);
 
 	val &= 0xff; /* extract version */
 	return (val == 0) ? false : true;
@@ -60,7 +60,7 @@
 static void dcache_dc_ctrl(u32_t dcache_en_mask)
 {
 	if (dcache_available()) {
-		_arc_v2_aux_reg_write(_ARC_V2_DC_CTRL, dcache_en_mask);
+		z_arc_v2_aux_reg_write(_ARC_V2_DC_CTRL, dcache_en_mask);
 	}
 }
 
@@ -101,13 +101,13 @@
 	key = irq_lock(); /* --enter critical section-- */
 
 	do {
-		_arc_v2_aux_reg_write(_ARC_V2_DC_FLDL, start_addr);
+		z_arc_v2_aux_reg_write(_ARC_V2_DC_FLDL, start_addr);
 		__asm__ volatile("nop_s");
 		__asm__ volatile("nop_s");
 		__asm__ volatile("nop_s");
 		/* wait for flush completion */
 		do {
-			if ((_arc_v2_aux_reg_read(_ARC_V2_DC_CTRL) &
+			if ((z_arc_v2_aux_reg_read(_ARC_V2_DC_CTRL) &
 			     DC_CTRL_FLUSH_STATUS) == 0) {
 				break;
 			}
@@ -149,7 +149,7 @@
 {
 	u32_t val;
 
-	val = _arc_v2_aux_reg_read(_ARC_V2_D_CACHE_BUILD);
+	val = z_arc_v2_aux_reg_read(_ARC_V2_D_CACHE_BUILD);
 	__ASSERT((val&0xff) != 0, "d-cache is not present");
 	val = ((val>>16) & 0xf) + 1;
 	val *= 16;
diff --git a/arch/arc/core/fast_irq.S b/arch/arc/core/fast_irq.S
index 41c969d..7a8cc93 100644
--- a/arch/arc/core/fast_irq.S
+++ b/arch/arc/core/fast_irq.S
@@ -145,7 +145,7 @@
 	st	r0, [r1]
 
 #ifdef CONFIG_STACK_SENTINEL
-	bl _check_stack_sentinel
+	bl z_check_stack_sentinel
 #endif
 
 #ifdef CONFIG_PREEMPT_ENABLED
@@ -284,7 +284,7 @@
 	pop_s r0 /* status32 into r0 */
 	/*
 	 * There are only two interrupt lock states: locked and unlocked. When
-	 * entering _Swap(), they are always locked, so the IE bit is unset in
+	 * entering z_swap(), they are always locked, so the IE bit is unset in
 	 * status32. If the incoming thread had them locked recursively, it
 	 * means that the IE bit should stay unset. The only time the bit
 	 * has to change is if they were not locked recursively.
diff --git a/arch/arc/core/fatal.c b/arch/arc/core/fatal.c
index 5c22b72..71a5437 100644
--- a/arch/arc/core/fatal.c
+++ b/arch/arc/core/fatal.c
@@ -25,7 +25,7 @@
  *
  * This routine is called when fatal error conditions are detected by software
  * and is responsible only for reporting the error. Once reported, it then
- * invokes the user provided routine _SysFatalErrorHandler() which is
+ * invokes the user provided routine z_SysFatalErrorHandler() which is
  * responsible for implementing the error handling policy.
  *
  * The caller is expected to always provide a usable ESF. In the event that the
@@ -34,7 +34,7 @@
  *
  * @return This function does not return.
  */
-void _NanoFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
+void z_NanoFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
 {
 	LOG_PANIC();
 
@@ -70,7 +70,7 @@
 
 	if (reason == _NANO_ERR_HW_EXCEPTION) {
 		printk("Faulting instruction address = 0x%lx\n",
-		_arc_v2_aux_reg_read(_ARC_V2_ERET));
+		z_arc_v2_aux_reg_read(_ARC_V2_ERET));
 	}
 
 	/*
@@ -81,12 +81,12 @@
 	 * decide.
 	 */
 
-	_SysFatalErrorHandler(reason, pEsf);
+	z_SysFatalErrorHandler(reason, pEsf);
 }
 
-FUNC_NORETURN void _arch_syscall_oops(void *ssf_ptr)
+FUNC_NORETURN void z_arch_syscall_oops(void *ssf_ptr)
 {
 	LOG_PANIC();
-	_SysFatalErrorHandler(_NANO_ERR_KERNEL_OOPS, ssf_ptr);
+	z_SysFatalErrorHandler(_NANO_ERR_KERNEL_OOPS, ssf_ptr);
 	CODE_UNREACHABLE;
 }
diff --git a/arch/arc/core/fault.c b/arch/arc/core/fault.c
index ccf6bca..a85bf96 100644
--- a/arch/arc/core/fault.c
+++ b/arch/arc/core/fault.c
@@ -34,14 +34,14 @@
  *
  * This routine is called when fatal error conditions are detected by hardware
  * and is responsible only for reporting the error. Once reported, it then
- * invokes the user provided routine _SysFatalErrorHandler() which is
+ * invokes the user provided routine z_SysFatalErrorHandler() which is
  * responsible for implementing the error handling policy.
  */
 void _Fault(NANO_ESF *esf)
 {
 	u32_t vector, code, parameter;
-	u32_t exc_addr = _arc_v2_aux_reg_read(_ARC_V2_EFA);
-	u32_t ecr = _arc_v2_aux_reg_read(_ARC_V2_ECR);
+	u32_t exc_addr = z_arc_v2_aux_reg_read(_ARC_V2_EFA);
+	u32_t ecr = z_arc_v2_aux_reg_read(_ARC_V2_ECR);
 
 	LOG_PANIC();
 
@@ -64,7 +64,7 @@
 
 	/* exception raised by kernel */
 	if (vector == 0x9 && parameter == _TRAP_S_CALL_RUNTIME_EXCEPT) {
-		_NanoFatalErrorHandler(esf->r0, esf);
+		z_NanoFatalErrorHandler(esf->r0, esf);
 		return;
 	}
 
@@ -76,9 +76,9 @@
 	 * check violation
 	 */
 	if (vector == 6 && parameter == 2) {
-		_NanoFatalErrorHandler(_NANO_ERR_STACK_CHK_FAIL, esf);
+		z_NanoFatalErrorHandler(_NANO_ERR_STACK_CHK_FAIL, esf);
 		return;
 	}
 #endif
-	_NanoFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, esf);
+	z_NanoFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, esf);
 }
diff --git a/arch/arc/core/irq_manage.c b/arch/arc/core/irq_manage.c
index dc27c1e..9515114 100644
--- a/arch/arc/core/irq_manage.c
+++ b/arch/arc/core/irq_manage.c
@@ -36,7 +36,7 @@
  * @return N/A
  */
 
-void _arch_irq_enable(unsigned int irq)
+void z_arch_irq_enable(unsigned int irq)
 {
 	unsigned int key = irq_lock();
 
@@ -53,7 +53,7 @@
  * @return N/A
  */
 
-void _arch_irq_disable(unsigned int irq)
+void z_arch_irq_disable(unsigned int irq)
 {
 	unsigned int key = irq_lock();
 
@@ -75,7 +75,7 @@
  * @return N/A
  */
 
-void _irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
+void z_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
 {
 	ARG_UNUSED(flags);
 
@@ -96,22 +96,22 @@
  * @return N/A
  */
 
-void _irq_spurious(void *unused)
+void z_irq_spurious(void *unused)
 {
 	ARG_UNUSED(unused);
-	printk("_irq_spurious(). Spinning...\n");
+	printk("z_irq_spurious(). Spinning...\n");
 	for (;;) {
 		;
 	}
 }
 
 #ifdef CONFIG_DYNAMIC_INTERRUPTS
-int _arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
+int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
 			      void (*routine)(void *parameter), void *parameter,
 			      u32_t flags)
 {
 	z_isr_install(irq, routine, parameter);
-	_irq_priority_set(irq, priority, flags);
+	z_irq_priority_set(irq, priority, flags);
 	return irq;
 }
 #endif /* CONFIG_DYNAMIC_INTERRUPTS */
diff --git a/arch/arc/core/isr_wrapper.S b/arch/arc/core/isr_wrapper.S
index ea2c984..c2d7300 100644
--- a/arch/arc/core/isr_wrapper.S
+++ b/arch/arc/core/isr_wrapper.S
@@ -45,7 +45,7 @@
 #endif
 
 #if defined(CONFIG_SYS_POWER_MANAGEMENT)
-GTEXT(_sys_power_save_idle_exit)
+GTEXT(z_sys_power_save_idle_exit)
 #endif
 
 /*
@@ -95,7 +95,7 @@
     transition from outgoing thread to incoming thread
 
 Not loading _kernel into r0 allows loading _kernel without stomping on
-the parameter in r0 in _Swap().
+the parameter in r0 in z_swap().
 
 
 ARCv2 processors have two kinds of interrupts: fast (FIRQ) and regular. The
@@ -195,7 +195,7 @@
 
   o to coop
 
-    The address of the returning instruction from _Swap() is loaded in ilink and
+    The address of the returning instruction from z_swap() is loaded in ilink and
     the saved status32 in status32_p0, taking care to adjust the interrupt lock
     state desired in status32_p0. The return value is put in r0.
 
@@ -359,7 +359,7 @@
 
 	st 0, [r1, _kernel_offset_to_idle] /* zero idle duration */
 	push_s blink
-	jl _sys_power_save_idle_exit
+	jl z_sys_power_save_idle_exit
 	pop_s blink
 
 _skip_sys_power_save_idle_exit:
diff --git a/arch/arc/core/mpu/arc_core_mpu.c b/arch/arc/core/mpu/arc_core_mpu.c
index dfb88fb..fe46674 100644
--- a/arch/arc/core/mpu/arc_core_mpu.c
+++ b/arch/arc/core/mpu/arc_core_mpu.c
@@ -104,7 +104,7 @@
 	arc_core_mpu_configure_mem_domain(thread);
 }
 
-int _arch_mem_domain_max_partitions_get(void)
+int z_arch_mem_domain_max_partitions_get(void)
 {
 	return arc_core_mpu_get_max_domain_partition_regions();
 }
@@ -112,7 +112,7 @@
 /*
  * Reset MPU region for a single memory partition
  */
-void _arch_mem_domain_partition_remove(struct k_mem_domain *domain,
+void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain,
 				       u32_t partition_id)
 {
 	ARG_UNUSED(domain);
@@ -126,7 +126,7 @@
 /*
  * Configure MPU memory domain
  */
-void _arch_mem_domain_configure(struct k_thread *thread)
+void z_arch_mem_domain_configure(struct k_thread *thread)
 {
 	configure_mpu_mem_domain(thread);
 }
@@ -134,7 +134,7 @@
 /*
  * Destroy MPU regions for the mem domain
  */
-void _arch_mem_domain_destroy(struct k_mem_domain *domain)
+void z_arch_mem_domain_destroy(struct k_mem_domain *domain)
 {
 	ARG_UNUSED(domain);
 
@@ -152,7 +152,7 @@
 /*
  * Validate the given buffer is user accessible or not
  */
-int _arch_buffer_validate(void *addr, size_t size, int write)
+int z_arch_buffer_validate(void *addr, size_t size, int write)
 {
 	return arc_core_mpu_buffer_validate(addr, size, write);
 }
diff --git a/arch/arc/core/mpu/arc_mpu.c b/arch/arc/core/mpu/arc_mpu.c
index aff3a81..9ac6d56 100644
--- a/arch/arc/core/mpu/arc_mpu.c
+++ b/arch/arc/core/mpu/arc_mpu.c
@@ -55,7 +55,7 @@
  */
 static inline u8_t _get_num_regions(void)
 {
-	u32_t num = _arc_v2_aux_reg_read(_ARC_V2_MPU_BUILD);
+	u32_t num = z_arc_v2_aux_reg_read(_ARC_V2_MPU_BUILD);
 
 	num = (num & 0xFF00) >> 8;
 
@@ -107,8 +107,8 @@
 		region_addr = 0U;
 	}
 
-	_arc_v2_aux_reg_write(_ARC_V2_MPU_RDP0 + index, region_attr);
-	_arc_v2_aux_reg_write(_ARC_V2_MPU_RDB0 + index, region_addr);
+	z_arc_v2_aux_reg_write(_ARC_V2_MPU_RDP0 + index, region_attr);
+	z_arc_v2_aux_reg_write(_ARC_V2_MPU_RDB0 + index, region_addr);
 
 #elif CONFIG_ARC_MPU_VER == 3
 #define AUX_MPU_RPER_SID1       0x10000
@@ -122,11 +122,11 @@
 				 AUX_MPU_RPER_SID1);
 	}
 
-	_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
-	_arc_v2_aux_reg_write(_ARC_V2_MPU_RSTART, region_addr);
-	_arc_v2_aux_reg_write(_ARC_V2_MPU_REND,
+	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
+	z_arc_v2_aux_reg_write(_ARC_V2_MPU_RSTART, region_addr);
+	z_arc_v2_aux_reg_write(_ARC_V2_MPU_REND,
 			      CALC_REGION_END_ADDR(region_addr, size));
-	_arc_v2_aux_reg_write(_ARC_V2_MPU_RPER, region_attr);
+	z_arc_v2_aux_reg_write(_ARC_V2_MPU_RPER, region_attr);
 #endif
 }
 
@@ -135,8 +135,8 @@
 {
 	u32_t val;
 
-	_arc_v2_aux_reg_write(_ARC_V2_MPU_PROBE, addr);
-	val = _arc_v2_aux_reg_read(_ARC_V2_MPU_INDEX);
+	z_arc_v2_aux_reg_write(_ARC_V2_MPU_PROBE, addr);
+	val = z_arc_v2_aux_reg_read(_ARC_V2_MPU_INDEX);
 
 	/* if no match or multiple regions match, return error */
 	if (val & 0xC0000000) {
@@ -215,11 +215,11 @@
 static inline int _is_enabled_region(u32_t r_index)
 {
 #if CONFIG_ARC_MPU_VER == 2
-	return ((_arc_v2_aux_reg_read(_ARC_V2_MPU_RDB0 + 2 * r_index)
+	return ((z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDB0 + 2 * r_index)
 		 & AUX_MPU_RDB_VALID_MASK) == AUX_MPU_RDB_VALID_MASK);
 #elif CONFIG_ARC_MPU_VER == 3
-	_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, r_index);
-	return ((_arc_v2_aux_reg_read(_ARC_V2_MPU_RPER) &
+	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, r_index);
+	return ((z_arc_v2_aux_reg_read(_ARC_V2_MPU_RPER) &
 		 AUX_MPU_RDB_VALID_MASK) == AUX_MPU_RDB_VALID_MASK);
 #endif
 }
@@ -234,9 +234,9 @@
 	u32_t r_addr_end;
 	u32_t r_size_lshift;
 
-	r_addr_start = _arc_v2_aux_reg_read(_ARC_V2_MPU_RDB0 + 2 * r_index)
+	r_addr_start = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDB0 + 2 * r_index)
 		       & (~AUX_MPU_RDB_VALID_MASK);
-	r_size_lshift = _arc_v2_aux_reg_read(_ARC_V2_MPU_RDP0 + 2 * r_index)
+	r_size_lshift = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDP0 + 2 * r_index)
 			& AUX_MPU_RDP_ATTR_MASK;
 	r_size_lshift = (r_size_lshift & 0x3) | ((r_size_lshift >> 7) & 0x1C);
 	r_addr_end = r_addr_start  + (1 << (r_size_lshift + 1));
@@ -264,10 +264,10 @@
 	u32_t r_ap;
 
 #if CONFIG_ARC_MPU_VER == 2
-	r_ap = _arc_v2_aux_reg_read(_ARC_V2_MPU_RDP0 + 2 * r_index);
+	r_ap = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDP0 + 2 * r_index);
 #elif CONFIG_ARC_MPU_VER == 3
-	_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, r_index);
-	r_ap = _arc_v2_aux_reg_read(_ARC_V2_MPU_RPER);
+	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, r_index);
+	r_ap = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RPER);
 #endif
 	r_ap &= AUX_MPU_RDP_ATTR_MASK;
 
@@ -289,8 +289,8 @@
 {
 #if CONFIG_ARC_MPU_VER == 2
 	/* Enable MPU */
-	_arc_v2_aux_reg_write(_ARC_V2_MPU_EN,
-			      _arc_v2_aux_reg_read(_ARC_V2_MPU_EN) | AUX_MPU_EN_ENABLE);
+	z_arc_v2_aux_reg_write(_ARC_V2_MPU_EN,
+			      z_arc_v2_aux_reg_read(_ARC_V2_MPU_EN) | AUX_MPU_EN_ENABLE);
 
 	/* MPU is always enabled, use default region to
 	 * simulate MPU enable
@@ -308,8 +308,8 @@
 {
 #if CONFIG_ARC_MPU_VER == 2
 	/* Disable MPU */
-	_arc_v2_aux_reg_write(_ARC_V2_MPU_EN,
-			      _arc_v2_aux_reg_read(_ARC_V2_MPU_EN) & AUX_MPU_EN_DISABLE);
+	z_arc_v2_aux_reg_write(_ARC_V2_MPU_EN,
+			      z_arc_v2_aux_reg_read(_ARC_V2_MPU_EN) & AUX_MPU_EN_DISABLE);
 #elif CONFIG_ARC_MPU_VER == 3
 	/* MPU is always enabled, use default region to
 	 * simulate MPU disable
@@ -411,12 +411,12 @@
  */
 void arc_core_mpu_default(u32_t region_attr)
 {
-	u32_t val =  _arc_v2_aux_reg_read(_ARC_V2_MPU_EN) &
+	u32_t val =  z_arc_v2_aux_reg_read(_ARC_V2_MPU_EN) &
 		    (~AUX_MPU_RDP_ATTR_MASK);
 
 	region_attr &= AUX_MPU_RDP_ATTR_MASK;
 
-	_arc_v2_aux_reg_write(_ARC_V2_MPU_EN, region_attr | val);
+	z_arc_v2_aux_reg_write(_ARC_V2_MPU_EN, region_attr | val);
 }
 
 /**
diff --git a/arch/arc/core/prep_c.c b/arch/arc/core/prep_c.c
index 77c7724..6689dd7 100644
--- a/arch/arc/core/prep_c.c
+++ b/arch/arc/core/prep_c.c
@@ -10,7 +10,7 @@
  *
  *
  * Initialization of full C support: zero the .bss, copy the .data if XIP,
- * call _Cstart().
+ * call z_cstart().
  *
  * Stack is available in this module, but not the global data/bss until their
  * initialization is performed.
@@ -40,14 +40,14 @@
 {
 	unsigned int val;
 
-	val = _arc_v2_aux_reg_read(_ARC_V2_I_CACHE_BUILD);
+	val = z_arc_v2_aux_reg_read(_ARC_V2_I_CACHE_BUILD);
 	val &= 0xff; /* version field */
 	if (val == 0) {
 		return; /* skip if i-cache is not present */
 	}
-	_arc_v2_aux_reg_write(_ARC_V2_IC_IVIC, 0);
+	z_arc_v2_aux_reg_write(_ARC_V2_IC_IVIC, 0);
 	__asm__ __volatile__ ("nop");
-	_arc_v2_aux_reg_write(_ARC_V2_IC_CTRL, 1);
+	z_arc_v2_aux_reg_write(_ARC_V2_IC_CTRL, 1);
 }
 
 /**
@@ -64,12 +64,12 @@
 {
 	unsigned int val;
 
-	val = _arc_v2_aux_reg_read(_ARC_V2_D_CACHE_BUILD);
+	val = z_arc_v2_aux_reg_read(_ARC_V2_D_CACHE_BUILD);
 	val &= 0xff; /* version field */
 	if (val == 0) {
 		return; /* skip if d-cache is not present */
 	}
-	_arc_v2_aux_reg_write(_ARC_V2_DC_IVDC, 1);
+	z_arc_v2_aux_reg_write(_ARC_V2_DC_IVDC, 1);
 }
 #endif
 
@@ -97,15 +97,15 @@
 	 * from the base address known by the ARC CPU,
 	 * set the vector base to the compiled-in address.
 	 */
-	vbr = _arc_v2_aux_reg_read(_ARC_V2_IRQ_VECT_BASE);
+	vbr = z_arc_v2_aux_reg_read(_ARC_V2_IRQ_VECT_BASE);
 	vbr &= 0xfffffc00;
 	if (vbr != (unsigned int)&_VectorTable) {
-		_arc_v2_aux_reg_write(_ARC_V2_IRQ_VECT_BASE,
+		z_arc_v2_aux_reg_write(_ARC_V2_IRQ_VECT_BASE,
 					(unsigned int)&_VectorTable);
 	}
 }
 
-extern FUNC_NORETURN void _Cstart(void);
+extern FUNC_NORETURN void z_cstart(void);
 /**
  *
  * @brief Prepare to and run C code
@@ -119,8 +119,8 @@
 {
 	_icache_setup();
 	adjust_vector_table_base();
-	_bss_zero();
-	_data_copy();
-	_Cstart();
+	z_bss_zero();
+	z_data_copy();
+	z_cstart();
 	CODE_UNREACHABLE;
 }
diff --git a/arch/arc/core/regular_irq.S b/arch/arc/core/regular_irq.S
index 4aef2df..e626ec6 100644
--- a/arch/arc/core/regular_irq.S
+++ b/arch/arc/core/regular_irq.S
@@ -104,7 +104,7 @@
 	st	r0, [r1]
 
 #ifdef CONFIG_STACK_SENTINEL
-	bl _check_stack_sentinel
+	bl z_check_stack_sentinel
 #endif
 
 #ifdef CONFIG_PREEMPT_ENABLED
diff --git a/arch/arc/core/swap.S b/arch/arc/core/swap.S
index b3586f5..2e4e8eb 100644
--- a/arch/arc/core/swap.S
+++ b/arch/arc/core/swap.S
@@ -51,7 +51,7 @@
  * not has already been taken and a context switch must happen.
  *
  * @return may contain a return value setup by a call to
- * _set_thread_return_value()
+ * z_set_thread_return_value()
  *
  * C function prototype:
  *
@@ -89,7 +89,7 @@
          * Carve space for the return value. Setting it to a default of
          * -EAGAIN eliminates the need for the timeout code to set it.
          * If another value is ever needed, it can be modified with
-         * _set_thread_return_value().
+         * z_set_thread_return_value().
          */
 	ld r3, [_k_neg_eagain]
 	st_s r3, [r2, _thread_offset_to_return_value]
diff --git a/arch/arc/core/sys_fatal_error_handler.c b/arch/arc/core/sys_fatal_error_handler.c
index 6f36708..cae259e 100644
--- a/arch/arc/core/sys_fatal_error_handler.c
+++ b/arch/arc/core/sys_fatal_error_handler.c
@@ -8,7 +8,7 @@
  * @file
  * @brief ARCv2 system fatal error handler
  *
- * This module provides the _SysFatalErrorHandler() routine for ARCv2 BSPs.
+ * This module provides the z_SysFatalErrorHandler() routine for ARCv2 BSPs.
  */
 
 #include <kernel.h>
@@ -37,7 +37,7 @@
  *
  * @return N/A
  */
-__weak void _SysFatalErrorHandler(unsigned int reason,
+__weak void z_SysFatalErrorHandler(unsigned int reason,
 						const NANO_ESF *pEsf)
 {
 	ARG_UNUSED(pEsf);
@@ -52,7 +52,7 @@
 		goto hang_system;
 	}
 
-	if (_is_thread_essential()) {
+	if (z_is_thread_essential()) {
 		printk("Fatal fault in essential thread! Spinning...\n");
 		goto hang_system;
 	}
diff --git a/arch/arc/core/thread.c b/arch/arc/core/thread.c
index 8e3b4d5..c904184 100644
--- a/arch/arc/core/thread.c
+++ b/arch/arc/core/thread.c
@@ -46,7 +46,7 @@
  * needed anymore.
  *
  * The initial context is a basic stack frame that contains arguments for
- * _thread_entry() return address, that points at _thread_entry()
+ * z_thread_entry(), a return address that points at z_thread_entry(),
  * and status register.
  *
  * <options> is currently unused.
@@ -62,13 +62,13 @@
  *
  * @return N/A
  */
-void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
+void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 		 size_t stackSize, k_thread_entry_t pEntry,
 		 void *parameter1, void *parameter2, void *parameter3,
 		 int priority, unsigned int options)
 {
 	char *pStackMem = K_THREAD_STACK_BUFFER(stack);
-	_ASSERT_VALID_PRIO(priority, pEntry);
+	Z_ASSERT_VALID_PRIO(priority, pEntry);
 
 	char *stackEnd;
 	char *stackAdjEnd;
@@ -171,7 +171,7 @@
 #endif
 
 #ifdef CONFIG_ARC_HAS_SECURE
-	pInitCtx->sec_stat = _arc_v2_aux_reg_read(_ARC_V2_SEC_STAT);
+	pInitCtx->sec_stat = z_arc_v2_aux_reg_read(_ARC_V2_SEC_STAT);
 #endif
 
 	pInitCtx->r0 = (u32_t)pEntry;
@@ -206,7 +206,7 @@
 #endif
 #endif
 	/*
-	 * seti instruction in the end of the _Swap() will
+	 * seti instruction in the end of the z_swap() will
 	 * enable the interrupts based on intlock_key
 	 * value.
 	 *
@@ -226,7 +226,7 @@
 
 #ifdef CONFIG_USERSPACE
 
-FUNC_NORETURN void _arch_user_mode_enter(k_thread_entry_t user_entry,
+FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry,
 	void *p1, void *p2, void *p3)
 {
 
diff --git a/arch/arc/core/thread_entry_wrapper.S b/arch/arc/core/thread_entry_wrapper.S
index 1702613..7c1322c 100644
--- a/arch/arc/core/thread_entry_wrapper.S
+++ b/arch/arc/core/thread_entry_wrapper.S
@@ -6,9 +6,9 @@
 
 /**
  * @file
- * @brief Wrapper for _thread_entry
+ * @brief Wrapper for z_thread_entry
  *
- * Wrapper for _thread_entry routine when called from the initial context.
+ * Wrapper for z_thread_entry routine when called from the initial context.
  */
 
 #include <toolchain.h>
@@ -17,10 +17,10 @@
 GTEXT(_thread_entry_wrapper)
 
 /*
- * @brief Wrapper for _thread_entry
+ * @brief Wrapper for z_thread_entry
  *
- * The routine pops parameters for the _thread_entry from stack frame, prepared
- * by the _new_thread() routine.
+ * The routine pops the parameters for z_thread_entry() from the stack
+ * frame, prepared by the z_new_thread() routine.
  *
  * @return N/A
  */
@@ -31,5 +31,5 @@
 	pop_s r2
 	pop_s r1
 	pop_s r0
-	j _thread_entry
+	j z_thread_entry
 	nop
diff --git a/arch/arc/core/timestamp.c b/arch/arc/core/timestamp.c
index d3d0a76..81e3ea7 100644
--- a/arch/arc/core/timestamp.c
+++ b/arch/arc/core/timestamp.c
@@ -23,7 +23,7 @@
  *
  * @return 64-bit time stamp value
  */
-u64_t _tsc_read(void)
+u64_t z_tsc_read(void)
 {
 	unsigned int key;
 	u64_t t;
@@ -31,7 +31,7 @@
 
 	key = irq_lock();
 	t = (u64_t)z_tick_get();
-	count = _arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT);
+	count = z_arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT);
 	irq_unlock(key);
 	t *= (u64_t)sys_clock_hw_cycles_per_tick();
 	t += (u64_t)count;
diff --git a/arch/arc/core/userspace.S b/arch/arc/core/userspace.S
index 0419aaa..38fccdc 100644
--- a/arch/arc/core/userspace.S
+++ b/arch/arc/core/userspace.S
@@ -52,7 +52,7 @@
 GTEXT(z_arch_user_string_nlen_fault_end)
 GTEXT(z_arch_user_string_nlen_fixup)
 /*
- * @brief Wrapper for _thread_entry in the case of user thread
+ * @brief Wrapper for z_thread_entry in the case of user thread
  * The init parameters are in privileged stack
  *
  * @return N/A
diff --git a/arch/arc/core/vector_table.c b/arch/arc/core/vector_table.c
index 9cdf9d7..b55de8e 100644
--- a/arch/arc/core/vector_table.c
+++ b/arch/arc/core/vector_table.c
@@ -46,7 +46,7 @@
 	u32_t unused_2;
 };
 
-struct vector_table _VectorTable _GENERIC_SECTION(.exc_vector_table) = {
+struct vector_table _VectorTable Z_GENERIC_SECTION(.exc_vector_table) = {
 	(u32_t)__reset,
 	(u32_t)__memory_error,
 	(u32_t)__instruction_error,
diff --git a/arch/arc/include/kernel_arch_func.h b/arch/arc/include/kernel_arch_func.h
index bd1e660..a2383f7 100644
--- a/arch/arc/include/kernel_arch_func.h
+++ b/arch/arc/include/kernel_arch_func.h
@@ -37,7 +37,7 @@
 }
 
 static ALWAYS_INLINE void
-_set_thread_return_value(struct k_thread *thread, unsigned int value)
+z_set_thread_return_value(struct k_thread *thread, unsigned int value)
 {
 	thread->arch.return_value = value;
 }
@@ -51,12 +51,12 @@
  */
 static ALWAYS_INLINE int _INTERRUPT_CAUSE(void)
 {
-	u32_t irq_num = _arc_v2_aux_reg_read(_ARC_V2_ICAUSE);
+	u32_t irq_num = z_arc_v2_aux_reg_read(_ARC_V2_ICAUSE);
 
 	return irq_num;
 }
 
-#define _is_in_isr	_arc_v2_irq_unit_is_in_isr
+#define z_is_in_isr	z_arc_v2_irq_unit_is_in_isr
 
 extern void _thread_entry_wrapper(void);
 extern void _user_thread_entry_wrapper(void);
diff --git a/arch/arc/include/kernel_arch_thread.h b/arch/arc/include/kernel_arch_thread.h
index 42e23d3..33b57f9 100644
--- a/arch/arc/include/kernel_arch_thread.h
+++ b/arch/arc/include/kernel_arch_thread.h
@@ -54,7 +54,7 @@
 	/* one of the _CAUSE_xxxx definitions above */
 	int relinquish_cause;
 
-	/* return value from _Swap */
+	/* return value from z_swap */
 	unsigned int return_value;
 
 #ifdef CONFIG_ARC_STACK_CHECKING
diff --git a/arch/arc/include/swap_macros.h b/arch/arc/include/swap_macros.h
index 23fc201..f941dd1 100644
--- a/arch/arc/include/swap_macros.h
+++ b/arch/arc/include/swap_macros.h
@@ -236,7 +236,7 @@
 	 * The pc and status32 values will still be on the stack. We cannot
 	 * pop them yet because the callers of _pop_irq_stack_frame must reload
 	 * status32 differently depending on the execution context they are
-	 * running in (_Swap(), firq or exception).
+	 * running in (z_swap(), firq or exception).
 	 */
 	add_s sp, sp, ___isf_t_SIZEOF
 
diff --git a/arch/arc/include/v2/cache.h b/arch/arc/include/v2/cache.h
index e077dbe..a6886d7 100644
--- a/arch/arc/include/v2/cache.h
+++ b/arch/arc/include/v2/cache.h
@@ -42,11 +42,11 @@
 	);
 	u32_t val;
 
-	val = _arc_v2_aux_reg_read(_ARC_V2_I_CACHE_BUILD);
+	val = z_arc_v2_aux_reg_read(_ARC_V2_I_CACHE_BUILD);
 	val &= 0xff;
 	if (val != 0) { /* is i-cache present? */
 		/* configure i-cache */
-		_arc_v2_aux_reg_write(_ARC_V2_IC_CTRL, icache_config);
+		z_arc_v2_aux_reg_write(_ARC_V2_IC_CTRL, icache_config);
 	}
 }
 
diff --git a/arch/arc/include/v2/irq.h b/arch/arc/include/v2/irq.h
index 26c9f6c..784c048 100644
--- a/arch/arc/include/v2/irq.h
+++ b/arch/arc/include/v2/irq.h
@@ -52,7 +52,7 @@
 	);
 
 	k_cpu_sleep_mode = _ARC_V2_WAKE_IRQ_LEVEL;
-	_arc_v2_aux_reg_write(_ARC_V2_AUX_IRQ_CTRL, aux_irq_ctrl_value);
+	z_arc_v2_aux_reg_write(_ARC_V2_AUX_IRQ_CTRL, aux_irq_ctrl_value);
 
 	_kernel.irq_stack =
 		K_THREAD_STACK_BUFFER(_interrupt_stack) + CONFIG_ISR_STACK_SIZE;
diff --git a/arch/arm/core/cortex_m/mpu/arm_core_mpu.c b/arch/arm/core/cortex_m/mpu/arm_core_mpu.c
index 04e870a..94a9fae 100644
--- a/arch/arm/core/cortex_m/mpu/arm_core_mpu.c
+++ b/arch/arm/core/cortex_m/mpu/arm_core_mpu.c
@@ -22,7 +22,7 @@
  * available MPU regions for dynamic programming depends on the number of the
  * static MPU regions currently being programmed, and the total number of HW-
  * available MPU regions. This macro is only used internally in function
- * _arch_configure_dynamic_mpu_regions(), to reserve sufficient area for the
+ * z_arch_configure_dynamic_mpu_regions(), to reserve sufficient area for the
  * array of dynamic regions passed to the underlying driver.
  */
 #if defined(CONFIG_USERSPACE)
@@ -58,7 +58,7 @@
  * For some MPU architectures, such as the unmodified ARMv8-M MPU,
  * the function must execute with MPU enabled.
  */
-void _arch_configure_static_mpu_regions(void)
+void z_arch_configure_static_mpu_regions(void)
 {
 #if defined(CONFIG_COVERAGE_GCOV) && defined(CONFIG_USERSPACE)
 		const struct k_mem_partition gcov_region =
@@ -141,7 +141,7 @@
  * For some MPU architectures, such as the unmodified ARMv8-M MPU,
  * the function must execute with MPU enabled.
  */
-void _arch_configure_dynamic_mpu_regions(struct k_thread *thread)
+void z_arch_configure_dynamic_mpu_regions(struct k_thread *thread)
 {
 	/* Define an array of k_mem_partition objects to hold the configuration
 	 * of the respective dynamic MPU regions to be programmed for
@@ -259,7 +259,7 @@
  *        that is supported by the MPU hardware, and with respect
  *        to the current static memory region configuration.
  */
-int _arch_mem_domain_max_partitions_get(void)
+int z_arch_mem_domain_max_partitions_get(void)
 {
 	int available_regions = arm_core_mpu_get_max_available_dyn_regions();
 
@@ -277,13 +277,13 @@
 /**
  * @brief Configure the memory domain of the thread.
  */
-void _arch_mem_domain_configure(struct k_thread *thread)
+void z_arch_mem_domain_configure(struct k_thread *thread)
 {
 	/* Request to configure memory domain for a thread.
 	 * This triggers re-programming of the entire dynamic
 	 * memory map.
 	 */
-	_arch_configure_dynamic_mpu_regions(thread);
+	z_arch_configure_dynamic_mpu_regions(thread);
 }
 
 /*
@@ -292,7 +292,7 @@
  *
  * @param domain pointer to the memory domain (must be valid)
  */
-void _arch_mem_domain_destroy(struct k_mem_domain *domain)
+void z_arch_mem_domain_destroy(struct k_mem_domain *domain)
 {
 	/* This function will reset the access permission configuration
 	 * of the active partitions of the memory domain.
@@ -324,7 +324,7 @@
  * @param partition_id the ID (sequence) number of the memory domain
  *        partition (must be a valid partition).
  */
-void _arch_mem_domain_partition_remove(struct k_mem_domain *domain,
+void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain,
 				       u32_t  partition_id)
 {
 	/* Request to remove a partition from a memory domain.
@@ -346,7 +346,7 @@
 /*
  * Validate the given buffer is user accessible or not
  */
-int _arch_buffer_validate(void *addr, size_t size, int write)
+int z_arch_buffer_validate(void *addr, size_t size, int write)
 {
 	return arm_core_mpu_buffer_validate(addr, size, write);
 }
diff --git a/arch/arm/core/cortex_m/nmi.c b/arch/arm/core/cortex_m/nmi.c
index 37dd169..e8d1a2e 100644
--- a/arch/arm/core/cortex_m/nmi.c
+++ b/arch/arm/core/cortex_m/nmi.c
@@ -34,7 +34,7 @@
  * @brief Default NMI handler installed when kernel is up
  *
 * The default handler outputs an error message and reboots the target. It is
- * installed by calling _NmiInit();
+ * installed by calling z_NmiInit();
  *
  * @return N/A
  */
@@ -57,7 +57,7 @@
  * @return N/A
  */
 
-void _NmiInit(void)
+void z_NmiInit(void)
 {
 	handler = _DefaultHandler;
 }
@@ -91,5 +91,5 @@
 void __nmi(void)
 {
 	handler();
-	_ExcExit();
+	z_ExcExit();
 }
diff --git a/arch/arm/core/cortex_m/prep_c.c b/arch/arm/core/cortex_m/prep_c.c
index 9aa76f4..12fc82b 100644
--- a/arch/arm/core/cortex_m/prep_c.c
+++ b/arch/arm/core/cortex_m/prep_c.c
@@ -10,7 +10,7 @@
  *
  *
  * Initialization of full C support: zero the .bss, copy the .data if XIP,
- * call _Cstart().
+ * call z_cstart().
  *
  * Stack is available in this module, but not the global data/bss until their
  * initialization is performed.
@@ -92,7 +92,7 @@
 #else
 
 #if defined(CONFIG_SW_VECTOR_RELAY)
-_GENERIC_SECTION(.vt_pointer_section) void *_vector_table_pointer;
+Z_GENERIC_SECTION(.vt_pointer_section) void *_vector_table_pointer;
 #endif
 
 #define VECTOR_ADDRESS 0
@@ -151,7 +151,7 @@
 }
 #endif
 
-extern FUNC_NORETURN void _Cstart(void);
+extern FUNC_NORETURN void z_cstart(void);
 /**
  *
  * @brief Prepare to and run C code
@@ -178,12 +178,12 @@
 	set_and_switch_to_psp();
 	relocate_vector_table();
 	enable_floating_point();
-	_bss_zero();
-	_data_copy();
+	z_bss_zero();
+	z_data_copy();
 #ifdef CONFIG_BOOT_TIME_MEASUREMENT
 	__start_time_stamp = 0U;
 #endif
 	_IntLibInit();
-	_Cstart();
+	z_cstart();
 	CODE_UNREACHABLE;
 }
diff --git a/arch/arm/core/exc_exit.S b/arch/arm/core/exc_exit.S
index 93b41b9..cd896ce 100644
--- a/arch/arm/core/exc_exit.S
+++ b/arch/arm/core/exc_exit.S
@@ -21,7 +21,7 @@
 
 _ASM_FILE_PROLOGUE
 
-GTEXT(_ExcExit)
+GTEXT(z_ExcExit)
 GTEXT(_IntExit)
 GDATA(_kernel)
 
@@ -53,7 +53,7 @@
 
 SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _IntExit)
 
-/* _IntExit falls through to _ExcExit (they are aliases of each other) */
+/* _IntExit falls through to z_ExcExit (they are aliases of each other) */
 
 /**
  *
@@ -65,7 +65,7 @@
  * @return N/A
  */
 
-SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit)
+SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_ExcExit)
 
 #ifdef CONFIG_PREEMPT_ENABLED
     ldr r0, =_kernel
@@ -88,7 +88,7 @@
 
 #ifdef CONFIG_STACK_SENTINEL
     push {r0, lr}
-    bl _check_stack_sentinel
+    bl z_check_stack_sentinel
 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
     pop {r0, r1}
     mov lr, r1
diff --git a/arch/arm/core/fatal.c b/arch/arm/core/fatal.c
index 63802fc..9be9ec8 100644
--- a/arch/arm/core/fatal.c
+++ b/arch/arm/core/fatal.c
@@ -8,7 +8,7 @@
  * @file
  * @brief Kernel fatal error handler for ARM Cortex-M
  *
- * This module provides the _NanoFatalErrorHandler() routine for ARM Cortex-M.
+ * This module provides the z_NanoFatalErrorHandler() routine for ARM Cortex-M.
  */
 
 #include <toolchain.h>
@@ -26,14 +26,14 @@
  *
  * This routine is called when fatal error conditions are detected by software
  * and is responsible only for reporting the error. Once reported, it then
- * invokes the user provided routine _SysFatalErrorHandler() which is
+ * invokes the user provided routine z_SysFatalErrorHandler() which is
  * responsible for implementing the error handling policy.
  *
  * The caller is expected to always provide a usable ESF. In the event that the
  * fatal error does not have a hardware generated ESF, the caller should either
  * create its own or use a pointer to the global default ESF <_default_esf>.
  *
- * Unlike other arches, this function may return if _SysFatalErrorHandler
+ * Unlike other arches, this function may return if z_SysFatalErrorHandler
  * determines that only the current thread should be aborted and the CPU
  * was in handler mode. PendSV will be asserted in this case and the current
  * thread taken off the run queue. Leaving the exception will immediately
@@ -44,7 +44,7 @@
  *
  * @return This function does not return.
  */
-void _NanoFatalErrorHandler(unsigned int reason,
+void z_NanoFatalErrorHandler(unsigned int reason,
 					  const NANO_ESF *pEsf)
 {
 	LOG_PANIC();
@@ -89,15 +89,15 @@
 	 * decide.
 	 */
 
-	_SysFatalErrorHandler(reason, pEsf);
+	z_SysFatalErrorHandler(reason, pEsf);
 }
 
 void _do_kernel_oops(const NANO_ESF *esf)
 {
-	_NanoFatalErrorHandler(esf->r0, esf);
+	z_NanoFatalErrorHandler(esf->r0, esf);
 }
 
-FUNC_NORETURN void _arch_syscall_oops(void *ssf_ptr)
+FUNC_NORETURN void z_arch_syscall_oops(void *ssf_ptr)
 {
 	u32_t *ssf_contents = ssf_ptr;
 	NANO_ESF oops_esf = { 0 };
diff --git a/arch/arm/core/fault.c b/arch/arm/core/fault.c
index a020552..f2d2abf 100644
--- a/arch/arm/core/fault.c
+++ b/arch/arm/core/fault.c
@@ -764,8 +764,8 @@
  *   error handling policy allows the system to recover from the error),
  * - reporting the error information,
  * - determining the error reason to be provided as input to the user-
- *   provided routine, _NanoFatalErrorHandler().
- * The _NanoFatalErrorHandler() is invoked once the above operations are
+ *   provided routine, z_NanoFatalErrorHandler().
+ * The z_NanoFatalErrorHandler() is invoked once the above operations are
  * completed, and is responsible for implementing the error handling policy.
  *
  * The provided ESF pointer points to the exception stack frame of the current
@@ -869,7 +869,7 @@
 	defined(CONFIG_ARM_NONSECURE_FIRMWARE)
 _exit_fatal:
 #endif
-	_NanoFatalErrorHandler(reason, esf);
+	z_NanoFatalErrorHandler(reason, esf);
 }
 
 /**
diff --git a/arch/arm/core/irq_init.c b/arch/arm/core/irq_init.c
index 7c56f9c..17d993c 100644
--- a/arch/arm/core/irq_init.c
+++ b/arch/arm/core/irq_init.c
@@ -11,7 +11,7 @@
  * The ARM Cortex-M architecture provides its own k_thread_abort() to deal with
  * different CPU modes (handler vs thread) when a thread aborts. When its entry
  * point returns or when it aborts itself, the CPU is in thread mode and must
- * call _Swap() (which triggers a service call), but when in handler mode, the
+ * call z_swap() (which triggers a service call), but when in handler mode, the
  * CPU must exit handler mode to cause the context switch, and thus must queue
  * the PendSV exception.
  */
diff --git a/arch/arm/core/irq_manage.c b/arch/arm/core/irq_manage.c
index a4712ef..0808e5b 100644
--- a/arch/arm/core/irq_manage.c
+++ b/arch/arm/core/irq_manage.c
@@ -40,7 +40,7 @@
  *
  * @return N/A
  */
-void _arch_irq_enable(unsigned int irq)
+void z_arch_irq_enable(unsigned int irq)
 {
 	NVIC_EnableIRQ((IRQn_Type)irq);
 }
@@ -54,7 +54,7 @@
  *
  * @return N/A
  */
-void _arch_irq_disable(unsigned int irq)
+void z_arch_irq_disable(unsigned int irq)
 {
 	NVIC_DisableIRQ((IRQn_Type)irq);
 }
@@ -65,7 +65,7 @@
  * @param irq IRQ line
  * @return interrupt enable state, true or false
  */
-int _arch_irq_is_enabled(unsigned int irq)
+int z_arch_irq_is_enabled(unsigned int irq)
 {
 	return NVIC->ISER[REG_FROM_IRQ(irq)] & (1 << BIT_FROM_IRQ(irq));
 }
@@ -81,7 +81,7 @@
  *
  * @return N/A
  */
-void _irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
+void z_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
 {
 	/* The kernel may reserve some of the highest priority levels.
 	 * So we offset the requested priority level with the number
@@ -126,7 +126,7 @@
  *
  * @return N/A
  */
-void _irq_spurious(void *unused)
+void z_irq_spurious(void *unused)
 {
 	ARG_UNUSED(unused);
 	__reserved();
@@ -163,7 +163,7 @@
 		s32_t idle_val = _kernel.idle;
 
 		_kernel.idle = 0;
-		_sys_power_save_idle_exit(idle_val);
+		z_sys_power_save_idle_exit(idle_val);
 	}
 
 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
@@ -177,7 +177,7 @@
 }
 #endif
 
-void _arch_isr_direct_header(void)
+void z_arch_isr_direct_header(void)
 {
 	z_sys_trace_isr_enter();
 }
@@ -239,12 +239,12 @@
 #endif /* CONFIG_ARM_SECURE_FIRMWARE */
 
 #ifdef CONFIG_DYNAMIC_INTERRUPTS
-int _arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
+int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
 			      void (*routine)(void *parameter), void *parameter,
 			      u32_t flags)
 {
 	z_isr_install(irq, routine, parameter);
-	_irq_priority_set(irq, priority, flags);
+	z_irq_priority_set(irq, priority, flags);
 	return irq;
 }
 #endif /* CONFIG_DYNAMIC_INTERRUPTS */
diff --git a/arch/arm/core/isr_wrapper.S b/arch/arm/core/isr_wrapper.S
index 59af840..43509d1 100644
--- a/arch/arm/core/isr_wrapper.S
+++ b/arch/arm/core/isr_wrapper.S
@@ -57,7 +57,7 @@
 	 * idle, this ensures that the calculation and programming of the device
 	 * for the next timer deadline is not interrupted.  For non-tickless idle,
 	 * this ensures that the clearing of the kernel idle state is not
-	 * interrupted.  In each case, _sys_power_save_idle_exit is called with
+	 * interrupted.  In each case, z_sys_power_save_idle_exit is called with
 	 * interrupts disabled.
 	 */
 	cpsid i  /* PRIMASK = 1 */
@@ -73,7 +73,7 @@
 	movs.n r1, #0
 	/* clear kernel idle state */
 	str r1, [r2, #_kernel_offset_to_idle]
-	blx _sys_power_save_idle_exit
+	blx z_sys_power_save_idle_exit
 _idle_state_cleared:
 
 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
@@ -81,7 +81,7 @@
 	movne	r1, #0
 		/* clear kernel idle state */
 		strne	r1, [r2, #_kernel_offset_to_idle]
-		blxne	_sys_power_save_idle_exit
+		blxne	z_sys_power_save_idle_exit
 #else
 #error Unknown ARM architecture
 #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
diff --git a/arch/arm/core/swap.c b/arch/arm/core/swap.c
index 310dd1f..2e31c6c 100644
--- a/arch/arm/core/swap.c
+++ b/arch/arm/core/swap.c
@@ -42,7 +42,7 @@
  * as BASEPRI is not available.
  *
  * @return -EAGAIN, or a return value set by a call to
- * _set_thread_return_value()
+ * z_set_thread_return_value()
  *
  */
 int __swap(int key)
diff --git a/arch/arm/core/swap_helper.S b/arch/arm/core/swap_helper.S
index bfe26a8..649a77c 100644
--- a/arch/arm/core/swap_helper.S
+++ b/arch/arm/core/swap_helper.S
@@ -187,7 +187,7 @@
     push {r2,lr}
     ldr r0, =_kernel
     ldr r0, [r0, #_kernel_offset_to_current]
-    bl _arch_configure_dynamic_mpu_regions
+    bl z_arch_configure_dynamic_mpu_regions
     pop {r2,lr}
 #endif
 
diff --git a/arch/arm/core/sys_fatal_error_handler.c b/arch/arm/core/sys_fatal_error_handler.c
index a263051..9873eb1 100644
--- a/arch/arm/core/sys_fatal_error_handler.c
+++ b/arch/arm/core/sys_fatal_error_handler.c
@@ -8,7 +8,7 @@
  * @file
  * @brief ARM Cortex-M system fatal error handler
  *
- * This module provides the _SysFatalErrorHandler() routine for Cortex-M
+ * This module provides the z_SysFatalErrorHandler() routine for Cortex-M
  * platforms.
  */
 
@@ -38,7 +38,7 @@
  *
  * @return This function does not return.
  */
-void __weak _SysFatalErrorHandler(unsigned int reason,
+void __weak z_SysFatalErrorHandler(unsigned int reason,
 					 const NANO_ESF *pEsf)
 {
 	ARG_UNUSED(pEsf);
@@ -52,7 +52,7 @@
 	if (reason == _NANO_ERR_KERNEL_PANIC) {
 		goto hang_system;
 	}
-	if (k_is_in_isr() || _is_thread_essential()) {
+	if (k_is_in_isr() || z_is_thread_essential()) {
 		printk("Fatal fault in %s! Spinning...\n",
 		       k_is_in_isr() ? "ISR" : "essential thread");
 		goto hang_system;
diff --git a/arch/arm/core/thread.c b/arch/arm/core/thread.c
index 2459d77..0e526bc 100644
--- a/arch/arm/core/thread.c
+++ b/arch/arm/core/thread.c
@@ -50,7 +50,7 @@
  * @return N/A
  */
 
-void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
+void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 		 size_t stackSize, k_thread_entry_t pEntry,
 		 void *parameter1, void *parameter2, void *parameter3,
 		 int priority, unsigned int options)
@@ -60,7 +60,7 @@
 	/* Offset between the top of stack and the high end of stack area. */
 	u32_t top_of_stack_offset = 0;
 
-	_ASSERT_VALID_PRIO(priority, pEntry);
+	Z_ASSERT_VALID_PRIO(priority, pEntry);
 
 #if defined(CONFIG_USERSPACE)
 	/* Truncate the stack size to align with the MPU region granularity.
@@ -106,12 +106,12 @@
 
 #if CONFIG_USERSPACE
 	if ((options & K_USER) != 0) {
-		pInitCtx->pc = (u32_t)_arch_user_mode_enter;
+		pInitCtx->pc = (u32_t)z_arch_user_mode_enter;
 	} else {
-		pInitCtx->pc = (u32_t)_thread_entry;
+		pInitCtx->pc = (u32_t)z_thread_entry;
 	}
 #else
-	pInitCtx->pc = (u32_t)_thread_entry;
+	pInitCtx->pc = (u32_t)z_thread_entry;
 #endif
 
 	/* force ARM mode by clearing LSB of address */
@@ -142,7 +142,7 @@
 
 #ifdef CONFIG_USERSPACE
 
-FUNC_NORETURN void _arch_user_mode_enter(k_thread_entry_t user_entry,
+FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry,
 	void *p1, void *p2, void *p3)
 {
 
diff --git a/arch/arm/core/thread_abort.c b/arch/arm/core/thread_abort.c
index eeb0aa6..613e174 100644
--- a/arch/arm/core/thread_abort.c
+++ b/arch/arm/core/thread_abort.c
@@ -11,7 +11,7 @@
  * The ARM Cortex-M architecture provides its own k_thread_abort() to deal
  * with different CPU modes (handler vs thread) when a thread aborts. When its
  * entry point returns or when it aborts itself, the CPU is in thread mode and
- * must call _Swap() (which triggers a service call), but when in handler
+ * must call z_swap() (which triggers a service call), but when in handler
  * mode, the CPU must exit handler mode to cause the context switch, and thus
  * must queue the PendSV exception.
  */
@@ -25,9 +25,9 @@
 #include <wait_q.h>
 #include <misc/__assert.h>
 
-extern void _k_thread_single_abort(struct k_thread *thread);
+extern void z_thread_single_abort(struct k_thread *thread);
 
-void _impl_k_thread_abort(k_tid_t thread)
+void z_impl_k_thread_abort(k_tid_t thread)
 {
 	unsigned int key;
 
@@ -36,12 +36,12 @@
 	__ASSERT(!(thread->base.user_options & K_ESSENTIAL),
 		 "essential thread aborted");
 
-	_k_thread_single_abort(thread);
-	_thread_monitor_exit(thread);
+	z_thread_single_abort(thread);
+	z_thread_monitor_exit(thread);
 
 	if (_current == thread) {
 		if ((SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) == 0) {
-			(void)_Swap_irqlock(key);
+			(void)z_swap_irqlock(key);
 			CODE_UNREACHABLE;
 		} else {
 			SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
@@ -49,5 +49,5 @@
 	}
 
 	/* The abort handler might have altered the ready queue. */
-	_reschedule_irqlock(key);
+	z_reschedule_irqlock(key);
 }
diff --git a/arch/arm/core/userspace.S b/arch/arm/core/userspace.S
index ba345a5..996e08b 100644
--- a/arch/arm/core/userspace.S
+++ b/arch/arm/core/userspace.S
@@ -68,7 +68,7 @@
     /* Re-program dynamic memory map.
      *
      * Important note:
-     * _arch_configure_dynamic_mpu_regions() may re-program the MPU Stack Guard
+     * z_arch_configure_dynamic_mpu_regions() may re-program the MPU Stack Guard
      * to guard the privilege stack for overflows (if building with option
      * CONFIG_MPU_STACK_GUARD). There is a risk of actually overflowing the
      * stack while doing the re-programming. We minimize the risk by placing
@@ -82,7 +82,7 @@
     push {r0,r1,r2,r3,ip,lr}
     ldr r0, =_kernel
     ldr r0, [r0, #_kernel_offset_to_current]
-    bl _arch_configure_dynamic_mpu_regions
+    bl z_arch_configure_dynamic_mpu_regions
     pop {r0,r1,r2,r3,ip,lr}
 #endif
 
@@ -152,8 +152,8 @@
      */
     isb
 
-    /* jump to _thread_entry entry */
-    ldr ip, =_thread_entry
+    /* jump to z_thread_entry entry */
+    ldr ip, =z_thread_entry
     bx ip
 
 /**
diff --git a/arch/arm/include/kernel_arch_func.h b/arch/arm/include/kernel_arch_func.h
index 6aa0fe1..704bd4a 100644
--- a/arch/arm/include/kernel_arch_func.h
+++ b/arch/arm/include/kernel_arch_func.h
@@ -30,8 +30,8 @@
 extern void _FaultInit(void);
 extern void _CpuIdleInit(void);
 #ifdef CONFIG_ARM_MPU
-extern void _arch_configure_static_mpu_regions(void);
-extern void _arch_configure_dynamic_mpu_regions(struct k_thread *thread);
+extern void z_arch_configure_static_mpu_regions(void);
+extern void z_arch_configure_dynamic_mpu_regions(struct k_thread *thread);
 #endif /* CONFIG_ARM_MPU */
 
 static ALWAYS_INLINE void kernel_arch_init(void)
@@ -56,7 +56,7 @@
 }
 
 static ALWAYS_INLINE void
-_arch_switch_to_main_thread(struct k_thread *main_thread,
+z_arch_switch_to_main_thread(struct k_thread *main_thread,
 			    k_thread_stack_t *main_stack,
 			    size_t main_stack_size, k_thread_entry_t _main)
 {
@@ -67,7 +67,7 @@
 	 *
 	 * This function is invoked once, upon system initialization.
 	 */
-	_arch_configure_static_mpu_regions();
+	z_arch_configure_static_mpu_regions();
 #endif
 
 	/* get high address of the stack, i.e. its start (stack grows down) */
@@ -110,21 +110,21 @@
 	 * If stack protection is enabled, make sure to set it
 	 * before jumping to thread entry function
 	 */
-	_arch_configure_dynamic_mpu_regions(main_thread);
+	z_arch_configure_dynamic_mpu_regions(main_thread);
 #endif
-	_thread_entry(_main, 0, 0, 0);
+	z_thread_entry(_main, 0, 0, 0);
 	CODE_UNREACHABLE;
 }
 
 static ALWAYS_INLINE void
-_set_thread_return_value(struct k_thread *thread, unsigned int value)
+z_set_thread_return_value(struct k_thread *thread, unsigned int value)
 {
 	thread->arch.swap_return_value = value;
 }
 
 extern void k_cpu_atomic_idle(unsigned int key);
 
-#define _is_in_isr() _IsInIsr()
+#define z_is_in_isr() _IsInIsr()
 
 extern FUNC_NORETURN void _arm_userspace_enter(k_thread_entry_t user_entry,
 					       void *p1, void *p2, void *p3,
diff --git a/arch/common/gen_isr_tables.py b/arch/common/gen_isr_tables.py
index 9ad13d4..68e929d 100755
--- a/arch/common/gen_isr_tables.py
+++ b/arch/common/gen_isr_tables.py
@@ -225,7 +225,7 @@
     offset = intlist["offset"]
     prefix = endian_prefix()
 
-    spurious_handler = "&_irq_spurious"
+    spurious_handler = "&z_irq_spurious"
     sw_irq_handler   = "ISR_WRAPPER"
 
     debug('offset is ' + str(offset))
diff --git a/arch/common/isr_tables.c b/arch/common/isr_tables.c
index b1f96ea..43b4dc8 100644
--- a/arch/common/isr_tables.c
+++ b/arch/common/isr_tables.c
@@ -21,7 +21,7 @@
  * header of the initList section, which is used by gen_isr_tables.py to create
  * the vector and sw isr tables,
  */
-_GENERIC_SECTION(.irq_info) struct int_list_header _iheader = {
+Z_GENERIC_SECTION(.irq_info) struct int_list_header _iheader = {
 	.table_size = IRQ_TABLE_SIZE,
 	.offset = CONFIG_GEN_IRQ_START_VECTOR,
 };
@@ -29,7 +29,7 @@
 /* These are placeholder tables. They will be replaced by the real tables
  * generated by gen_isr_tables.py.
  *
- * _irq_spurious and _isr_wrapper are used as placeholder values to
+ * z_irq_spurious and _isr_wrapper are used as placeholder values to
  * ensure that they are not optimized out in the first link. The first
  * link must contain the same symbols as the second one for the code
  * generation to work.
@@ -49,6 +49,6 @@
  */
 #ifdef CONFIG_GEN_SW_ISR_TABLE
 struct _isr_table_entry __sw_isr_table _sw_isr_table[IRQ_TABLE_SIZE] = {
-	[0 ...(IRQ_TABLE_SIZE - 1)] = {(void *)0x42, (void *)&_irq_spurious},
+	[0 ...(IRQ_TABLE_SIZE - 1)] = {(void *)0x42, (void *)&z_irq_spurious},
 };
 #endif
diff --git a/arch/common/timing_info_bench.c b/arch/common/timing_info_bench.c
index f8e127e..cab2cc2 100644
--- a/arch/common/timing_info_bench.c
+++ b/arch/common/timing_info_bench.c
@@ -35,7 +35,7 @@
 
 #elif CONFIG_X86
 #define TIMING_INFO_PRE_READ()
-#define TIMING_INFO_OS_GET_TIME()      (_tsc_read())
+#define TIMING_INFO_OS_GET_TIME()      (z_tsc_read())
 #define TIMING_INFO_GET_TIMER_VALUE()  (TIMING_INFO_OS_GET_TIME())
 #define SUBTRACT_CLOCK_CYCLES(val)     (val)
 
@@ -48,7 +48,7 @@
 #elif CONFIG_ARC
 #define TIMING_INFO_PRE_READ()
 #define TIMING_INFO_OS_GET_TIME()     (k_cycle_get_32())
-#define TIMING_INFO_GET_TIMER_VALUE() (_arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT))
+#define TIMING_INFO_GET_TIMER_VALUE() (z_arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT))
 #define SUBTRACT_CLOCK_CYCLES(val)    ((u32_t)val)
 
 #elif CONFIG_NIOS2
diff --git a/arch/nios2/core/crt0.S b/arch/nios2/core/crt0.S
index 632c49d..4fb3968 100644
--- a/arch/nios2/core/crt0.S
+++ b/arch/nios2/core/crt0.S
@@ -140,6 +140,6 @@
 	 */
 
 	/* Jump into C domain. _PrepC zeroes BSS, copies rw data into RAM,
-	 * and then enters _Cstart */
+	 * and then enters z_cstart */
 	call _PrepC
 
diff --git a/arch/nios2/core/fatal.c b/arch/nios2/core/fatal.c
index bec5894..66f9686 100644
--- a/arch/nios2/core/fatal.c
+++ b/arch/nios2/core/fatal.c
@@ -48,7 +48,7 @@
  *
  * @return This function does not return.
  */
-FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
+FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
 					  const NANO_ESF *esf)
 {
 	LOG_PANIC();
@@ -102,7 +102,7 @@
 	       esf->estatus);
 #endif
 
-	_SysFatalErrorHandler(reason, esf);
+	z_SysFatalErrorHandler(reason, esf);
 }
 
 #if defined(CONFIG_EXTRA_EXCEPTION_INFO) && defined(CONFIG_PRINTK) \
@@ -194,7 +194,7 @@
 #endif /* ALT_CPU_HAS_EXTRA_EXCEPTION_INFO */
 #endif /* CONFIG_PRINTK */
 
-	_NanoFatalErrorHandler(_NANO_ERR_CPU_EXCEPTION, esf);
+	z_NanoFatalErrorHandler(_NANO_ERR_CPU_EXCEPTION, esf);
 }
 
 
@@ -218,7 +218,7 @@
  *
  * @return N/A
  */
-FUNC_NORETURN __weak void _SysFatalErrorHandler(unsigned int reason,
+FUNC_NORETURN __weak void z_SysFatalErrorHandler(unsigned int reason,
 						const NANO_ESF *pEsf)
 {
 	ARG_UNUSED(pEsf);
@@ -232,7 +232,7 @@
 	if (reason == _NANO_ERR_KERNEL_PANIC) {
 		goto hang_system;
 	}
-	if (k_is_in_isr() || _is_thread_essential()) {
+	if (k_is_in_isr() || z_is_thread_essential()) {
 		printk("Fatal fault in %s! Spinning...\n",
 		       k_is_in_isr() ? "ISR" : "essential thread");
 		goto hang_system;
diff --git a/arch/nios2/core/irq_manage.c b/arch/nios2/core/irq_manage.c
index b64c993..0712a66 100644
--- a/arch/nios2/core/irq_manage.c
+++ b/arch/nios2/core/irq_manage.c
@@ -21,16 +21,16 @@
 #include <kswap.h>
 #include <tracing.h>
 
-void _irq_spurious(void *unused)
+void z_irq_spurious(void *unused)
 {
 	ARG_UNUSED(unused);
 	printk("Spurious interrupt detected! ipending: %x\n",
 	       _nios2_creg_read(NIOS2_CR_IPENDING));
-	_NanoFatalErrorHandler(_NANO_ERR_SPURIOUS_INT, &_default_esf);
+	z_NanoFatalErrorHandler(_NANO_ERR_SPURIOUS_INT, &_default_esf);
 }
 
 
-void _arch_irq_enable(unsigned int irq)
+void z_arch_irq_enable(unsigned int irq)
 {
 	u32_t ienable;
 	unsigned int key;
@@ -46,7 +46,7 @@
 
 
 
-void _arch_irq_disable(unsigned int irq)
+void z_arch_irq_disable(unsigned int irq)
 {
 	u32_t ienable;
 	unsigned int key;
@@ -103,12 +103,12 @@
 
 	_kernel.nested--;
 #ifdef CONFIG_STACK_SENTINEL
-	_check_stack_sentinel();
+	z_check_stack_sentinel();
 #endif
 }
 
 #ifdef CONFIG_DYNAMIC_INTERRUPTS
-int _arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
+int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
 			      void (*routine)(void *parameter), void *parameter,
 			      u32_t flags)
 {
diff --git a/arch/nios2/core/prep_c.c b/arch/nios2/core/prep_c.c
index 52b19dc..dc43466 100644
--- a/arch/nios2/core/prep_c.c
+++ b/arch/nios2/core/prep_c.c
@@ -10,7 +10,7 @@
  *
  *
  * Initialization of full C support: zero the .bss, copy the .data if XIP,
- * call _Cstart().
+ * call z_cstart().
  *
  * Stack is available in this module, but not the global data/bss until their
  * initialization is performed.
@@ -33,9 +33,9 @@
 
 void _PrepC(void)
 {
-	_bss_zero();
+	z_bss_zero();
 #ifdef CONFIG_XIP
-	_data_copy();
+	z_data_copy();
 	/* In most XIP scenarios we copy the exception code into RAM, so need
 	 * to flush instruction cache.
 	 */
@@ -48,6 +48,6 @@
 	_nios2_dcache_flush_all();
 #endif
 #endif
-	_Cstart();
+	z_cstart();
 	CODE_UNREACHABLE;
 }
diff --git a/arch/nios2/core/swap.S b/arch/nios2/core/swap.S
index 88e71ff..a16ab78 100644
--- a/arch/nios2/core/swap.S
+++ b/arch/nios2/core/swap.S
@@ -121,7 +121,7 @@
 
 	/*
 	 * Load return value into r2 (return value register). -EAGAIN unless
-	 * someone previously called _set_thread_return_value(). Do this before
+	 * someone previously called z_set_thread_return_value(). Do this before
 	 * we potentially unlock interrupts.
 	 */
 	ldw r2, _thread_offset_to_retval(r2)
@@ -191,5 +191,5 @@
 	/* pop all the stuff that we just loaded into registers */
 	addi sp, sp, 16
 
-	call _thread_entry
+	call z_thread_entry
 
diff --git a/arch/nios2/core/thread.c b/arch/nios2/core/thread.c
index 85a9062..e44ea11 100644
--- a/arch/nios2/core/thread.c
+++ b/arch/nios2/core/thread.c
@@ -11,7 +11,7 @@
 #include <string.h>
 
 /* forward declaration to asm function to adjust setup the arguments
- * to _thread_entry() since this arch puts the first four arguments
+ * to z_thread_entry() since this arch puts the first four arguments
  * in r4-r7 and not on the stack
  */
 void _thread_entry_wrapper(k_thread_entry_t, void *, void *, void *);
@@ -20,7 +20,7 @@
 	/* top of the stack / most recently pushed */
 
 	/* Used by _thread_entry_wrapper. pulls these off the stack and
-	 * into argument registers before calling _thread_entry()
+	 * into argument registers before calling z_thread_entry()
 	 */
 	k_thread_entry_t entry_point;
 	void *arg1;
@@ -31,13 +31,13 @@
 };
 
 
-void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
+void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 		 size_t stack_size, k_thread_entry_t thread_func,
 		 void *arg1, void *arg2, void *arg3,
 		 int priority, unsigned int options)
 {
 	char *stack_memory = K_THREAD_STACK_BUFFER(stack);
-	_ASSERT_VALID_PRIO(priority, thread_func);
+	Z_ASSERT_VALID_PRIO(priority, thread_func);
 
 	struct init_stack_frame *iframe;
 
diff --git a/arch/nios2/include/kernel_arch_func.h b/arch/nios2/include/kernel_arch_func.h
index d704b77..4e8ffc9 100644
--- a/arch/nios2/include/kernel_arch_func.h
+++ b/arch/nios2/include/kernel_arch_func.h
@@ -36,12 +36,12 @@
 }
 
 static ALWAYS_INLINE void
-_set_thread_return_value(struct k_thread *thread, unsigned int value)
+z_set_thread_return_value(struct k_thread *thread, unsigned int value)
 {
 	thread->callee_saved.retval = value;
 }
 
-#define _is_in_isr() (_kernel.nested != 0U)
+#define z_is_in_isr() (_kernel.nested != 0U)
 
 #ifdef CONFIG_IRQ_OFFLOAD
 void _irq_do_offload(void);
diff --git a/arch/nios2/include/kernel_arch_thread.h b/arch/nios2/include/kernel_arch_thread.h
index 5840977..9266a45 100644
--- a/arch/nios2/include/kernel_arch_thread.h
+++ b/arch/nios2/include/kernel_arch_thread.h
@@ -54,10 +54,10 @@
 	/* Stack pointer */
 	u32_t sp;
 
-	/* IRQ status before irq_lock() and call to _Swap() */
+	/* IRQ status before irq_lock() and call to z_swap() */
 	u32_t key;
 
-	/* Return value of _Swap() */
+	/* Return value of z_swap() */
 	u32_t retval;
 };
 
diff --git a/arch/posix/core/fatal.c b/arch/posix/core/fatal.c
index e1a8084..7e15f44 100644
--- a/arch/posix/core/fatal.c
+++ b/arch/posix/core/fatal.c
@@ -32,7 +32,7 @@
  *
  * @return This function does not return.
  */
-FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
+FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
 		const NANO_ESF *esf)
 {
 	LOG_PANIC();
@@ -72,9 +72,9 @@
 
 #endif
 
-	void _SysFatalErrorHandler(unsigned int reason,
+	void z_SysFatalErrorHandler(unsigned int reason,
 			const NANO_ESF *pEsf);
-	_SysFatalErrorHandler(reason, esf);
+	z_SysFatalErrorHandler(reason, esf);
 }
 
 
@@ -98,7 +98,7 @@
  *
  * @return N/A
  */
-FUNC_NORETURN __weak void _SysFatalErrorHandler(unsigned int reason,
+FUNC_NORETURN __weak void z_SysFatalErrorHandler(unsigned int reason,
 		const NANO_ESF *pEsf)
 {
 	ARG_UNUSED(pEsf);
@@ -111,7 +111,7 @@
 	if (reason == _NANO_ERR_KERNEL_PANIC) {
 		goto hang_system;
 	}
-	if (k_is_in_isr() || _is_thread_essential()) {
+	if (k_is_in_isr() || z_is_thread_essential()) {
 		posix_print_error_and_exit(
 			"Fatal fault in %s! Stopping...\n",
 			k_is_in_isr() ? "ISR" : "essential thread");
@@ -122,6 +122,6 @@
 hang_system:
 
 	posix_print_error_and_exit(
-		"Stopped in _SysFatalErrorHandler()\n");
+		"Stopped in z_SysFatalErrorHandler()\n");
 	CODE_UNREACHABLE;
 }
diff --git a/arch/posix/core/posix_core.c b/arch/posix/core/posix_core.c
index 5f14e93..1824633 100644
--- a/arch/posix/core/posix_core.c
+++ b/arch/posix/core/posix_core.c
@@ -205,7 +205,7 @@
 /**
  * Let the ready thread (main) run, and exit this thread (init)
  *
- * Called from _arch_switch_to_main_thread() which does the picking from the
+ * Called from z_arch_switch_to_main_thread() which does the picking from the
  * kernel structures
  *
  * Note that we could have just done a swap(), but that would have left the
@@ -298,7 +298,7 @@
 
 	posix_new_thread_pre_start();
 
-	_thread_entry(ptr->entry_point, ptr->arg1, ptr->arg2, ptr->arg3);
+	z_thread_entry(ptr->entry_point, ptr->arg1, ptr->arg2, ptr->arg3);
 
 	/*
 	 * We only reach this point if the thread actually returns which should
@@ -357,9 +357,9 @@
 }
 
 /**
- * Called from _new_thread(),
+ * Called from z_new_thread(),
  * Create a new POSIX thread for the new Zephyr thread.
- * _new_thread() picks from the kernel structures what it is that we need to
+ * z_new_thread() picks from the kernel structures what it is that we need to
  * call with what parameters
  */
 void posix_new_thread(posix_thread_status_t *ptr)
@@ -472,9 +472,9 @@
 
 #if defined(CONFIG_ARCH_HAS_THREAD_ABORT)
 
-extern void _k_thread_single_abort(struct k_thread *thread);
+extern void z_thread_single_abort(struct k_thread *thread);
 
-void _impl_k_thread_abort(k_tid_t thread)
+void z_impl_k_thread_abort(k_tid_t thread)
 {
 	unsigned int key;
 	int thread_idx;
@@ -490,8 +490,8 @@
 	__ASSERT(!(thread->base.user_options & K_ESSENTIAL),
 		 "essential thread aborted");
 
-	_k_thread_single_abort(thread);
-	_thread_monitor_exit(thread);
+	z_thread_single_abort(thread);
+	z_thread_monitor_exit(thread);
 
 	if (_current == thread) {
 		if (tstatus->aborted == 0) { /* LCOV_EXCL_BR_LINE */
@@ -510,7 +510,7 @@
 			thread_idx,
 			__func__);
 
-		(void)_Swap_irqlock(key);
+		(void)z_swap_irqlock(key);
 		CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
 	}
 
@@ -531,7 +531,7 @@
 	}
 
 	/* The abort handler might have altered the ready queue. */
-	_reschedule_irqlock(key);
+	z_reschedule_irqlock(key);
 }
 #endif
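
The _impl_k_thread_abort -> z_impl_k_thread_abort rename above follows the
syscall naming convention: k_thread_abort() is the public name, the z_impl_
symbol is the kernel-side implementation, and user-mode entry goes through a
generated verification handler (cf. the updated
kernel/include/syscall_handler.h and scripts/gen_syscall_header.py). A sketch
of the pattern for a hypothetical call -- k_do_thing and its handler are
illustrative, and the Z_SYSCALL_HANDLER spelling is assumed from this same
renaming:

    /* include/: public declaration, processed by gen_syscall_header.py */
    __syscall void k_do_thing(int arg);

    /* kernel/: the implementation, called directly from kernel mode */
    void z_impl_k_do_thing(int arg)
    {
            /* ... actual work ... */
    }

    /* user-mode entry: validate arguments, then forward */
    Z_SYSCALL_HANDLER(k_do_thing, arg)
    {
            z_impl_k_do_thing(arg);
            return 0;
    }
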
 
diff --git a/arch/posix/core/swap.c b/arch/posix/core/swap.c
index b56e231..5ea448b 100644
--- a/arch/posix/core/swap.c
+++ b/arch/posix/core/swap.c
@@ -30,7 +30,7 @@
  *
  *
  * @return -EAGAIN, or a return value set by a call to
- * _set_thread_return_value()
+ * z_set_thread_return_value()
  *
  */
 
@@ -48,7 +48,7 @@
  */
 	_kernel.current->callee_saved.key = key;
 	_kernel.current->callee_saved.retval = -EAGAIN;
-	/* retval may be modified with a call to _set_thread_return_value() */
+	/* retval may be modified with a call to z_set_thread_return_value() */
 
 	posix_thread_status_t *ready_thread_ptr =
 		(posix_thread_status_t *)
@@ -86,7 +86,7 @@
  * Note that we will never come back to this thread:
  * posix_core_main_thread_start() does never return
  */
-void _arch_switch_to_main_thread(struct k_thread *main_thread,
+void z_arch_switch_to_main_thread(struct k_thread *main_thread,
 		k_thread_stack_t *main_stack,
 		size_t main_stack_size, k_thread_entry_t _main)
 {
@@ -114,7 +114,7 @@
 		s32_t idle_val = _kernel.idle;
 
 		_kernel.idle = 0;
-		_sys_power_save_idle_exit(idle_val);
+		z_sys_power_save_idle_exit(idle_val);
 	}
 }
 #endif
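
The -EAGAIN default set above, combined with z_set_thread_return_value(), is
the contract blocking primitives build on: the pending side swaps out with
-EAGAIN preloaded (so the timeout path needs no extra work), and the waking
side overwrites the slot before readying the thread. A condensed sketch of the
two halves; the wait-queue type and helpers are illustrative, not real kernel
APIs:

    /* Pending side: swap out; -EAGAIN is already the default return. */
    static int pend_on_object(struct wait_q *wq, unsigned int key)
    {
            waitq_add_current(wq);          /* illustrative helper */
            return z_swap_irqlock(key);     /* 0 if woken, -EAGAIN on timeout */
    }

    /* Waking side: set the pender's return value, then ready it. */
    static void wake_one(struct wait_q *wq)
    {
            struct k_thread *t = waitq_remove_first(wq);    /* illustrative */

            z_set_thread_return_value(t, 0);
            z_ready_thread(t);
    }
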
diff --git a/arch/posix/core/thread.c b/arch/posix/core/thread.c
index 07917a2..06fa83f 100644
--- a/arch/posix/core/thread.c
+++ b/arch/posix/core/thread.c
@@ -45,7 +45,7 @@
  * pthreads stack and therefore we ignore the stack size
  *
  */
-void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
+void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 		size_t stack_size, k_thread_entry_t thread_func,
 		void *arg1, void *arg2, void *arg3,
 		int priority, unsigned int options)
@@ -53,7 +53,7 @@
 
 	char *stack_memory = K_THREAD_STACK_BUFFER(stack);
 
-	_ASSERT_VALID_PRIO(priority, thread_func);
+	Z_ASSERT_VALID_PRIO(priority, thread_func);
 
 	posix_thread_status_t *thread_status;
 
@@ -66,7 +66,7 @@
 		STACK_ROUND_DOWN(stack_memory + stack_size
 				- sizeof(*thread_status));
 
-	/* _thread_entry() arguments */
+	/* z_thread_entry() arguments */
 	thread_status->entry_point = thread_func;
 	thread_status->arg1 = arg1;
 	thread_status->arg2 = arg2;
diff --git a/arch/posix/include/kernel_arch_func.h b/arch/posix/include/kernel_arch_func.h
index 018fc5a..224f9cf 100644
--- a/arch/posix/include/kernel_arch_func.h
+++ b/arch/posix/include/kernel_arch_func.h
@@ -21,7 +21,7 @@
 #endif
 
 #if defined(CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN)
-void _arch_switch_to_main_thread(struct k_thread *main_thread,
+void z_arch_switch_to_main_thread(struct k_thread *main_thread,
 		k_thread_stack_t *main_stack,
 		size_t main_stack_size, k_thread_entry_t _main);
 #endif
@@ -44,7 +44,7 @@
 
 
 static ALWAYS_INLINE void
-_set_thread_return_value(struct k_thread *thread, unsigned int value)
+z_set_thread_return_value(struct k_thread *thread, unsigned int value)
 {
 	thread->callee_saved.retval = value;
 }
@@ -53,7 +53,7 @@
 }
 #endif
 
-#define _is_in_isr() (_kernel.nested != 0U)
+#define z_is_in_isr() (_kernel.nested != 0U)
 
 #endif /* _ASMLANGUAGE */
 
diff --git a/arch/posix/include/kernel_arch_thread.h b/arch/posix/include/kernel_arch_thread.h
index 01dd77a..d975949 100644
--- a/arch/posix/include/kernel_arch_thread.h
+++ b/arch/posix/include/kernel_arch_thread.h
@@ -32,10 +32,10 @@
 
 
 struct _callee_saved {
-	/* IRQ status before irq_lock() and call to _Swap() */
+	/* IRQ status before irq_lock() and call to z_swap() */
 	u32_t key;
 
-	/* Return value of _Swap() */
+	/* Return value of z_swap() */
 	u32_t retval;
 
 	/*
diff --git a/arch/posix/include/posix_soc_if.h b/arch/posix/include/posix_soc_if.h
index b3323ef..cddd6c7 100644
--- a/arch/posix/include/posix_soc_if.h
+++ b/arch/posix/include/posix_soc_if.h
@@ -26,11 +26,11 @@
 
 #include "soc_irq.h" /* Must exist and define _ARCH_IRQ/ISR_* macros */
 
-unsigned int _arch_irq_lock(void);
-void _arch_irq_unlock(unsigned int key);
-void _arch_irq_enable(unsigned int irq);
-void _arch_irq_disable(unsigned int irq);
-int  _arch_irq_is_enabled(unsigned int irq);
+unsigned int z_arch_irq_lock(void);
+void z_arch_irq_unlock(unsigned int key);
+void z_arch_irq_enable(unsigned int irq);
+void z_arch_irq_disable(unsigned int irq);
+int  z_arch_irq_is_enabled(unsigned int irq);
 unsigned int posix_irq_lock(void);
 void posix_irq_unlock(unsigned int key);
 void posix_irq_full_unlock(void);
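
z_arch_irq_lock()/z_arch_irq_unlock() are the backends of the public
irq_lock()/irq_unlock() pair. The key returned by the lock is what makes
nesting safe: each unlock restores the state captured by its matching lock
rather than unconditionally enabling interrupts. The usual shape:

    static void critical_section_example(void)
    {
            unsigned int key = irq_lock();  /* backed by z_arch_irq_lock() */

            /* A nested irq_lock()/irq_unlock() in a callee is harmless:
             * its unlock restores exactly the state its own key captured.
             */

            irq_unlock(key);                /* backed by z_arch_irq_unlock() */
    }
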
diff --git a/arch/riscv32/core/fatal.c b/arch/riscv32/core/fatal.c
index 5377ead..d0f8b1d 100644
--- a/arch/riscv32/core/fatal.c
+++ b/arch/riscv32/core/fatal.c
@@ -55,7 +55,7 @@
  *
  * @return This function does not return.
  */
-FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
+FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
 					  const NANO_ESF *esf)
 {
 	LOG_PANIC();
@@ -103,7 +103,7 @@
 	       esf->a2, esf->a3, esf->a4, esf->a5,
 	       esf->a6, esf->a7);
 
-	_SysFatalErrorHandler(reason, esf);
+	z_SysFatalErrorHandler(reason, esf);
 	/* spin forever */
 	for (;;)
 		__asm__ volatile("nop");
@@ -130,7 +130,7 @@
  *
  * @return N/A
  */
-FUNC_NORETURN __weak void _SysFatalErrorHandler(unsigned int reason,
+FUNC_NORETURN __weak void z_SysFatalErrorHandler(unsigned int reason,
 						const NANO_ESF *esf)
 {
 	ARG_UNUSED(esf);
@@ -146,7 +146,7 @@
 	if (reason == _NANO_ERR_KERNEL_PANIC) {
 		goto hang_system;
 	}
-	if (k_is_in_isr() || _is_thread_essential()) {
+	if (k_is_in_isr() || z_is_thread_essential()) {
 		printk("Fatal fault in %s! Spinning...\n",
 		       k_is_in_isr() ? "ISR" : "essential thread");
 		goto hang_system;
@@ -196,5 +196,5 @@
 	mcause &= SOC_MCAUSE_EXP_MASK;
 	printk("Exception cause %s (%d)\n", cause_str(mcause), (int)mcause);
 
-	_NanoFatalErrorHandler(_NANO_ERR_CPU_EXCEPTION, esf);
+	z_NanoFatalErrorHandler(_NANO_ERR_CPU_EXCEPTION, esf);
 }
diff --git a/arch/riscv32/core/irq_manage.c b/arch/riscv32/core/irq_manage.c
index d1b17d2..3006e1f 100644
--- a/arch/riscv32/core/irq_manage.c
+++ b/arch/riscv32/core/irq_manage.c
@@ -8,7 +8,7 @@
 #include <kernel_structs.h>
 #include <misc/printk.h>
 
-void _irq_spurious(void *unused)
+void z_irq_spurious(void *unused)
 {
 	u32_t mcause;
 
@@ -26,11 +26,11 @@
 	}
 #endif
 
-	_NanoFatalErrorHandler(_NANO_ERR_SPURIOUS_INT, &_default_esf);
+	z_NanoFatalErrorHandler(_NANO_ERR_SPURIOUS_INT, &_default_esf);
 }
 
 #ifdef CONFIG_DYNAMIC_INTERRUPTS
-int _arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
+int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
 			      void (*routine)(void *parameter), void *parameter,
 			      u32_t flags)
 {
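
z_arch_irq_connect_dynamic() is reached through the generic
irq_connect_dynamic() wrapper when CONFIG_DYNAMIC_INTERRUPTS=y. A sketch of
run-time registration; the IRQ line, priority and handler names are
illustrative:

    static void my_isr(void *arg)
    {
            /* arg is the parameter registered below */
    }

    void install_my_irq(void)
    {
            /* MY_IRQ_LINE / MY_IRQ_PRIO are placeholder values */
            irq_connect_dynamic(MY_IRQ_LINE, MY_IRQ_PRIO, my_isr, NULL, 0);
            irq_enable(MY_IRQ_LINE);
    }
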
diff --git a/arch/riscv32/core/isr.S b/arch/riscv32/core/isr.S
index dfad000..ddc380e 100644
--- a/arch/riscv32/core/isr.S
+++ b/arch/riscv32/core/isr.S
@@ -22,7 +22,7 @@
 
 GTEXT(_k_neg_eagain)
 GTEXT(_is_next_thread_current)
-GTEXT(_get_next_ready_thread)
+GTEXT(z_get_next_ready_thread)
 
 #ifdef CONFIG_TRACING
 GTEXT(z_sys_trace_thread_switched_in)
@@ -281,7 +281,7 @@
 	addi sp, t0, 0
 
 #ifdef CONFIG_STACK_SENTINEL
-	call _check_stack_sentinel
+	call z_check_stack_sentinel
 	la t1, _kernel
 #endif
 
@@ -332,7 +332,7 @@
 
 	/*
 	 * Save stack pointer of current thread and set the default return value
-	 * of _Swap to _k_neg_eagain for the thread.
+	 * of z_swap to _k_neg_eagain for the thread.
 	 */
 	sw sp, _thread_offset_to_sp(t1)
 	la t2, _k_neg_eagain
diff --git a/arch/riscv32/core/prep_c.c b/arch/riscv32/core/prep_c.c
index 96c7997..cc6cb85 100644
--- a/arch/riscv32/core/prep_c.c
+++ b/arch/riscv32/core/prep_c.c
@@ -9,7 +9,7 @@
  * @brief Full C support initialization
  *
  *
- * Initialization of full C support: zero the .bss and call _Cstart().
+ * Initialization of full C support: zero the .bss and call z_cstart().
  *
  * Stack is available in this module, but not the global data/bss until their
  * initialization is performed.
@@ -31,13 +31,13 @@
 
 void _PrepC(void)
 {
-	_bss_zero();
+	z_bss_zero();
 #ifdef CONFIG_XIP
-	_data_copy();
+	z_data_copy();
 #endif
 #if defined(CONFIG_RISCV_SOC_INTERRUPT_INIT)
 	soc_interrupt_init();
 #endif
-	_Cstart();
+	z_cstart();
 	CODE_UNREACHABLE;
 }
diff --git a/arch/riscv32/core/reset.S b/arch/riscv32/core/reset.S
index bd93438..9b1d978 100644
--- a/arch/riscv32/core/reset.S
+++ b/arch/riscv32/core/reset.S
@@ -59,6 +59,6 @@
 
 	/*
 	 * Jump into C domain. _PrepC zeroes BSS, copies rw data into RAM,
-	 * and then enters kernel _Cstart
+	 * and then enters kernel z_cstart
 	 */
 	call _PrepC
diff --git a/arch/riscv32/core/swap.S b/arch/riscv32/core/swap.S
index ed18cd6..aaee132 100644
--- a/arch/riscv32/core/swap.S
+++ b/arch/riscv32/core/swap.S
@@ -78,7 +78,7 @@
 	 * Prior to unlocking irq, load return value of
 	 * __swap to temp register t2 (from
 	 * _thread_offset_to_swap_return_value). Normally, it should be -EAGAIN,
-	 * unless someone has previously called _set_thread_return_value(..).
+	 * unless someone has previously called z_set_thread_return_value(..).
 	 */
 	la t0, _kernel
 
@@ -113,8 +113,8 @@
 	 * the thread stack (initialized via function _thread).
 	 * In this case, k_thread_entry_t, void *, void * and void * are stored
 	 * in registers a0, a1, a2 and a3. These registers are used as arguments
-	 * to function _thread_entry. Hence, just call _thread_entry with
+	 * to function z_thread_entry. Hence, just call z_thread_entry with
 	 * return address set to 0 to indicate a non-returning function call.
 	 */
 
-	jal x0, _thread_entry
+	jal x0, z_thread_entry
diff --git a/arch/riscv32/core/thread.c b/arch/riscv32/core/thread.c
index 2fd3357..ed3b555 100644
--- a/arch/riscv32/core/thread.c
+++ b/arch/riscv32/core/thread.c
@@ -15,13 +15,13 @@
 			   void *arg2,
 			   void *arg3);
 
-void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
+void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 		 size_t stack_size, k_thread_entry_t thread_func,
 		 void *arg1, void *arg2, void *arg3,
 		 int priority, unsigned int options)
 {
 	char *stack_memory = K_THREAD_STACK_BUFFER(stack);
-	_ASSERT_VALID_PRIO(priority, thread_func);
+	Z_ASSERT_VALID_PRIO(priority, thread_func);
 
 	struct __esf *stack_init;
 
diff --git a/arch/riscv32/include/kernel_arch_func.h b/arch/riscv32/include/kernel_arch_func.h
index a02bec0..fd7e9ea 100644
--- a/arch/riscv32/include/kernel_arch_func.h
+++ b/arch/riscv32/include/kernel_arch_func.h
@@ -32,16 +32,16 @@
 }
 
 static ALWAYS_INLINE void
-_set_thread_return_value(struct k_thread *thread, unsigned int value)
+z_set_thread_return_value(struct k_thread *thread, unsigned int value)
 {
 	thread->arch.swap_return_value = value;
 }
 
-FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
+FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
 					  const NANO_ESF *esf);
 
 
-#define _is_in_isr() (_kernel.nested != 0U)
+#define z_is_in_isr() (_kernel.nested != 0U)
 
 #ifdef CONFIG_IRQ_OFFLOAD
 int _irq_do_offload(void);
diff --git a/arch/riscv32/include/kernel_arch_thread.h b/arch/riscv32/include/kernel_arch_thread.h
index b57098f..24c2d7d 100644
--- a/arch/riscv32/include/kernel_arch_thread.h
+++ b/arch/riscv32/include/kernel_arch_thread.h
@@ -55,7 +55,7 @@
 typedef struct _caller_saved _caller_saved_t;
 
 struct _thread_arch {
-	u32_t swap_return_value; /* Return value of _Swap() */
+	u32_t swap_return_value; /* Return value of z_swap() */
 };
 
 typedef struct _thread_arch _thread_arch_t;
diff --git a/arch/x86/core/cpuhalt.c b/arch/x86/core/cpuhalt.c
index 0ca938e..b4935d4 100644
--- a/arch/x86/core/cpuhalt.c
+++ b/arch/x86/core/cpuhalt.c
@@ -44,7 +44,7 @@
  */
 void k_cpu_idle(void)
 {
-	_int_latency_stop();
+	z_int_latency_stop();
 	z_sys_trace_idle();
 #if defined(CONFIG_BOOT_TIME_MEASUREMENT)
 	__idle_time_stamp = (u64_t)k_cycle_get_32();
@@ -75,7 +75,7 @@
 
 void k_cpu_atomic_idle(unsigned int key)
 {
-	_int_latency_stop();
+	z_int_latency_stop();
 	z_sys_trace_idle();
 
 	__asm__ volatile (
@@ -96,7 +96,7 @@
 
 	/* restore interrupt lockout state before returning to caller */
 	if ((key & 0x200) == 0) {
-		_int_latency_start();
+		z_int_latency_start();
 		__asm__ volatile("cli");
 	}
 }
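
k_cpu_atomic_idle() exists for the race-free check-then-sleep idiom:
interrupts stay locked from the condition test all the way into the halt, and
the key passed in determines the lock state on wakeup. The canonical shape,
with work_available() as an illustrative predicate:

    void wait_for_work(void)
    {
            unsigned int key = irq_lock();

            while (!work_available()) {     /* illustrative predicate */
                    /* Atomically re-enable interrupts and halt; on return
                     * the lock state described by 'key' is restored.
                     */
                    k_cpu_atomic_idle(key);
                    key = irq_lock();       /* re-lock before re-testing */
            }

            irq_unlock(key);
    }
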
diff --git a/arch/x86/core/crt0.S b/arch/x86/core/crt0.S
index 1e93955..b7acffd 100644
--- a/arch/x86/core/crt0.S
+++ b/arch/x86/core/crt0.S
@@ -20,7 +20,7 @@
 	GTEXT(__start)
 
 	/* externs */
-	GTEXT(_Cstart)
+	GTEXT(z_cstart)
 
 	GDATA(_idt_base_address)
 	GDATA(_interrupt_stack)
@@ -247,7 +247,7 @@
 	/*
 	 * Set the stack pointer to the area used for the interrupt stack.
 	 * Note this stack is used during the execution of __start() and
-	 * _Cstart() until the multi-tasking kernel is initialized.  The
+	 * z_cstart() until the multi-tasking kernel is initialized.  The
 	 * dual-purposing of this area of memory is safe since
 	 * interrupts are disabled until the first context switch.
 	 *
@@ -377,7 +377,7 @@
 
 	/* Jump to C portion of kernel initialization and never return */
 
-	jmp	_Cstart
+	jmp	z_cstart
 
 
 _x86_bss_zero:
diff --git a/arch/x86/core/excstub.S b/arch/x86/core/excstub.S
index f8d721f..9c25b44 100644
--- a/arch/x86/core/excstub.S
+++ b/arch/x86/core/excstub.S
@@ -134,7 +134,7 @@
 
 	/*
 	 * Set the _EXC_ACTIVE state bit of the current thread.
-	 * This enables _Swap() to preserve the thread's FP registers
+	 * This enables z_swap() to preserve the thread's FP registers
 	 * (where needed) if the exception handler causes a context switch.
 	 * It also indicates to debug tools that an exception is being
 	 * handled in the event of a context switch.
diff --git a/arch/x86/core/fatal.c b/arch/x86/core/fatal.c
index fffddf5..562a933 100644
--- a/arch/x86/core/fatal.c
+++ b/arch/x86/core/fatal.c
@@ -8,7 +8,7 @@
  * @file
  * @brief Kernel fatal error handler
  *
- * This module provides the _NanoFatalErrorHandler() routine.
+ * This module provides the z_NanoFatalErrorHandler() routine.
  */
 
 #include <toolchain.h>
@@ -43,9 +43,9 @@
 {
 	u32_t start, end;
 
-	if (_is_in_isr()) {
+	if (z_is_in_isr()) {
 		/* We were servicing an interrupt */
-		start = (u32_t)_ARCH_THREAD_STACK_BUFFER(_interrupt_stack);
+		start = (u32_t)Z_ARCH_THREAD_STACK_BUFFER(_interrupt_stack);
 		end = start + CONFIG_ISR_STACK_SIZE;
 	} else if ((cs & 0x3) != 0 ||
 		   (_current->base.user_options & K_USER) == 0) {
@@ -134,7 +134,7 @@
  *
  * @return This function does not return.
  */
-FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
+FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
 					  const NANO_ESF *pEsf)
 {
 	LOG_PANIC();
@@ -208,10 +208,10 @@
 	 * fatal error handling policy defined for the platform.
 	 */
 
-	_SysFatalErrorHandler(reason, pEsf);
+	z_SysFatalErrorHandler(reason, pEsf);
 }
 
-FUNC_NORETURN void _arch_syscall_oops(void *ssf_ptr)
+FUNC_NORETURN void z_arch_syscall_oops(void *ssf_ptr)
 {
 	struct _x86_syscall_stack_frame *ssf =
 		(struct _x86_syscall_stack_frame *)ssf_ptr;
@@ -225,14 +225,14 @@
 		oops.esp = ssf->esp;
 	}
 
-	_NanoFatalErrorHandler(_NANO_ERR_KERNEL_OOPS, &oops);
+	z_NanoFatalErrorHandler(_NANO_ERR_KERNEL_OOPS, &oops);
 }
 
 #ifdef CONFIG_X86_KERNEL_OOPS
 FUNC_NORETURN void _do_kernel_oops(const NANO_ESF *esf)
 {
 	u32_t *stack_ptr = (u32_t *)esf->esp;
-	_NanoFatalErrorHandler(*stack_ptr, esf);
+	z_NanoFatalErrorHandler(*stack_ptr, esf);
 }
 
 extern void (*_kernel_oops_handler)(void);
@@ -242,7 +242,7 @@
 #endif
 
 /*
- * Define a default ESF for use with _NanoFatalErrorHandler() in the event
+ * Define a default ESF for use with z_NanoFatalErrorHandler() in the event
  * the caller does not have a NANO_ESF to pass
  */
 const NANO_ESF _default_esf = {
@@ -280,7 +280,7 @@
 	if ((BIT(vector) & _EXC_ERROR_CODE_FAULTS) != 0) {
 		printk("***** Exception code: 0x%x\n", pEsf->errorCode);
 	}
-	_NanoFatalErrorHandler(_NANO_ERR_CPU_EXCEPTION, pEsf);
+	z_NanoFatalErrorHandler(_NANO_ERR_CPU_EXCEPTION, pEsf);
 }
 
 #define _EXC_FUNC(vector) \
@@ -351,7 +351,7 @@
 {
 	x86_page_entry_data_t pde_flags, pte_flags;
 
-	_x86_mmu_get_flags(pdpt, addr, &pde_flags, &pte_flags);
+	z_x86_mmu_get_flags(pdpt, addr, &pde_flags, &pte_flags);
 
 	printk("PDE: ");
 	dump_entry_flags(pde_flags);
@@ -414,10 +414,10 @@
 #endif
 #ifdef CONFIG_THREAD_STACK_INFO
 	if (check_stack_bounds(esf->esp, 0, esf->cs)) {
-		_NanoFatalErrorHandler(_NANO_ERR_STACK_CHK_FAIL, esf);
+		z_NanoFatalErrorHandler(_NANO_ERR_STACK_CHK_FAIL, esf);
 	}
 #endif
-	_NanoFatalErrorHandler(_NANO_ERR_CPU_EXCEPTION, esf);
+	z_NanoFatalErrorHandler(_NANO_ERR_CPU_EXCEPTION, esf);
 	CODE_UNREACHABLE;
 }
 _EXCEPTION_CONNECT_CODE(page_fault_handler, IV_PAGE_FAULT);
@@ -437,7 +437,7 @@
 extern char z_trampoline_stack_end[];
 #endif
 
-_GENERIC_SECTION(.tss)
+Z_GENERIC_SECTION(.tss)
 struct task_state_segment _main_tss = {
 	.ss0 = DATA_SEG,
 #ifdef CONFIG_X86_KPTI
@@ -450,7 +450,7 @@
 };
 
 /* Special TSS for handling double-faults with a known good stack */
-_GENERIC_SECTION(.tss)
+Z_GENERIC_SECTION(.tss)
 struct task_state_segment _df_tss = {
 	.esp = (u32_t)(_df_stack + sizeof(_df_stack)),
 	.cs = CODE_SEG,
@@ -476,7 +476,7 @@
 		reason = _NANO_ERR_STACK_CHK_FAIL;
 	}
 #endif
-	_NanoFatalErrorHandler(reason, (NANO_ESF *)&_df_esf);
+	z_NanoFatalErrorHandler(reason, (NANO_ESF *)&_df_esf);
 }
 
 static FUNC_NORETURN __used void _df_handler_top(void)
@@ -499,7 +499,7 @@
 	_df_esf.eflags = _main_tss.eflags;
 
 	/* Restore the main IA task to a runnable state */
-	_main_tss.esp = (u32_t)(_ARCH_THREAD_STACK_BUFFER(_interrupt_stack) +
+	_main_tss.esp = (u32_t)(Z_ARCH_THREAD_STACK_BUFFER(_interrupt_stack) +
 				CONFIG_ISR_STACK_SIZE);
 	_main_tss.cs = CODE_SEG;
 	_main_tss.ds = DATA_SEG;
diff --git a/arch/x86/core/float.c b/arch/x86/core/float.c
index a9baa5c..9332207 100644
--- a/arch/x86/core/float.c
+++ b/arch/x86/core/float.c
@@ -88,7 +88,7 @@
  * Enable preservation of floating point context information.
  *
  * The transition from "non-FP supporting" to "FP supporting" must be done
- * atomically to avoid confusing the floating point logic used by _Swap(), so
+ * atomically to avoid confusing the floating point logic used by z_swap(), so
  * this routine locks interrupts to ensure that a context switch does not occur.
  * The locking isn't really needed when the routine is called by a cooperative
  * thread (since context switching can't occur), but it is harmless.
@@ -167,7 +167,7 @@
 			 *
 			 * The saved FP context is needed in case the thread
 			 * we enabled FP support for is currently pre-empted,
-			 * since _Swap() uses it to restore FP context when
+			 * since z_swap() uses it to restore FP context when
 			 * the thread re-activates.
 			 *
 			 * Saving the FP context reinits the FPU, and thus
@@ -187,7 +187,7 @@
  * Disable preservation of floating point context information.
  *
  * The transition from "FP supporting" to "non-FP supporting" must be done
- * atomically to avoid confusing the floating point logic used by _Swap(), so
+ * atomically to avoid confusing the floating point logic used by z_swap(), so
  * this routine locks interrupts to ensure that a context switch does not occur.
  * The locking isn't really needed when the routine is called by a cooperative
  * thread (since context switching can't occur), but it is harmless.
diff --git a/arch/x86/core/intstub.S b/arch/x86/core/intstub.S
index b98f3c1..40edbb4 100644
--- a/arch/x86/core/intstub.S
+++ b/arch/x86/core/intstub.S
@@ -33,13 +33,13 @@
 	GTEXT(__swap)
 
 #ifdef CONFIG_SYS_POWER_MANAGEMENT
-	GTEXT(_sys_power_save_idle_exit)
+	GTEXT(z_sys_power_save_idle_exit)
 #endif
 
 
 #ifdef CONFIG_INT_LATENCY_BENCHMARK
-	GTEXT(_int_latency_start)
-	GTEXT(_int_latency_stop)
+	GTEXT(z_int_latency_start)
+	GTEXT(z_int_latency_stop)
 #endif
 /**
  *
@@ -150,7 +150,7 @@
 	 * interrupt.
 	 */
 
-	call	_int_latency_start
+	call	z_int_latency_start
 #endif
 
 	call	z_sys_trace_isr_enter
@@ -196,7 +196,7 @@
 #ifdef CONFIG_INT_LATENCY_BENCHMARK
 	pushl	%eax
 	pushl	%edx
-	call	_int_latency_stop
+	call	z_int_latency_stop
 	popl	%edx
 	popl	%eax
 #endif
@@ -237,7 +237,7 @@
 	_irq_controller_eoi_macro
 
 #ifdef CONFIG_INT_LATENCY_BENCHMARK
-	call	_int_latency_start
+	call	z_int_latency_start
 #endif
 
 	/* determine whether exiting from a nested interrupt */
@@ -275,7 +275,7 @@
 	popl	%esp	/* switch back to outgoing thread's stack */
 
 #ifdef CONFIG_STACK_SENTINEL
-	call	_check_stack_sentinel
+	call	z_check_stack_sentinel
 #endif
 	pushfl			/* push KERNEL_LOCK_KEY argument */
 #ifdef CONFIG_X86_IAMCU
@@ -309,7 +309,7 @@
 
 	/* Restore volatile registers and return to the interrupted thread */
 #ifdef CONFIG_INT_LATENCY_BENCHMARK
-	call	_int_latency_stop
+	call	z_int_latency_stop
 #endif
 	popl	%edi
 	popl	%ecx
@@ -331,7 +331,7 @@
 	popl	%esp		/* pop thread stack pointer */
 
 #ifdef CONFIG_STACK_SENTINEL
-	call	_check_stack_sentinel
+	call	z_check_stack_sentinel
 #endif
 
 	/* fall through to 'nestedInterrupt' */
@@ -345,7 +345,7 @@
 
 nestedInterrupt:
 #ifdef CONFIG_INT_LATENCY_BENCHMARK
-	call	_int_latency_stop
+	call	z_int_latency_stop
 #endif
 
 	popl	%edi
@@ -360,7 +360,7 @@
 handle_idle:
 	pushl	%eax
 	pushl	%edx
-	/* Populate 'ticks' argument to _sys_power_save_idle_exit */
+	/* Populate 'ticks' argument to z_sys_power_save_idle_exit */
 #ifdef CONFIG_X86_IAMCU
 	movl	_kernel_offset_to_idle(%ecx), %eax
 #else
@@ -371,13 +371,13 @@
 	movl	$0, _kernel_offset_to_idle(%ecx)
 
 	/*
-	 * Beware that a timer driver's _sys_power_save_idle_exit() implementation might
+	 * Beware that a timer driver's z_sys_power_save_idle_exit() implementation might
 	 * expect that interrupts are disabled when invoked.  This ensures that
 	 * the calculation and programming of the device for the next timer
 	 * deadline is not interrupted.
 	 */
 
-	call	_sys_power_save_idle_exit
+	call	z_sys_power_save_idle_exit
 #ifndef CONFIG_X86_IAMCU
 	/* SYS V: discard 'ticks' argument passed on the stack */
 	add	$0x4, %esp
@@ -457,7 +457,7 @@
 	movl	$_NANO_ERR_SPURIOUS_INT, %eax
 #endif
 	/* call the fatal error handler */
-	call	_NanoFatalErrorHandler
+	call	z_NanoFatalErrorHandler
 
 	/* handler doesn't return */
 
diff --git a/arch/x86/core/irq_manage.c b/arch/x86/core/irq_manage.c
index d28d80c..88f128d 100644
--- a/arch/x86/core/irq_manage.c
+++ b/arch/x86/core/irq_manage.c
@@ -48,32 +48,32 @@
  */
 
 #ifdef CONFIG_SYS_POWER_MANAGEMENT
-void _arch_irq_direct_pm(void)
+void z_arch_irq_direct_pm(void)
 {
 	if (_kernel.idle) {
 		s32_t idle_val = _kernel.idle;
 
 		_kernel.idle = 0;
-		_sys_power_save_idle_exit(idle_val);
+		z_sys_power_save_idle_exit(idle_val);
 	}
 }
 #endif
 
-void _arch_isr_direct_header(void)
+void z_arch_isr_direct_header(void)
 {
-	_int_latency_start();
+	z_int_latency_start();
 	z_sys_trace_isr_enter();
 
 	/* We're not going to unlock IRQs, but we still need to increment this
-	 * so that _is_in_isr() works
+	 * so that z_is_in_isr() works
 	 */
 	++_kernel.nested;
 }
 
-void _arch_isr_direct_footer(int swap)
+void z_arch_isr_direct_footer(int swap)
 {
 	_irq_controller_eoi();
-	_int_latency_stop();
+	z_int_latency_stop();
 	sys_trace_isr_exit();
 	--_kernel.nested;
 
@@ -87,7 +87,7 @@
 	    _kernel.ready_q.cache != _current) {
 		unsigned int flags;
 
-		/* Fetch EFLAGS argument to _Swap() */
+		/* Fetch EFLAGS argument to z_swap() */
 		__asm__ volatile (
 			"pushfl\n\t"
 			"popl %0\n\t"
@@ -95,7 +95,7 @@
 			:
 			: "memory"
 			);
-		(void)_Swap_irqlock(flags);
+		(void)z_swap_irqlock(flags);
 	}
 }
 
@@ -301,7 +301,7 @@
  * the processor.
  */
 
-int _arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
+int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
 		void (*routine)(void *parameter), void *parameter,
 		u32_t flags)
 {
@@ -310,7 +310,7 @@
 	key = irq_lock();
 
 #ifdef CONFIG_X86_FIXED_IRQ_MAPPING
-	vector = _IRQ_TO_INTERRUPT_VECTOR(irq);
+	vector = Z_IRQ_TO_INTERRUPT_VECTOR(irq);
 #else
 	vector = priority_to_free_vector(priority);
 	/* 0 indicates not used, vectors for interrupts start at 32 */
@@ -318,7 +318,7 @@
 		 "IRQ %d already configured", irq);
 	_irq_to_interrupt_vector[irq] = vector;
 #endif
-	_irq_controller_irq_config(vector, irq, flags);
+	z_irq_controller_irq_config(vector, irq, flags);
 
 	stub_idx = next_irq_stub++;
 	__ASSERT(stub_idx < CONFIG_X86_DYNAMIC_IRQ_STUBS,
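
z_arch_isr_direct_header() and z_arch_isr_direct_footer() above are what
ISR_DIRECT_DECLARE() expands around a user-written direct ISR: the header
keeps z_is_in_isr() accurate, the footer issues the EOI and optionally
reschedules. A sketch of a direct ISR; the IRQ line, priority and body are
illustrative:

    ISR_DIRECT_DECLARE(my_direct_isr)
    {
            do_urgent_work();       /* illustrative; keep this path short */

            return 1;               /* nonzero: footer may reschedule on exit */
    }

    void install_direct(void)
    {
            IRQ_DIRECT_CONNECT(MY_IRQ_LINE, MY_IRQ_PRIO, my_direct_isr, 0);
            irq_enable(MY_IRQ_LINE);
    }
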
diff --git a/arch/x86/core/swap.S b/arch/x86/core/swap.S
index 0ac727a..8298eb1 100644
--- a/arch/x86/core/swap.S
+++ b/arch/x86/core/swap.S
@@ -74,7 +74,7 @@
  * potential security leaks.
  *
  * @return -EAGAIN, or a return value set by a call to
- * _set_thread_return_value()
+ * z_set_thread_return_value()
  *
  * C function prototype:
  *
@@ -130,7 +130,7 @@
 	 * Carve space for the return value. Setting it to a default of
 	 * -EAGAIN eliminates the need for the timeout code to set it.
 	 * If another value is ever needed, it can be modified with
-	 * _set_thread_return_value().
+	 * z_set_thread_return_value().
 	 */
 
 	pushl   _k_neg_eagain
@@ -336,7 +336,7 @@
 	movl	_thread_offset_to_esp(%eax), %esp
 
 
-	/* load return value from a possible _set_thread_return_value() */
+	/* load return value from a possible z_set_thread_return_value() */
 
 	popl	%eax
 
@@ -351,7 +351,7 @@
 	 * %eax may contain one of these values:
 	 *
 	 * - the return value for __swap() that was set up by a call to
-	 * _set_thread_return_value()
+	 * z_set_thread_return_value()
 	 * - -EINVAL
 	 */
 
@@ -365,7 +365,7 @@
 	/* save %eax since it used as the return value for __swap */
 	pushl	%eax
 	/* interrupts are being reenabled, stop accumulating time */
-	call	_int_latency_stop
+	call	z_int_latency_stop
 	/* restore __swap's %eax */
 	popl	%eax
 
@@ -398,11 +398,11 @@
  *
  * @brief Adjust stack/parameters before invoking thread entry function
  *
- * This function adjusts the initial stack frame created by _new_thread() such
+ * This function adjusts the initial stack frame created by z_new_thread() such
  * that the GDB stack frame unwinders recognize it as the outermost frame in
  * the thread's stack.  For targets that use the IAMCU calling convention, the
  * first three arguments are popped into eax, edx, and ecx. The function then
- * jumps to _thread_entry().
+ * jumps to z_thread_entry().
  *
  * GDB normally stops unwinding a stack when it detects that it has
  * reached a function called main().  Kernel threads, however, do not have
@@ -411,9 +411,9 @@
  *
  * SYS V Systems:
  *
- * Given the initial thread created by _new_thread(), GDB expects to find a
+ * Given the initial thread created by z_new_thread(), GDB expects to find a
  * return address on the stack immediately above the thread entry routine
- * _thread_entry, in the location occupied by the initial EFLAGS.
+ * z_thread_entry, in the location occupied by the initial EFLAGS.
  * GDB attempts to examine the memory at this return address, which typically
  * results in an invalid access to page 0 of memory.
  *
@@ -422,17 +422,17 @@
  * an invalid access to address zero and returns an error, which causes the
  * GDB stack unwinder to stop somewhat gracefully.
  *
- * The initial EFLAGS cannot be overwritten until after _Swap() has swapped in
- * the new thread for the first time.  This routine is called by _Swap() the
+ * The initial EFLAGS cannot be overwritten until after z_swap() has swapped in
+ * the new thread for the first time.  This routine is called by z_swap() the
  * first time that the new thread is swapped in, and it jumps to
- * _thread_entry after it has done its work.
+ * z_thread_entry after it has done its work.
  *
  * IAMCU Systems:
  *
- * There is no EFLAGS on the stack when we get here. _thread_entry() takes
+ * There is no EFLAGS on the stack when we get here. z_thread_entry() takes
  * four arguments, and we need to pop off the first three into the
  * appropriate registers. Instead of using the 'call' instruction, we push
- * a NULL return address onto the stack and jump into _thread_entry,
+ * a NULL return address onto the stack and jump into z_thread_entry,
  * ensuring the stack won't be unwound further. Placing some kind of return
  * address on the stack is mandatory so this isn't conditionally compiled.
  *
@@ -443,13 +443,13 @@
  *      |__________________|                  |
  *      |      param1      |                  V
  *      |__________________|
- *      |      pEntry      |  <----   ESP when invoked by _Swap() on IAMCU
+ *      |      pEntry      |  <----   ESP when invoked by z_swap() on IAMCU
  *      |__________________|
- *      | initial EFLAGS   |  <----   ESP when invoked by _Swap() on Sys V
+ *      | initial EFLAGS   |  <----   ESP when invoked by z_swap() on Sys V
  *      |__________________|             (Zeroed by this routine on Sys V)
  *
  * The address of the thread entry function needs to be in %edi when this is
- * invoked. It will either be _thread_entry, or if userspace is enabled,
+ * invoked. It will either be z_thread_entry, or if userspace is enabled,
  * _arch_drop_to_user_mode if this is a user thread.
  *
  * @return this routine does NOT return.
diff --git a/arch/x86/core/sys_fatal_error_handler.c b/arch/x86/core/sys_fatal_error_handler.c
index 47f04cc..e78a98c 100644
--- a/arch/x86/core/sys_fatal_error_handler.c
+++ b/arch/x86/core/sys_fatal_error_handler.c
@@ -8,7 +8,7 @@
  * @file
  * @brief Common system fatal error handler
  *
- * This module provides the _SysFatalErrorHandler() routine which is common to
+ * This module provides the z_SysFatalErrorHandler() routine which is common to
  * supported platforms.
  */
 
@@ -39,7 +39,7 @@
  *
  * @return This function does not return.
  */
-FUNC_NORETURN __weak void _SysFatalErrorHandler(unsigned int reason,
+FUNC_NORETURN __weak void z_SysFatalErrorHandler(unsigned int reason,
 					 const NANO_ESF *pEsf)
 {
 	ARG_UNUSED(pEsf);
@@ -55,7 +55,7 @@
 	if (reason == _NANO_ERR_KERNEL_PANIC) {
 		goto hang_system;
 	}
-	if (k_is_in_isr() || _is_thread_essential()) {
+	if (k_is_in_isr() || z_is_thread_essential()) {
 		printk("Fatal fault in %s! Spinning...\n",
 		       k_is_in_isr() ? "ISR" : "essential thread");
 		goto hang_system;
diff --git a/arch/x86/core/thread.c b/arch/x86/core/thread.c
index 619b365..b6d1ec4 100644
--- a/arch/x86/core/thread.c
+++ b/arch/x86/core/thread.c
@@ -26,7 +26,7 @@
 /* forward declaration */
 
 /* Initial thread stack frame, such that everything is laid out as expected
- * for when _Swap() switches to it for the first time.
+ * for when z_swap() switches to it for the first time.
  */
 struct _x86_initial_frame {
 	u32_t swap_retval;
@@ -58,7 +58,7 @@
  * @param priority thread priority
  * @param options thread options: K_ESSENTIAL, K_FP_REGS, K_SSE_REGS
  */
-void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
+void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 		 size_t stack_size, k_thread_entry_t entry,
 		 void *parameter1, void *parameter2, void *parameter3,
 		 int priority, unsigned int options)
@@ -67,7 +67,7 @@
 	char *stack_high;
 	struct _x86_initial_frame *initial_frame;
 
-	_ASSERT_VALID_PRIO(priority, entry);
+	Z_ASSERT_VALID_PRIO(priority, entry);
 	stack_buf = K_THREAD_STACK_BUFFER(stack);
 	_new_thread_init(thread, stack_buf, stack_size, priority, options);
 
@@ -75,7 +75,7 @@
 	if ((options & K_USER) == 0) {
 		/* Running in kernel mode, kernel stack region is also a guard
 		 * page */
-		_x86_mmu_set_flags(&z_x86_kernel_pdpt,
+		z_x86_mmu_set_flags(&z_x86_kernel_pdpt,
 				   (void *)(stack_buf - MMU_PAGE_SIZE),
 				   MMU_PAGE_SIZE, MMU_ENTRY_NOT_PRESENT,
 				   MMU_PTE_P_MASK);
@@ -83,16 +83,16 @@
 #endif /* CONFIG_X86_USERSPACE */
 
 #if CONFIG_X86_STACK_PROTECTION
-	_x86_mmu_set_flags(&z_x86_kernel_pdpt, stack, MMU_PAGE_SIZE,
+	z_x86_mmu_set_flags(&z_x86_kernel_pdpt, stack, MMU_PAGE_SIZE,
 			   MMU_ENTRY_NOT_PRESENT, MMU_PTE_P_MASK);
 #endif
 
 	stack_high = (char *)STACK_ROUND_DOWN(stack_buf + stack_size);
 
-	/* Create an initial context on the stack expected by _Swap() */
+	/* Create an initial context on the stack expected by z_swap() */
 	initial_frame = (struct _x86_initial_frame *)
 		(stack_high - sizeof(struct _x86_initial_frame));
-	/* _thread_entry() arguments */
+	/* z_thread_entry() arguments */
 	initial_frame->entry = entry;
 	initial_frame->p1 = parameter1;
 	initial_frame->p2 = parameter2;
@@ -102,22 +102,22 @@
 #ifdef CONFIG_X86_USERSPACE
 	if ((options & K_USER) != 0) {
 #ifdef _THREAD_WRAPPER_REQUIRED
-		initial_frame->edi = (u32_t)_arch_user_mode_enter;
+		initial_frame->edi = (u32_t)z_arch_user_mode_enter;
 		initial_frame->thread_entry = _x86_thread_entry_wrapper;
 #else
-		initial_frame->thread_entry = _arch_user_mode_enter;
+		initial_frame->thread_entry = z_arch_user_mode_enter;
 #endif /* _THREAD_WRAPPER_REQUIRED */
 	} else
 #endif /* CONFIG_X86_USERSPACE */
 	{
 #ifdef _THREAD_WRAPPER_REQUIRED
-		initial_frame->edi = (u32_t)_thread_entry;
+		initial_frame->edi = (u32_t)z_thread_entry;
 		initial_frame->thread_entry = _x86_thread_entry_wrapper;
 #else
-		initial_frame->thread_entry = _thread_entry;
+		initial_frame->thread_entry = z_thread_entry;
 #endif
 	}
-	/* Remaining _x86_initial_frame members can be garbage, _thread_entry()
+	/* Remaining _x86_initial_frame members can be garbage, z_thread_entry()
 	 * doesn't care about their state when execution begins
 	 */
 	thread->callee_saved.esp = (unsigned long)initial_frame;
@@ -136,7 +136,7 @@
 			   ROUND_UP(outgoing->stack_info.size, MMU_PAGE_SIZE));
 
 	/* Userspace can now access the incoming thread's stack */
-	_x86_mmu_set_flags(&USER_PDPT,
+	z_x86_mmu_set_flags(&USER_PDPT,
 			   (void *)incoming->stack_info.start,
 			   ROUND_UP(incoming->stack_info.size, MMU_PAGE_SIZE),
 			   MMU_ENTRY_PRESENT | K_MEM_PARTITION_P_RW_U_RW,
@@ -164,13 +164,13 @@
 		 /* Ensure that the outgoing mem domain configuration
 		  * is set back to default state.
 		  */
-		_arch_mem_domain_destroy(outgoing->mem_domain_info.mem_domain);
-		_arch_mem_domain_configure(incoming);
+		z_arch_mem_domain_destroy(outgoing->mem_domain_info.mem_domain);
+		z_arch_mem_domain_configure(incoming);
 	}
 }
 
 
-FUNC_NORETURN void _arch_user_mode_enter(k_thread_entry_t user_entry,
+FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry,
 					 void *p1, void *p2, void *p3)
 {
 	u32_t stack_end;
@@ -182,7 +182,7 @@
 				     _current->stack_info.size);
 
 	/* Set up the kernel stack used during privilege elevation */
-	_x86_mmu_set_flags(&z_x86_kernel_pdpt,
+	z_x86_mmu_set_flags(&z_x86_kernel_pdpt,
 			   (void *)(_current->stack_info.start - MMU_PAGE_SIZE),
 			   MMU_PAGE_SIZE,
 			   (MMU_ENTRY_PRESENT | MMU_ENTRY_WRITE |
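
z_new_thread() is the per-arch backend of k_thread_create(): the initial frame
built above makes the first z_swap() into the thread look like a return from
an ordinary context switch, after which execution surfaces in z_thread_entry()
with the three user arguments. For reference, creation through the public API
(stack size and priority are illustrative):

    K_THREAD_STACK_DEFINE(my_stack, 1024);
    static struct k_thread my_thread;

    static void my_entry(void *p1, void *p2, void *p3)
    {
            /* reached via z_thread_entry() after the first swap-in */
    }

    void start_my_thread(void)
    {
            k_thread_create(&my_thread, my_stack,
                            K_THREAD_STACK_SIZEOF(my_stack),
                            my_entry, NULL, NULL, NULL,
                            K_PRIO_PREEMPT(5), 0, K_NO_WAIT);
    }
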
diff --git a/arch/x86/core/userspace.S b/arch/x86/core/userspace.S
index 1b109f8..6193584 100644
--- a/arch/x86/core/userspace.S
+++ b/arch/x86/core/userspace.S
@@ -366,7 +366,7 @@
 	mov	%bx, %ds
 	mov	%bx, %es
 
-	/* Push arguments to _thread_entry() */
+	/* Push arguments to z_thread_entry() */
 	push	%esi	/* p3 */
 #ifndef CONFIG_X86_IAMCU
 	push	%ecx	/* p2 */
@@ -377,7 +377,7 @@
 	push	$0
 
 	/* Save stack pointer at this position, this is where it will be
-	 * when we land in _thread_entry()
+	 * when we land in z_thread_entry()
 	 */
 	mov	%esp, %edi
 
@@ -389,7 +389,7 @@
 	push	%edi		/* ESP */
 	pushfl			/* EFLAGS */
 	push	$USER_CODE_SEG	/* CS */
-	push	$_thread_entry	/* EIP */
+	push	$z_thread_entry	/* EIP */
 
 #ifdef CONFIG_EXECUTION_BENCHMARKING
 	/* Save the eax and edx registers before reading the time stamp
@@ -404,5 +404,5 @@
 	pop %eax
 #endif
 
-	/* We will land in _thread_entry() in user mode after this */
+	/* We will land in z_thread_entry() in user mode after this */
 	KPTI_IRET_USER
diff --git a/arch/x86/core/x86_mmu.c b/arch/x86/core/x86_mmu.c
index a8401cb..02f9660 100644
--- a/arch/x86/core/x86_mmu.c
+++ b/arch/x86/core/x86_mmu.c
@@ -45,7 +45,7 @@
 		MMU_ENTRY_EXECUTE_DISABLE);
 
 
-void _x86_mmu_get_flags(struct x86_mmu_pdpt *pdpt, void *addr,
+void z_x86_mmu_get_flags(struct x86_mmu_pdpt *pdpt, void *addr,
 			x86_page_entry_data_t *pde_flags,
 			x86_page_entry_data_t *pte_flags)
 {
@@ -63,7 +63,7 @@
 }
 
 
-int _arch_buffer_validate(void *addr, size_t size, int write)
+int z_arch_buffer_validate(void *addr, size_t size, int write)
 {
 	u32_t start_pde_num;
 	u32_t end_pde_num;
@@ -180,7 +180,7 @@
 }
 
 
-void _x86_mmu_set_flags(struct x86_mmu_pdpt *pdpt, void *ptr,
+void z_x86_mmu_set_flags(struct x86_mmu_pdpt *pdpt, void *ptr,
 			size_t size,
 			x86_page_entry_data_t flags,
 			x86_page_entry_data_t mask)
@@ -231,12 +231,12 @@
 	/* Clear both present bit and access flags. Only applies
 	 * to threads running in user mode.
 	 */
-	_x86_mmu_set_flags(&z_x86_user_pdpt, start, size,
+	z_x86_mmu_set_flags(&z_x86_user_pdpt, start, size,
 			   MMU_ENTRY_NOT_PRESENT,
 			   K_MEM_PARTITION_PERM_MASK | MMU_PTE_P_MASK);
 #else
 	/* Mark as supervisor read-write, user mode no access */
-	_x86_mmu_set_flags(&z_x86_kernel_pdpt, start, size,
+	z_x86_mmu_set_flags(&z_x86_kernel_pdpt, start, size,
 			   K_MEM_PARTITION_P_RW_U_NA,
 			   K_MEM_PARTITION_PERM_MASK);
 #endif /* CONFIG_X86_KPTI */
@@ -255,9 +255,9 @@
 	mask = K_MEM_PARTITION_PERM_MASK;
 #endif /* CONFIG_X86_KPTI */
 
-	_x86_mmu_set_flags(&USER_PDPT,
-			   (void *)partition->start,
-			   partition->size, attr, mask);
+	z_x86_mmu_set_flags(&USER_PDPT,
+			    (void *)partition->start,
+			    partition->size, attr, mask);
 }
 
 /* Helper macros that need to be passed to x86_update_mem_domain_pages */
@@ -307,7 +307,7 @@
 }
 
 /* Load the partitions of the thread. */
-void _arch_mem_domain_configure(struct k_thread *thread)
+void z_arch_mem_domain_configure(struct k_thread *thread)
 {
 	_x86_mem_domain_pages_update(thread->mem_domain_info.mem_domain,
 				     X86_MEM_DOMAIN_SET_PAGES);
@@ -316,14 +316,14 @@
 /* Destroy or reset the mmu page tables when necessary.
  * Needed when either swap takes place or k_mem_domain_destroy is called.
  */
-void _arch_mem_domain_destroy(struct k_mem_domain *domain)
+void z_arch_mem_domain_destroy(struct k_mem_domain *domain)
 {
 	_x86_mem_domain_pages_update(domain, X86_MEM_DOMAIN_RESET_PAGES);
 }
 
 /* Reset/destroy one partition specified in the argument of the API. */
-void _arch_mem_domain_partition_remove(struct k_mem_domain *domain,
-				       u32_t partition_id)
+void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain,
+					u32_t partition_id)
 {
 	struct k_mem_partition *partition;
 
@@ -349,7 +349,7 @@
 	activate_partition(partition);
 }
 
-int _arch_mem_domain_max_partitions_get(void)
+int z_arch_mem_domain_max_partitions_get(void)
 {
 	return CONFIG_MAX_DOMAIN_PARTITIONS;
 }
diff --git a/arch/x86/include/kernel_arch_data.h b/arch/x86/include/kernel_arch_data.h
index a2cef30..84b0e9d 100644
--- a/arch/x86/include/kernel_arch_data.h
+++ b/arch/x86/include/kernel_arch_data.h
@@ -41,7 +41,7 @@
 #endif
 
 /* Some configurations require that the stack/registers be adjusted before
- * _thread_entry. See discussion in swap.S for _x86_thread_entry_wrapper()
+ * z_thread_entry. See discussion in swap.S for _x86_thread_entry_wrapper()
  */
 #if defined(CONFIG_X86_IAMCU) || defined(CONFIG_DEBUG_INFO)
 #define _THREAD_WRAPPER_REQUIRED
diff --git a/arch/x86/include/kernel_arch_func.h b/arch/x86/include/kernel_arch_func.h
index 72285e3..a9c7bf3 100644
--- a/arch/x86/include/kernel_arch_func.h
+++ b/arch/x86/include/kernel_arch_func.h
@@ -39,7 +39,7 @@
 	_kernel.irq_stack = K_THREAD_STACK_BUFFER(_interrupt_stack) +
 				CONFIG_ISR_STACK_SIZE;
 #if CONFIG_X86_STACK_PROTECTION
-	_x86_mmu_set_flags(&z_x86_kernel_pdpt, _interrupt_stack, MMU_PAGE_SIZE,
+	z_x86_mmu_set_flags(&z_x86_kernel_pdpt, _interrupt_stack, MMU_PAGE_SIZE,
 			   MMU_ENTRY_NOT_PRESENT, MMU_PTE_P_MASK);
 #endif
 }
@@ -58,9 +58,9 @@
  * @return N/A
  */
 static ALWAYS_INLINE void
-_set_thread_return_value(struct k_thread *thread, unsigned int value)
+z_set_thread_return_value(struct k_thread *thread, unsigned int value)
 {
-	/* write into 'eax' slot created in _Swap() entry */
+	/* write into 'eax' slot created in z_swap() entry */
 
 	*(unsigned int *)(thread->callee_saved.esp) = value;
 }
@@ -129,7 +129,7 @@
 }
 #endif
 
-#define _is_in_isr() (_kernel.nested != 0U)
+#define z_is_in_isr() (_kernel.nested != 0U)
 
 #endif /* _ASMLANGUAGE */
 
diff --git a/arch/x86/include/kernel_arch_thread.h b/arch/x86/include/kernel_arch_thread.h
index 1f6ee89..08b7405 100644
--- a/arch/x86/include/kernel_arch_thread.h
+++ b/arch/x86/include/kernel_arch_thread.h
@@ -236,7 +236,7 @@
  * The thread control structure definition.  It contains the
  * various fields to manage a _single_ thread. The TCS will be aligned
  * to the appropriate architecture specific boundary via the
- * _new_thread() call.
+ * z_new_thread() call.
  */
 
 struct _thread_arch {
@@ -244,7 +244,7 @@
 #if defined(CONFIG_FP_SHARING)
 	/*
 	 * Nested exception count to maintain setting of EXC_ACTIVE flag across
-	 * outermost exception.  EXC_ACTIVE is used by _Swap() lazy FP
+	 * outermost exception.  EXC_ACTIVE is used by z_swap() lazy FP
 	 * save/restore and by debug tools.
 	 */
 	unsigned excNestCount; /* nested exception count */
diff --git a/arch/x86/include/swapstk.h b/arch/x86/include/swapstk.h
index a89c87a..c773eb7 100644
--- a/arch/x86/include/swapstk.h
+++ b/arch/x86/include/swapstk.h
@@ -8,12 +8,12 @@
  * @file
  * @brief Stack frame created by swap (IA-32)
  *
- * This file details the stack frame generated by _Swap() when it saves a task
+ * This file details the stack frame generated by z_swap() when it saves a task
  * or thread's context. This is specific to the IA-32 processor architecture.
  *
- * NOTE: _Swap() does not use this file as it uses the push instruction to
+ * NOTE: z_swap() does not use this file as it uses the push instruction to
  * save a context. Changes to the file will not automatically be picked up by
- * _Swap().  Conversely, changes to _Swap() should be mirrored here if the
+ * z_swap().  Conversely, changes to z_swap() should be mirrored here if the
  * stack frame is modified.
  */
 
@@ -33,8 +33,8 @@
 	unsigned int ebx;     /* EBX register */
 	unsigned int esi;     /* ESI register */
 	unsigned int edi;     /* EDI register */
-	unsigned int retAddr; /* Return address of caller of _Swap() */
-	unsigned int param;   /* Parameter passed to _Swap() */
+	unsigned int retAddr; /* Return address of caller of z_swap() */
+	unsigned int param;   /* Parameter passed to z_swap() */
 } tSwapStk;
 
 #endif /* _ASMLANGUAGE */
diff --git a/arch/x86_64/core/x86_64.c b/arch/x86_64/core/x86_64.c
index 9fa4845..312912d 100644
--- a/arch/x86_64/core/x86_64.c
+++ b/arch/x86_64/core/x86_64.c
@@ -15,7 +15,7 @@
 struct NANO_ESF {
 };
 
-void _new_thread(struct k_thread *t, k_thread_stack_t *stack,
+void z_new_thread(struct k_thread *t, k_thread_stack_t *stack,
 		 size_t sz, k_thread_entry_t entry,
 		 void *p1, void *p2, void *p3,
 		 int prio, unsigned int opts)
@@ -29,7 +29,7 @@
 	_new_thread_init(t, base, sz, prio, opts);
 
 	t->switch_handle = (void *)xuk_setup_stack((long) top,
-						   (void *)_thread_entry,
+						   (void *)z_thread_entry,
 						   eflags, (long *)args,
 						   nargs);
 }
@@ -53,18 +53,18 @@
 	printk("***  R8 0x%llx R9 0x%llx R10 0x%llx R11 0x%llx\n",
 	       f->r8, f->r9, f->r10, f->r11);
 
-	_NanoFatalErrorHandler(x86_64_except_reason, NULL);
+	z_NanoFatalErrorHandler(x86_64_except_reason, NULL);
 }
 
 void _isr_entry(void)
 {
-	_arch_curr_cpu()->nested++;
+	z_arch_curr_cpu()->nested++;
 }
 
 void *_isr_exit_restore_stack(void *interrupted)
 {
-	bool nested = (--_arch_curr_cpu()->nested) > 0;
-	void *next = _get_next_switch_handle(interrupted);
+	bool nested = (--z_arch_curr_cpu()->nested) > 0;
+	void *next = z_get_next_switch_handle(interrupted);
 
 	return (nested || next == interrupted) ? NULL : next;
 }
@@ -76,7 +76,7 @@
 } cpu_init[CONFIG_MP_NUM_CPUS];
 
 /* Called from Zephyr initialization */
-void _arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
+void z_arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
 		     void (*fn)(int, void *), void *arg)
 {
 	cpu_init[cpu_num].arg = arg;
@@ -130,18 +130,18 @@
 		/* The SMP CPU startup function pointers act as init
 		 * flags.  Zero them here because this code is running
 		 * BEFORE .bss is zeroed!  Should probably move that
-		 * out of _Cstart() for this architecture...
+		 * out of z_cstart() for this architecture...
 		 */
 		for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
 			cpu_init[i].fn = 0;
 		}
 
 		/* Enter Zephyr */
-		_Cstart();
+		z_cstart();
 
 	} else if (cpu < CONFIG_MP_NUM_CPUS) {
 		/* SMP initialization.  First spin, waiting for
-		 * _arch_start_cpu() to be called from the main CPU
+		 * z_arch_start_cpu() to be called from the main CPU
 		 */
 		while (!cpu_init[cpu].fn) {
 		}
@@ -157,14 +157,14 @@
 
 /* Returns the initial stack to use for CPU startup on auxiliary (not
  * cpu 0) processors to the xuk layer, which gets selected by the
- * non-arch Zephyr kernel and stashed by _arch_start_cpu()
+ * non-arch Zephyr kernel and stashed by z_arch_start_cpu()
  */
 unsigned int _init_cpu_stack(int cpu)
 {
 	return cpu_init[cpu].esp;
 }
 
-int _arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
+int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
 			      void (*routine)(void *parameter), void *parameter,
 			      u32_t flags)
 {
@@ -176,12 +176,12 @@
 	return 0;
 }
 
-void _arch_irq_disable(unsigned int irq)
+void z_arch_irq_disable(unsigned int irq)
 {
 	xuk_set_isr_mask(irq, 1);
 }
 
-void _arch_irq_enable(unsigned int irq)
+void z_arch_irq_enable(unsigned int irq)
 {
 	xuk_set_isr_mask(irq, 0);
 }
@@ -195,13 +195,13 @@
 
 int x86_64_except_reason;
 
-void _NanoFatalErrorHandler(unsigned int reason, const NANO_ESF *esf)
+void z_NanoFatalErrorHandler(unsigned int reason, const NANO_ESF *esf)
 {
-	_SysFatalErrorHandler(reason, esf);
+	z_SysFatalErrorHandler(reason, esf);
 }
 
 /* App-overridable handler.  Does nothing here */
-void __weak _SysFatalErrorHandler(unsigned int reason, const NANO_ESF *esf)
+void __weak z_SysFatalErrorHandler(unsigned int reason, const NANO_ESF *esf)
 {
 	ARG_UNUSED(reason);
 	ARG_UNUSED(esf);
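
z_arch_start_cpu() above is the arch half of SMP bring-up: the kernel side
stores the stack and entry function in cpu_init[], and the spinning auxiliary
CPU picks them up. A conceptual caller, modeled loosely on kernel SMP init
(stack size and CPU number are illustrative):

    K_THREAD_STACK_DEFINE(cpu1_stack, 1024);

    static void cpu1_top(int key, void *arg)
    {
            /* per-CPU init, then enter the scheduler; never returns */
    }

    void bring_up_cpu1(void)
    {
            z_arch_start_cpu(1, cpu1_stack, K_THREAD_STACK_SIZEOF(cpu1_stack),
                             cpu1_top, NULL);
    }
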
diff --git a/arch/x86_64/include/kernel_arch_func.h b/arch/x86_64/include/kernel_arch_func.h
index 0268da3..a3638a7 100644
--- a/arch/x86_64/include/kernel_arch_func.h
+++ b/arch/x86_64/include/kernel_arch_func.h
@@ -12,11 +12,11 @@
 static inline void kernel_arch_init(void)
 {
 	/* This is a no-op; we already took care of things before
-	 * _Cstart() is entered
+	 * z_cstart() is entered
 	 */
 }
 
-static inline struct _cpu *_arch_curr_cpu(void)
+static inline struct _cpu *z_arch_curr_cpu(void)
 {
 	long long ret, off = 0;
 
@@ -27,7 +27,7 @@
 	return (struct _cpu *)(long)ret;
 }
 
-static inline unsigned int _arch_irq_lock(void)
+static inline unsigned int z_arch_irq_lock(void)
 {
 	unsigned long long key;
 
@@ -35,7 +35,7 @@
 	return (int)key;
 }
 
-static inline void _arch_irq_unlock(unsigned int key)
+static inline void z_arch_irq_unlock(unsigned int key)
 {
 	if (key & 0x200) {
 		__asm__ volatile("sti");
@@ -47,8 +47,8 @@
 	__asm__ volatile("nop");
 }
 
-void _arch_irq_disable(unsigned int irq);
-void _arch_irq_enable(unsigned int irq);
+void z_arch_irq_disable(unsigned int irq);
+void z_arch_irq_enable(unsigned int irq);
 
 /* Not a standard Zephyr function, but probably will be */
 static inline unsigned long long _arch_k_cycle_get_64(void)
@@ -59,17 +59,17 @@
 	return (((unsigned long long)hi) << 32) | lo;
 }
 
-static inline unsigned int _arch_k_cycle_get_32(void)
+static inline unsigned int z_arch_k_cycle_get_32(void)
 {
 #ifdef CONFIG_HPET_TIMER
-	extern u32_t _timer_cycle_get_32(void);
-	return _timer_cycle_get_32();
+	extern u32_t z_timer_cycle_get_32(void);
+	return z_timer_cycle_get_32();
 #else
 	return (u32_t)_arch_k_cycle_get_64();
 #endif
 }
 
-#define _is_in_isr() (_arch_curr_cpu()->nested != 0)
+#define z_is_in_isr() (z_arch_curr_cpu()->nested != 0)
 
 static inline void _arch_switch(void *switch_to, void **switched_from)
 {
@@ -88,8 +88,8 @@
 
 void x86_apic_set_timeout(u32_t cyc_from_now);
 
-#define _ARCH_IRQ_CONNECT(irq, pri, isr, arg, flags) \
-	_arch_irq_connect_dynamic(irq, pri, isr, arg, flags)
+#define Z_ARCH_IRQ_CONNECT(irq, pri, isr, arg, flags) \
+	z_arch_irq_connect_dynamic(irq, pri, isr, arg, flags)
 
 extern int x86_64_except_reason;
 
@@ -97,7 +97,7 @@
 /* Vector 5 is the "bounds" exception which is otherwise vestigial
  * (BOUND is an illegal instruction in long mode)
  */
-#define _ARCH_EXCEPT(reason) do {		\
+#define Z_ARCH_EXCEPT(reason) do {		\
 		x86_64_except_reason = reason;	\
 		__asm__ volatile("int $5");	\
 	} while (false)
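
Z_ARCH_EXCEPT() is the hook behind the generic k_oops()/k_panic() macros: the
reason code is stored and a synchronous exception ('int $5' here) routes
execution into the fatal path from exception context. The mapping, sketched
(the kernel.h definitions shown assume this same renaming):

    /* include/kernel.h, conceptually: */
    #define k_oops()        Z_ARCH_EXCEPT(_NANO_ERR_KERNEL_OOPS)
    #define k_panic()       Z_ARCH_EXCEPT(_NANO_ERR_KERNEL_PANIC)

    void check_invariant(int broken)
    {
            if (broken) {
                    k_oops();       /* raises 'int $5' on x86_64; no return */
            }
    }
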
diff --git a/arch/xtensa/core/crt1.S b/arch/xtensa/core/crt1.S
index 73349d9..c4b2719 100644
--- a/arch/xtensa/core/crt1.S
+++ b/arch/xtensa/core/crt1.S
@@ -17,12 +17,12 @@
  *   __stack			from linker script (see LSP Ref Manual)
  *   _bss_table_start		from linker script (see LSP Ref Manual)
  *   _bss_table_end		from linker script (see LSP Ref Manual)
- *   _Cstart		Entry point into Zephyr C domain
+ *   z_cstart		Entry point into Zephyr C domain
  *   __stack		from linker script (see LSP Ref Manual)
  */
 
 .global __start
-.type	_Cstart, @function
+.type	z_cstart, @function
 
 
 /* Macros to abstract away ABI differences */
@@ -178,7 +178,7 @@
 #endif /* !XCHAL_HAVE_BOOTLOADER */
 
 	/* Enter C domain, never returns from here */
-	CALL	_Cstart
+	CALL	z_cstart
 
 	.size	_start, . - _start
 
diff --git a/arch/xtensa/core/fatal.c b/arch/xtensa/core/fatal.c
index 938c3fc..e206204 100644
--- a/arch/xtensa/core/fatal.c
+++ b/arch/xtensa/core/fatal.c
@@ -37,7 +37,7 @@
  *
  * This routine is called when fatal error conditions are detected by software
  * and is responsible only for reporting the error. Once reported, it then
- * invokes the user provided routine _SysFatalErrorHandler() which is
+ * invokes the user provided routine z_SysFatalErrorHandler() which is
  * responsible for implementing the error handling policy.
  *
  * The caller is expected to always provide a usable ESF. In the event that the
@@ -49,7 +49,7 @@
  *
  * @return This function does not return.
  */
-XTENSA_ERR_NORET void _NanoFatalErrorHandler(unsigned int reason,
+XTENSA_ERR_NORET void z_NanoFatalErrorHandler(unsigned int reason,
 					     const NANO_ESF *pEsf)
 {
 	LOG_PANIC();
@@ -92,7 +92,7 @@
 	 * appropriate to the various errors are something the customer must
 	 * decide.
 	 */
-	_SysFatalErrorHandler(reason, pEsf);
+	z_SysFatalErrorHandler(reason, pEsf);
 }
 
 
@@ -190,7 +190,7 @@
 {
 	printk("*** Unhandled exception ****\n");
 	dump_exc_state();
-	_NanoFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, &_default_esf);
+	z_NanoFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, &_default_esf);
 }
 
 XTENSA_ERR_NORET void ReservedInterruptHandler(unsigned int intNo)
@@ -200,7 +200,7 @@
 	printk("INTENABLE = 0x%x\n"
 	       "INTERRUPT = 0x%x (%d)\n",
 	       get_sreg(INTENABLE), (1 << intNo), intNo);
-	_NanoFatalErrorHandler(_NANO_ERR_RESERVED_IRQ, &_default_esf);
+	z_NanoFatalErrorHandler(_NANO_ERR_RESERVED_IRQ, &_default_esf);
 }
 
 void exit(int return_code)
@@ -239,7 +239,7 @@
  *
  * @return N/A
  */
-XTENSA_ERR_NORET __weak void _SysFatalErrorHandler(unsigned int reason,
+XTENSA_ERR_NORET __weak void z_SysFatalErrorHandler(unsigned int reason,
 						   const NANO_ESF *pEsf)
 {
 	ARG_UNUSED(pEsf);
@@ -253,7 +253,7 @@
 	if (reason == _NANO_ERR_KERNEL_PANIC) {
 		goto hang_system;
 	}
-	if (k_is_in_isr() || _is_thread_essential()) {
+	if (k_is_in_isr() || z_is_thread_essential()) {
 		printk("Fatal fault in %s! Spinning...\n",
 		       k_is_in_isr() ? "ISR" : "essential thread");
 		goto hang_system;
diff --git a/arch/xtensa/core/irq_manage.c b/arch/xtensa/core/irq_manage.c
index e405933..a8b2612 100644
--- a/arch/xtensa/core/irq_manage.c
+++ b/arch/xtensa/core/irq_manage.c
@@ -27,7 +27,7 @@
  * @return N/A
  */
 
-void _irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
+void z_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
 {
 	__ASSERT(prio < XCHAL_EXCM_LEVEL + 1,
 		 "invalid priority %d! values must be less than %d\n",
@@ -38,7 +38,7 @@
 }
 
 #ifdef CONFIG_DYNAMIC_INTERRUPTS
-int _arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
+int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
 			      void (*routine)(void *parameter), void *parameter,
 			      u32_t flags)
 {
diff --git a/arch/xtensa/core/irq_offload.c b/arch/xtensa/core/irq_offload.c
index 6aefcf7..a84a19d 100644
--- a/arch/xtensa/core/irq_offload.c
+++ b/arch/xtensa/core/irq_offload.c
@@ -27,7 +27,7 @@
 {
 	IRQ_CONNECT(CONFIG_IRQ_OFFLOAD_INTNUM, XCHAL_EXCM_LEVEL,
 		_irq_do_offload, NULL, 0);
-	_arch_irq_disable(CONFIG_IRQ_OFFLOAD_INTNUM);
+	z_arch_irq_disable(CONFIG_IRQ_OFFLOAD_INTNUM);
 	offload_routine = routine;
 	offload_param = parameter;
 	_xt_set_intset(1 << CONFIG_IRQ_OFFLOAD_INTNUM);
@@ -35,5 +35,5 @@
 	 * Enable the software interrupt, in case it is disabled, so that IRQ
 	 * offload is serviced.
 	 */
-	_arch_irq_enable(CONFIG_IRQ_OFFLOAD_INTNUM);
+	z_arch_irq_enable(CONFIG_IRQ_OFFLOAD_INTNUM);
 }
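
irq_offload() is primarily a test facility: it synchronously runs a routine in
interrupt context so that ISR-path behavior (e.g. what z_is_in_isr() reports)
can be exercised deterministically. Typical usage from a test:

    static void offload_fn(void *param)
    {
            /* runs in interrupt context: k_is_in_isr() is true here */
            *(int *)param = k_is_in_isr();
    }

    void test_isr_context(void)
    {
            int in_isr = 0;

            irq_offload(offload_fn, &in_isr);   /* returns after the ISR ran */
            __ASSERT(in_isr, "offload routine did not run in ISR context");
    }
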
diff --git a/arch/xtensa/core/thread.c b/arch/xtensa/core/thread.c
index 8d91323..a2a64a1 100644
--- a/arch/xtensa/core/thread.c
+++ b/arch/xtensa/core/thread.c
@@ -25,7 +25,7 @@
  * needed anymore.
  *
  * The initial context is a basic stack frame that contains arguments for
- * _thread_entry(), a return address that points at _thread_entry(),
+ * z_thread_entry(), a return address that points at z_thread_entry(),
  * and a status register.
  *
  * <options> is currently unused.
@@ -43,7 +43,7 @@
  * @return N/A
  */
 
-void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
+void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 		size_t stackSize, k_thread_entry_t pEntry,
 		void *p1, void *p2, void *p3,
 		int priority, unsigned int options)
@@ -90,7 +90,7 @@
 	/* Explicitly initialize certain saved registers */
 
 	 /* task entrypoint */
-	pInitCtx->pc   = (u32_t)_thread_entry;
+	pInitCtx->pc   = (u32_t)z_thread_entry;
 
 	/* physical top of stack frame */
 	pInitCtx->a1   = (u32_t)pInitCtx + XT_STK_FRMSZ;
diff --git a/arch/xtensa/core/xt_zephyr.S b/arch/xtensa/core/xt_zephyr.S
index 316ed8e..3e6dc21 100644
--- a/arch/xtensa/core/xt_zephyr.S
+++ b/arch/xtensa/core/xt_zephyr.S
@@ -34,7 +34,7 @@
 	bnez a2, .L_frxt_dispatch_stk
 
 .L_frxt_dispatch_sol:
-	/* Solicited stack frame. Restore retval from _Swap */
+	/* Solicited stack frame. Restore retval from z_swap */
 	l32i a2, a3, THREAD_OFFSET(retval)
 	l32i a3, sp, XT_SOL_ps
 
@@ -71,9 +71,9 @@
 #endif
 #ifdef CONFIG_STACK_SENTINEL
 #ifdef __XTENSA_CALL0_ABI__
-	call0 _check_stack_sentinel
+	call0 z_check_stack_sentinel
 #else
-	call4 _check_stack_sentinel
+	call4 z_check_stack_sentinel
 #endif
 #endif
 	/*
@@ -341,10 +341,10 @@
 	*/
 #ifdef __XTENSA_CALL0_ABI__
 	movi a2, XT_TIMER_INTEN
-	call0  _xt_ints_on
+	call0  z_xt_ints_on
 #else
 	movi a6, XT_TIMER_INTEN
-	call4 _xt_ints_on
+	call4 z_xt_ints_on
 #endif
 
 #endif
diff --git a/arch/xtensa/core/xtensa-asm2.c b/arch/xtensa/core/xtensa-asm2.c
index feb2b8e..6809416 100644
--- a/arch/xtensa/core/xtensa-asm2.c
+++ b/arch/xtensa/core/xtensa-asm2.c
@@ -29,10 +29,10 @@
 
 	(void)memset(bsa, 0, bsasz);
 
-	bsa[BSA_PC_OFF/4] = _thread_entry;
+	bsa[BSA_PC_OFF/4] = z_thread_entry;
 	bsa[BSA_PS_OFF/4] = (void *)(PS_WOE | PS_UM | PS_CALLINC(1));
 
-	/* Arguments to _thread_entry().  Remember these start at A6,
+	/* Arguments to z_thread_entry().  Remember these start at A6,
 	 * which will be rotated into A2 by the ENTRY instruction that
 	 * begins the C function.  And A4-A7 and A8-A11 are optional
 	 * quads that live below the BSA!
@@ -59,7 +59,7 @@
  * utilities/testables.
  */
 #ifdef CONFIG_XTENSA_ASM2
-void _new_thread(struct k_thread *thread, k_thread_stack_t *stack, size_t sz,
+void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack, size_t sz,
 		 k_thread_entry_t entry, void *p1, void *p2, void *p3,
 		 int prio, unsigned int opts)
 {
@@ -77,7 +77,7 @@
 #endif
 
 #ifdef CONFIG_XTENSA_ASM2
-void _irq_spurious(void *arg)
+void z_irq_spurious(void *arg)
 {
 	int irqs, ie;
 
@@ -87,7 +87,7 @@
 	__asm__ volatile("rsr.intenable %0" : "=r"(ie));
 	printk(" ** Spurious INTERRUPT(s) %p, INTENABLE = %p\n",
 	       (void *)irqs, (void *)ie);
-	_NanoFatalErrorHandler(_NANO_ERR_RESERVED_IRQ, &_default_esf);
+	z_NanoFatalErrorHandler(_NANO_ERR_RESERVED_IRQ, &_default_esf);
 }
 #endif
 
@@ -143,7 +143,7 @@
 		irqs ^= m;					\
 		__asm__ volatile("wsr.intclear %0" : : "r"(m)); \
 	}							\
-	return _get_next_switch_handle(interrupted_stack);		\
+	return z_get_next_switch_handle(interrupted_stack);		\
 }
 
 DEF_INT_C_HANDLER(2)
@@ -191,7 +191,7 @@
 		 */
 		printk(" ** FATAL EXCEPTION\n");
 		printk(" ** CPU %d EXCCAUSE %d PS %p PC %p VADDR %p\n",
-		       _arch_curr_cpu()->id, cause, (void *)bsa[BSA_PS_OFF/4],
+		       z_arch_curr_cpu()->id, cause, (void *)bsa[BSA_PS_OFF/4],
 		       (void *)bsa[BSA_PC_OFF/4], (void *)vaddr);
 
 		dump_stack(interrupted_stack);
@@ -201,9 +201,9 @@
 		 * as these are software errors.  Should clean this
 		 * up.
 		 */
-		_NanoFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, &_default_esf);
+		z_NanoFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, &_default_esf);
 	}
 
-	return _get_next_switch_handle(interrupted_stack);
+	return z_get_next_switch_handle(interrupted_stack);
 }
 
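To make the rotation concrete: PS_CALLINC(1) above encodes a call4-style
entry, and a call4 shifts the register window by four, so the words this
code plants in A6..A9 are what the entered function sees as A2..A5, i.e.
its four C arguments, matching the prototype

	void z_thread_entry(k_thread_entry_t entry,
			    void *p1, void *p2, void *p3);
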
diff --git a/arch/xtensa/core/xtensa_intr.c b/arch/xtensa/core/xtensa_intr.c
index 88fdf85..121a8d8 100644
--- a/arch/xtensa/core/xtensa_intr.c
+++ b/arch/xtensa/core/xtensa_intr.c
@@ -31,7 +31,7 @@
 #endif
 
 #if defined(CONFIG_SW_ISR_TABLE) && defined(XCHAL_HAVE_INTERRUPTS)
-void _irq_spurious(void *arg)
+void z_irq_spurious(void *arg)
 {
 	ReservedInterruptHandler((unsigned int)arg);
 	CODE_UNREACHABLE;
diff --git a/arch/xtensa/core/xtensa_intr_asm.S b/arch/xtensa/core/xtensa_intr_asm.S
index 091092c..9099bac 100644
--- a/arch/xtensa/core/xtensa_intr_asm.S
+++ b/arch/xtensa/core/xtensa_intr_asm.S
@@ -41,7 +41,7 @@
 
 /*
 -------------------------------------------------------------------------------
-  unsigned int _xt_ints_on ( unsigned int mask )
+  unsigned int z_xt_ints_on ( unsigned int mask )
 
   Enables a set of interrupts. Does not simply set INTENABLE directly, but
   computes it as a function of the current virtual priority.
@@ -51,10 +51,10 @@
 
 	.text
 	.align  4
-	.global _xt_ints_on
-	.type   _xt_ints_on,@function
+	.global z_xt_ints_on
+	.type   z_xt_ints_on,@function
 
-_xt_ints_on:
+z_xt_ints_on:
 
 	ENTRY0
 #if XCHAL_HAVE_INTERRUPTS
@@ -74,12 +74,12 @@
 #endif
 	RET0
 
-	.size   _xt_ints_on, . - _xt_ints_on
+	.size   z_xt_ints_on, . - z_xt_ints_on
 
 
 /*
 -------------------------------------------------------------------------------
-  unsigned int _xt_ints_off ( unsigned int mask )
+  unsigned int z_xt_ints_off ( unsigned int mask )
 
   Disables a set of interrupts. Does not simply set INTENABLE directly,
   but computes it as a function of the current virtual priority.
@@ -89,10 +89,10 @@
 
 	.text
 	.align  4
-	.global _xt_ints_off
-	.type   _xt_ints_off,@function
+	.global z_xt_ints_off
+	.type   z_xt_ints_off,@function
 
-_xt_ints_off:
+z_xt_ints_off:
 
 	ENTRY0
 #if XCHAL_HAVE_INTERRUPTS
@@ -113,6 +113,6 @@
 #endif
 	RET0
 
-	.size   _xt_ints_off, . - _xt_ints_off
+	.size   z_xt_ints_off, . - z_xt_ints_off
 
 
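Both routines implement the "computed from the current virtual priority"
rule described above. A hedged C sketch of the idea, where xt_enabled and
xt_vpri_mask are illustrative stand-ins for the Xtensa HAL's actual
bookkeeping (which is outside this patch):

	extern unsigned int xt_enabled;   /* set requested via ints_on/off */
	extern unsigned int xt_vpri_mask; /* sources allowed at current vpri */

	unsigned int ints_on_sketch(unsigned int mask)
	{
		unsigned int val;

		xt_enabled |= mask;
		val = xt_enabled & xt_vpri_mask; /* never unmask above vpri */
		__asm__ volatile("wsr.intenable %0; rsync" : : "r"(val));
		return val;
	}
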
diff --git a/arch/xtensa/core/xtensa_vectors.S b/arch/xtensa/core/xtensa_vectors.S
index 8c17c32..c996008 100644
--- a/arch/xtensa/core/xtensa_vectors.S
+++ b/arch/xtensa/core/xtensa_vectors.S
@@ -139,7 +139,7 @@
  * mask  -- interrupt bitmask for this level
  */
     .extern _kernel
-    .extern _sys_power_save_idle_exit
+    .extern z_sys_power_save_idle_exit
 
     .macro  dispatch_c_isr    level  mask
 
@@ -203,14 +203,14 @@
     beqz    a2, 10f
     xor     a4, a2, a2
     s32i    a4, a3, _kernel_offset_to_idle
-    call0   _sys_power_save_idle_exit
+    call0   z_sys_power_save_idle_exit
     mov     a2, a12
 #else
     l32i    a6, a3, _kernel_offset_to_idle
     beqz    a6, 10f
     xor     a4, a6, a6
     s32i    a4, a3, _kernel_offset_to_idle
-    call4   _sys_power_save_idle_exit
+    call4   z_sys_power_save_idle_exit
 #endif /* __XTENSA_CALL0_ABI__ */
 10:
 #endif /* CONFIG_SYS_POWER_MANAGEMENT */
diff --git a/arch/xtensa/include/kernel_arch_func.h b/arch/xtensa/include/kernel_arch_func.h
index 20f07de..2b25787 100644
--- a/arch/xtensa/include/kernel_arch_func.h
+++ b/arch/xtensa/include/kernel_arch_func.h
@@ -40,7 +40,7 @@
 
 extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE);
 
-static ALWAYS_INLINE _cpu_t *_arch_curr_cpu(void)
+static ALWAYS_INLINE _cpu_t *z_arch_curr_cpu(void)
 {
 #ifdef CONFIG_XTENSA_ASM2
 	void *val;
@@ -110,7 +110,7 @@
  */
 #if !CONFIG_USE_SWITCH
 static ALWAYS_INLINE void
-_set_thread_return_value(struct k_thread *thread, unsigned int value)
+z_set_thread_return_value(struct k_thread *thread, unsigned int value)
 {
 	thread->callee_saved.retval = value;
 }
@@ -124,7 +124,7 @@
 }
 #endif
 
-#define _is_in_isr() (_arch_curr_cpu()->nested != 0U)
+#define z_is_in_isr() (z_arch_curr_cpu()->nested != 0U)
 
 #endif /* _ASMLANGUAGE */
 
diff --git a/arch/xtensa/include/xtensa_api.h b/arch/xtensa/include/xtensa_api.h
index fbe16c5..16ca369 100644
--- a/arch/xtensa/include/xtensa_api.h
+++ b/arch/xtensa/include/xtensa_api.h
@@ -16,7 +16,7 @@
  * mask     - Bit mask of interrupts to be enabled.
  */
 #if CONFIG_XTENSA_ASM2
-static inline void _xt_ints_on(unsigned int mask)
+static inline void z_xt_ints_on(unsigned int mask)
 {
 	int val;
 
@@ -25,7 +25,7 @@
 	__asm__ volatile("wsr.intenable %0; rsync" : : "r"(val));
 }
 #else
-extern void _xt_ints_on(unsigned int mask);
+extern void z_xt_ints_on(unsigned int mask);
 #endif
 
 
@@ -35,7 +35,7 @@
  * mask     - Bit mask of interrupts to be disabled.
  */
 #if CONFIG_XTENSA_ASM2
-static inline void _xt_ints_off(unsigned int mask)
+static inline void z_xt_ints_off(unsigned int mask)
 {
 	int val;
 
@@ -44,7 +44,7 @@
 	__asm__ volatile("wsr.intenable %0; rsync" : : "r"(val));
 }
 #else
-extern void _xt_ints_off(unsigned int mask);
+extern void z_xt_ints_off(unsigned int mask);
 #endif
 
 /*
diff --git a/boards/posix/native_posix/board_irq.h b/boards/posix/native_posix/board_irq.h
index f504fc2..16e09de 100644
--- a/boards/posix/native_posix/board_irq.h
+++ b/boards/posix/native_posix/board_irq.h
@@ -17,7 +17,7 @@
 
 void _isr_declare(unsigned int irq_p, int flags, void isr_p(void *),
 		void *isr_param_p);
-void _irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
+void z_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
 
 /**
  * Configure a static interrupt.
@@ -30,10 +30,10 @@
  *
  * @return The vector assigned to this interrupt
  */
-#define _ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
+#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
 ({ \
 	_isr_declare(irq_p, 0, isr_p, isr_param_p); \
-	_irq_priority_set(irq_p, priority_p, flags_p); \
+	z_irq_priority_set(irq_p, priority_p, flags_p); \
 	irq_p; \
 })
 
@@ -43,10 +43,10 @@
  *
  * See include/irq.h for details.
  */
-#define _ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
+#define Z_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
 ({ \
 	_isr_declare(irq_p, ISR_FLAG_DIRECT, (void (*)(void *))isr_p, NULL); \
-	_irq_priority_set(irq_p, priority_p, flags_p); \
+	z_irq_priority_set(irq_p, priority_p, flags_p); \
 	irq_p; \
 })
 
@@ -62,7 +62,7 @@
  * All pre/post irq work of the interrupt is handled in the board
  * posix_irq_handler() both for direct and normal interrupts together
  */
-#define _ARCH_ISR_DIRECT_DECLARE(name) \
+#define Z_ARCH_ISR_DIRECT_DECLARE(name) \
 	static inline int name##_body(void); \
 	int name(void) \
 	{ \
@@ -72,14 +72,14 @@
 	} \
 	static inline int name##_body(void)
 
-#define _ARCH_ISR_DIRECT_HEADER()   do { } while (0)
-#define _ARCH_ISR_DIRECT_FOOTER(a)  do { } while (0)
+#define Z_ARCH_ISR_DIRECT_HEADER()   do { } while (0)
+#define Z_ARCH_ISR_DIRECT_FOOTER(a)  do { } while (0)
 
 #ifdef CONFIG_SYS_POWER_MANAGEMENT
 extern void posix_irq_check_idle_exit(void);
-#define _ARCH_ISR_DIRECT_PM() posix_irq_check_idle_exit()
+#define Z_ARCH_ISR_DIRECT_PM() posix_irq_check_idle_exit()
 #else
-#define _ARCH_ISR_DIRECT_PM() do { } while (0)
+#define Z_ARCH_ISR_DIRECT_PM() do { } while (0)
 #endif
 
 #ifdef __cplusplus
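
These Z_ARCH_IRQ_* macros are the board-level backends of the generic
IRQ_CONNECT()/IRQ_DIRECT_CONNECT() macros in include/irq.h, so drivers
connect interrupts the usual way and end up in the _isr_declare() plus
z_irq_priority_set() pair shown above. The IRQ number, priority and
my_isr below are illustrative:

	static void my_isr(void *param)
	{
		/* param is the pointer registered at connect time */
	}

	void my_driver_init(void)
	{
		IRQ_CONNECT(5, 1, my_isr, NULL, 0);
		irq_enable(5);
	}
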
diff --git a/boards/posix/native_posix/irq_handler.c b/boards/posix/native_posix/irq_handler.c
index f19c9baae..cb0691f 100644
--- a/boards/posix/native_posix/irq_handler.c
+++ b/boards/posix/native_posix/irq_handler.c
@@ -32,9 +32,9 @@
 {
 	/*
 	 * As in this architecture an irq (code) executes in 0 time,
-	 * it is a bit senseless to call _int_latency_start/stop()
+	 * it is a bit senseless to call z_int_latency_start/stop()
 	 */
-	/* _int_latency_start(); */
+	/* z_int_latency_start(); */
 
 	sys_trace_isr_enter();
 
@@ -59,7 +59,7 @@
 	}
 
 	sys_trace_isr_exit();
-	/* _int_latency_stop(); */
+	/* z_int_latency_stop(); */
 }
 
 /**
@@ -114,7 +114,7 @@
 		&& (hw_irq_ctrl_get_cur_prio() == 256)
 		&& (_kernel.ready_q.cache != _current)) {
 
-		(void)_Swap_irqlock(irq_lock);
+		(void)z_swap_irqlock(irq_lock);
 	}
 }
 
@@ -178,7 +178,7 @@
 	return hw_irq_ctrl_change_lock(true);
 }
 
-unsigned int _arch_irq_lock(void)
+unsigned int z_arch_irq_lock(void)
 {
 	return posix_irq_lock();
 }
@@ -201,7 +201,7 @@
 	hw_irq_ctrl_change_lock(key);
 }
 
-void _arch_irq_unlock(unsigned int key)
+void z_arch_irq_unlock(unsigned int key)
 {
 	posix_irq_unlock(key);
 }
@@ -212,17 +212,17 @@
 	hw_irq_ctrl_change_lock(false);
 }
 
-void _arch_irq_enable(unsigned int irq)
+void z_arch_irq_enable(unsigned int irq)
 {
 	hw_irq_ctrl_enable_irq(irq);
 }
 
-void _arch_irq_disable(unsigned int irq)
+void z_arch_irq_disable(unsigned int irq)
 {
 	hw_irq_ctrl_disable_irq(irq);
 }
 
-int _arch_irq_is_enabled(unsigned int irq)
+int z_arch_irq_is_enabled(unsigned int irq)
 {
 	return hw_irq_ctrl_is_irq_enabled(irq);
 }
@@ -265,7 +265,7 @@
  *
  * @return N/A
  */
-void _irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
+void z_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
 {
 	hw_irq_ctrl_prio_set(irq, prio);
 }
@@ -317,7 +317,7 @@
 	off_routine = routine;
 	off_parameter = parameter;
 	_isr_declare(OFFLOAD_SW_IRQ, 0, offload_sw_irq_handler, NULL);
-	_arch_irq_enable(OFFLOAD_SW_IRQ);
+	z_arch_irq_enable(OFFLOAD_SW_IRQ);
 	posix_sw_set_pending_IRQ(OFFLOAD_SW_IRQ);
-	_arch_irq_disable(OFFLOAD_SW_IRQ);
+	z_arch_irq_disable(OFFLOAD_SW_IRQ);
 }
diff --git a/boards/posix/nrf52_bsim/board_irq.h b/boards/posix/nrf52_bsim/board_irq.h
index f504fc2..16e09de 100644
--- a/boards/posix/nrf52_bsim/board_irq.h
+++ b/boards/posix/nrf52_bsim/board_irq.h
@@ -17,7 +17,7 @@
 
 void _isr_declare(unsigned int irq_p, int flags, void isr_p(void *),
 		void *isr_param_p);
-void _irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
+void z_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
 
 /**
  * Configure a static interrupt.
@@ -30,10 +30,10 @@
  *
  * @return The vector assigned to this interrupt
  */
-#define _ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
+#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
 ({ \
 	_isr_declare(irq_p, 0, isr_p, isr_param_p); \
-	_irq_priority_set(irq_p, priority_p, flags_p); \
+	z_irq_priority_set(irq_p, priority_p, flags_p); \
 	irq_p; \
 })
 
@@ -43,10 +43,10 @@
  *
  * See include/irq.h for details.
  */
-#define _ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
+#define Z_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
 ({ \
 	_isr_declare(irq_p, ISR_FLAG_DIRECT, (void (*)(void *))isr_p, NULL); \
-	_irq_priority_set(irq_p, priority_p, flags_p); \
+	z_irq_priority_set(irq_p, priority_p, flags_p); \
 	irq_p; \
 })
 
@@ -62,7 +62,7 @@
  * All pre/post irq work of the interrupt is handled in the board
  * posix_irq_handler() both for direct and normal interrupts together
  */
-#define _ARCH_ISR_DIRECT_DECLARE(name) \
+#define Z_ARCH_ISR_DIRECT_DECLARE(name) \
 	static inline int name##_body(void); \
 	int name(void) \
 	{ \
@@ -72,14 +72,14 @@
 	} \
 	static inline int name##_body(void)
 
-#define _ARCH_ISR_DIRECT_HEADER()   do { } while (0)
-#define _ARCH_ISR_DIRECT_FOOTER(a)  do { } while (0)
+#define Z_ARCH_ISR_DIRECT_HEADER()   do { } while (0)
+#define Z_ARCH_ISR_DIRECT_FOOTER(a)  do { } while (0)
 
 #ifdef CONFIG_SYS_POWER_MANAGEMENT
 extern void posix_irq_check_idle_exit(void);
-#define _ARCH_ISR_DIRECT_PM() posix_irq_check_idle_exit()
+#define Z_ARCH_ISR_DIRECT_PM() posix_irq_check_idle_exit()
 #else
-#define _ARCH_ISR_DIRECT_PM() do { } while (0)
+#define Z_ARCH_ISR_DIRECT_PM() do { } while (0)
 #endif
 
 #ifdef __cplusplus
diff --git a/boards/posix/nrf52_bsim/irq_handler.c b/boards/posix/nrf52_bsim/irq_handler.c
index 32e4fe8..c2fbc0d 100644
--- a/boards/posix/nrf52_bsim/irq_handler.c
+++ b/boards/posix/nrf52_bsim/irq_handler.c
@@ -87,9 +87,9 @@
 
 	/*
 	 * As in this architecture an irq (code) executes in 0 time,
-	 * it is a bit senseless to call _int_latency_start/stop()
+	 * it is a bit senseless to call z_int_latency_start/stop()
 	 */
-	/* _int_latency_start(); */
+	/* z_int_latency_start(); */
 	sys_trace_isr_enter();
 
 	if (irq_vector_table[irq_nbr].func == NULL) { /* LCOV_EXCL_BR_LINE */
@@ -113,7 +113,7 @@
 	}
 
 	sys_trace_isr_exit();
-	/* _int_latency_stop(); */
+	/* z_int_latency_stop(); */
 
 	bs_trace_raw_time(7, "Irq %i (%s) ended\n", irq_nbr, irqnames[irq_nbr]);
 }
@@ -172,7 +172,7 @@
 		&& (CPU_will_be_awaken_from_WFE == false)
 		&& (_kernel.ready_q.cache != _current)) {
 
-		_Swap_irqlock(irq_lock);
+		z_swap_irqlock(irq_lock);
 	}
 }
 
@@ -236,7 +236,7 @@
 	return hw_irq_ctrl_change_lock(true);
 }
 
-unsigned int _arch_irq_lock(void)
+unsigned int z_arch_irq_lock(void)
 {
 	return posix_irq_lock();
 }
@@ -259,7 +259,7 @@
 	hw_irq_ctrl_change_lock(key);
 }
 
-void _arch_irq_unlock(unsigned int key)
+void z_arch_irq_unlock(unsigned int key)
 {
 	posix_irq_unlock(key);
 }
@@ -270,22 +270,22 @@
 	hw_irq_ctrl_change_lock(false);
 }
 
-void _arch_irq_enable(unsigned int irq)
+void z_arch_irq_enable(unsigned int irq)
 {
 	hw_irq_ctrl_enable_irq(irq);
 }
 
-void _arch_irq_disable(unsigned int irq)
+void z_arch_irq_disable(unsigned int irq)
 {
 	hw_irq_ctrl_disable_irq(irq);
 }
 
-int _arch_irq_is_enabled(unsigned int irq)
+int z_arch_irq_is_enabled(unsigned int irq)
 {
 	return hw_irq_ctrl_is_irq_enabled(irq);
 }
 
-void _arch_isr_direct_header(void)
+void z_arch_isr_direct_header(void)
 {
 	/* Nothing to be done */
 }
@@ -328,7 +328,7 @@
  *
  * @return N/A
  */
-void _irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
+void z_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
 {
 	hw_irq_ctrl_prio_set(irq, prio);
 }
@@ -380,9 +380,9 @@
 	off_routine = routine;
 	off_parameter = parameter;
 	_isr_declare(OFFLOAD_SW_IRQ, 0, offload_sw_irq_handler, NULL);
-	_arch_irq_enable(OFFLOAD_SW_IRQ);
+	z_arch_irq_enable(OFFLOAD_SW_IRQ);
 	posix_sw_set_pending_IRQ(OFFLOAD_SW_IRQ);
-	_arch_irq_disable(OFFLOAD_SW_IRQ);
+	z_arch_irq_disable(OFFLOAD_SW_IRQ);
 }
 
 /**
diff --git a/drivers/aio/aio_comparator_handlers.c b/drivers/aio/aio_comparator_handlers.c
index 8026b7b..fe14ca8 100644
--- a/drivers/aio/aio_comparator_handlers.c
+++ b/drivers/aio/aio_comparator_handlers.c
@@ -10,11 +10,11 @@
 Z_SYSCALL_HANDLER(aio_cmp_disable, dev, index)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_AIO_CMP(dev, disable));
-	return _impl_aio_cmp_disable((struct device *)dev, index);
+	return z_impl_aio_cmp_disable((struct device *)dev, index);
 }
 
 Z_SYSCALL_HANDLER(aio_cmp_get_pending_int, dev)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_AIO_CMP(dev, get_pending_int));
-	return _impl_aio_get_pending_int((struct device *)dev, index);
+	return z_impl_aio_cmp_get_pending_int((struct device *)dev);
 }
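
This is the first of many *_handlers.c files in the patch, and they all
share one shape: a __syscall prototype in the public header, a z_impl_*
function that does the real work, and a Z_SYSCALL_HANDLER() entry point
that validates user-mode arguments before dispatching. A minimal sketch
for a hypothetical foo API (every foo_* name below is illustrative):

	/* include/foo.h */
	__syscall int foo_do_thing(struct device *dev);

	static inline int z_impl_foo_do_thing(struct device *dev)
	{
		const struct foo_driver_api *api = dev->driver_api;

		return api->do_thing(dev);
	}

	/* drivers/foo/foo_handlers.c */
	Z_SYSCALL_HANDLER(foo_do_thing, dev)
	{
		Z_OOPS(Z_SYSCALL_DRIVER_FOO(dev, do_thing));
		return z_impl_foo_do_thing((struct device *)dev);
	}

User threads trap into the handler; supervisor-mode callers reach
z_impl_foo_do_thing() directly, which is why both halves must agree on
the z_impl_ prefix.
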
diff --git a/drivers/can/can_handlers.c b/drivers/can/can_handlers.c
index 03b8872..cf73367 100644
--- a/drivers/can/can_handlers.c
+++ b/drivers/can/can_handlers.c
@@ -11,7 +11,7 @@
 
 	Z_OOPS(Z_SYSCALL_DRIVER_CAN(dev, configure));
 
-	return _impl_can_configure((struct device *)dev, (enum can_mode)mode,
+	return z_impl_can_configure((struct device *)dev, (enum can_mode)mode,
 				   (u32_t)bitrate);
 }
 
@@ -26,7 +26,7 @@
 	Z_OOPS(Z_SYSCALL_VERIFY_MSG(callback_isr == 0,
 				    "callbacks may not be set from user mode"));
 
-	return _impl_can_send((struct device *)dev,
+	return z_impl_can_send((struct device *)dev,
 			      (const struct zcan_frame *)msg,
 			      (s32_t)timeout, (can_tx_callback_t) callback_isr);
 }
@@ -39,7 +39,7 @@
 				     sizeof(struct zcan_filter)));
 	Z_OOPS(Z_SYSCALL_OBJ(msgq, K_OBJ_MSGQ));
 
-	return _impl_can_attach_msgq((struct device *)dev,
+	return z_impl_can_attach_msgq((struct device *)dev,
 				     (struct k_msgq *)msgq,
 				     (const struct zcan_filter *) filter);
 }
@@ -48,7 +48,7 @@
 
 	Z_OOPS(Z_SYSCALL_DRIVER_CAN(dev, detach));
 
-	_impl_can_detach((struct device *)dev, (int)filter_id);
+	z_impl_can_detach((struct device *)dev, (int)filter_id);
 
 	return 0;
 }
diff --git a/drivers/counter/counter_handlers.c b/drivers/counter/counter_handlers.c
index 4d4c6cf..8dbda1c 100644
--- a/drivers/counter/counter_handlers.c
+++ b/drivers/counter/counter_handlers.c
@@ -14,7 +14,7 @@
 	Z_SYSCALL_HANDLER(counter_ ## name, dev) \
 	{ \
 		Z_OOPS(Z_SYSCALL_DRIVER_COUNTER(dev, name)); \
-		return _impl_counter_ ## name((struct device *)dev); \
+		return z_impl_counter_ ## name((struct device *)dev); \
 	}
 
 COUNTER_HANDLER(get_pending_int)
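
Expanded by hand, COUNTER_HANDLER(get_pending_int) pastes the new prefix
onto the API name and yields (before Z_SYSCALL_HANDLER itself expands):

	Z_SYSCALL_HANDLER(counter_get_pending_int, dev)
	{
		Z_OOPS(Z_SYSCALL_DRIVER_COUNTER(dev, get_pending_int));
		return z_impl_counter_get_pending_int((struct device *)dev);
	}
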
diff --git a/drivers/dma/dma_handlers.c b/drivers/dma/dma_handlers.c
index a85dab5..4fed46c 100644
--- a/drivers/dma/dma_handlers.c
+++ b/drivers/dma/dma_handlers.c
@@ -14,12 +14,12 @@
 Z_SYSCALL_HANDLER(dma_start, dev, channel)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_DMA(dev, start));
-	return _impl_dma_start((struct device *)dev, channel);
+	return z_impl_dma_start((struct device *)dev, channel);
 }
 
 Z_SYSCALL_HANDLER(dma_stop, dev, channel)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_DMA(dev, stop));
-	return _impl_dma_stop((struct device *)dev, channel);
+	return z_impl_dma_stop((struct device *)dev, channel);
 }
 
diff --git a/drivers/entropy/entropy_handlers.c b/drivers/entropy/entropy_handlers.c
index b22cecc..81946be 100644
--- a/drivers/entropy/entropy_handlers.c
+++ b/drivers/entropy/entropy_handlers.c
@@ -11,6 +11,6 @@
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_ENTROPY(dev, get_entropy));
 	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(buffer, len));
-	return _impl_entropy_get_entropy((struct device *)dev, (u8_t *)buffer,
+	return z_impl_entropy_get_entropy((struct device *)dev, (u8_t *)buffer,
 					 len);
 }
diff --git a/drivers/flash/flash_handlers.c b/drivers/flash/flash_handlers.c
index b0c7ec7..206e867 100644
--- a/drivers/flash/flash_handlers.c
+++ b/drivers/flash/flash_handlers.c
@@ -11,7 +11,7 @@
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_FLASH(dev, read));
 	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(data, len));
-	return _impl_flash_read((struct device *)dev, offset, (void *)data,
+	return z_impl_flash_read((struct device *)dev, offset, (void *)data,
 				len);
 }
 
@@ -19,14 +19,14 @@
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_FLASH(dev, write));
 	Z_OOPS(Z_SYSCALL_MEMORY_READ(data, len));
-	return _impl_flash_write((struct device *)dev, offset,
+	return z_impl_flash_write((struct device *)dev, offset,
 				 (const void *)data, len);
 }
 
 Z_SYSCALL_HANDLER(flash_write_protection_set, dev, enable)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_FLASH(dev, write_protection));
-	return _impl_flash_write_protection_set((struct device *)dev, enable);
+	return z_impl_flash_write_protection_set((struct device *)dev, enable);
 }
 
 Z_SYSCALL_HANDLER1_SIMPLE(flash_get_write_block_size, K_OBJ_DRIVER_FLASH,
@@ -37,7 +37,7 @@
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_FLASH(dev, page_layout));
 	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(info, sizeof(struct flash_pages_info)));
-	return _impl_flash_get_page_info_by_offs((struct device *)dev, offs,
+	return z_impl_flash_get_page_info_by_offs((struct device *)dev, offs,
 					(struct flash_pages_info *)info);
 }
 
@@ -45,13 +45,13 @@
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_FLASH(dev, page_layout));
 	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(info, sizeof(struct flash_pages_info)));
-	return _impl_flash_get_page_info_by_idx((struct device *)dev, idx,
+	return z_impl_flash_get_page_info_by_idx((struct device *)dev, idx,
 					(struct flash_pages_info *)info);
 }
 
 Z_SYSCALL_HANDLER(flash_get_page_count, dev)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_FLASH(dev, page_layout));
-	return _impl_flash_get_page_count((struct device *)dev);
+	return z_impl_flash_get_page_count((struct device *)dev);
 }
 #endif
diff --git a/drivers/flash/flash_page_layout.c b/drivers/flash/flash_page_layout.c
index 11a95a8..29eaae9 100644
--- a/drivers/flash/flash_page_layout.c
+++ b/drivers/flash/flash_page_layout.c
@@ -52,19 +52,19 @@
 	return -EINVAL; /* page of the index doesn't exist */
 }
 
-int _impl_flash_get_page_info_by_offs(struct device *dev, off_t offs,
+int z_impl_flash_get_page_info_by_offs(struct device *dev, off_t offs,
 				      struct flash_pages_info *info)
 {
 	return _flash_get_page_info(dev, offs, true, info);
 }
 
-int _impl_flash_get_page_info_by_idx(struct device *dev, u32_t page_index,
+int z_impl_flash_get_page_info_by_idx(struct device *dev, u32_t page_index,
 				     struct flash_pages_info *info)
 {
 	return _flash_get_page_info(dev, page_index, false, info);
 }
 
-size_t _impl_flash_get_page_count(struct device *dev)
+size_t z_impl_flash_get_page_count(struct device *dev)
 {
 	const struct flash_driver_api *api = dev->driver_api;
 	const struct flash_pages_layout *layout;
diff --git a/drivers/gpio/gpio_handlers.c b/drivers/gpio/gpio_handlers.c
index 8c3c7d2..cad773f 100644
--- a/drivers/gpio/gpio_handlers.c
+++ b/drivers/gpio/gpio_handlers.c
@@ -10,36 +10,36 @@
 Z_SYSCALL_HANDLER(gpio_config, port, access_op, pin, flags)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_GPIO(port, config));
-	return _impl_gpio_config((struct device *)port, access_op, pin, flags);
+	return z_impl_gpio_config((struct device *)port, access_op, pin, flags);
 }
 
 Z_SYSCALL_HANDLER(gpio_write, port, access_op, pin, value)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_GPIO(port, write));
-	return _impl_gpio_write((struct device *)port, access_op, pin, value);
+	return z_impl_gpio_write((struct device *)port, access_op, pin, value);
 }
 
 Z_SYSCALL_HANDLER(gpio_read, port, access_op, pin, value)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_GPIO(port, read));
 	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(value, sizeof(u32_t)));
-	return _impl_gpio_read((struct device *)port, access_op, pin,
+	return z_impl_gpio_read((struct device *)port, access_op, pin,
 			       (u32_t *)value);
 }
 
 Z_SYSCALL_HANDLER(gpio_enable_callback, port, access_op, pin)
 {
-	return _impl_gpio_enable_callback((struct device *)port, access_op,
+	return z_impl_gpio_enable_callback((struct device *)port, access_op,
 					  pin);
 }
 
 Z_SYSCALL_HANDLER(gpio_disable_callback, port, access_op, pin)
 {
-	return _impl_gpio_disable_callback((struct device *)port, access_op,
+	return z_impl_gpio_disable_callback((struct device *)port, access_op,
 					   pin);
 }
 
 Z_SYSCALL_HANDLER(gpio_get_pending_int, port)
 {
-	return _impl_gpio_get_pending_int((struct device *)port);
+	return z_impl_gpio_get_pending_int((struct device *)port);
 }
diff --git a/drivers/hwinfo/hwinfo_handlers.c b/drivers/hwinfo/hwinfo_handlers.c
index 8dc00ad..7570e30 100644
--- a/drivers/hwinfo/hwinfo_handlers.c
+++ b/drivers/hwinfo/hwinfo_handlers.c
@@ -11,5 +11,5 @@
 
 	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(buffer, length));
 
-	return _impl_hwinfo_get_device_id((u8_t *)buffer, (size_t)length);
+	return z_impl_hwinfo_get_device_id((u8_t *)buffer, (size_t)length);
 }
diff --git a/drivers/hwinfo/hwinfo_imxrt.c b/drivers/hwinfo/hwinfo_imxrt.c
index 2d46abc..b2ebdc9 100644
--- a/drivers/hwinfo/hwinfo_imxrt.c
+++ b/drivers/hwinfo/hwinfo_imxrt.c
@@ -12,7 +12,7 @@
 	u32_t id[2];
 };
 
-ssize_t _impl_hwinfo_get_device_id(u8_t *buffer, size_t length)
+ssize_t z_impl_hwinfo_get_device_id(u8_t *buffer, size_t length)
 {
 	struct imxrt_uid dev_id;
 
diff --git a/drivers/hwinfo/hwinfo_mcux_sim.c b/drivers/hwinfo/hwinfo_mcux_sim.c
index cbabf6c..c775b3c 100644
--- a/drivers/hwinfo/hwinfo_mcux_sim.c
+++ b/drivers/hwinfo/hwinfo_mcux_sim.c
@@ -28,7 +28,7 @@
 	u32_t id[HWINFO_DEVICE_ID_LENGTH_TOTAL];
 };
 
-ssize_t _impl_hwinfo_get_device_id(u8_t *buffer, size_t length)
+ssize_t z_impl_hwinfo_get_device_id(u8_t *buffer, size_t length)
 {
 	struct kinetis_uid dev_id;
 
diff --git a/drivers/hwinfo/hwinfo_nrf.c b/drivers/hwinfo/hwinfo_nrf.c
index 75c73ae..743f78c 100644
--- a/drivers/hwinfo/hwinfo_nrf.c
+++ b/drivers/hwinfo/hwinfo_nrf.c
@@ -12,7 +12,7 @@
 	u32_t id[2];
 };
 
-ssize_t _impl_hwinfo_get_device_id(u8_t *buffer, size_t length)
+ssize_t z_impl_hwinfo_get_device_id(u8_t *buffer, size_t length)
 {
 	struct nrf_uid dev_id;
 
diff --git a/drivers/hwinfo/hwinfo_stm32.c b/drivers/hwinfo/hwinfo_stm32.c
index d175bf2..c7a7f55 100644
--- a/drivers/hwinfo/hwinfo_stm32.c
+++ b/drivers/hwinfo/hwinfo_stm32.c
@@ -12,7 +12,7 @@
 	u32_t id[3];
 };
 
-ssize_t _impl_hwinfo_get_device_id(u8_t *buffer, size_t length)
+ssize_t z_impl_hwinfo_get_device_id(u8_t *buffer, size_t length)
 {
 	struct stm32_uid dev_id;
 
diff --git a/drivers/hwinfo/hwinfo_weak_impl.c b/drivers/hwinfo/hwinfo_weak_impl.c
index 053c165..cbbde9e 100644
--- a/drivers/hwinfo/hwinfo_weak_impl.c
+++ b/drivers/hwinfo/hwinfo_weak_impl.c
@@ -6,7 +6,7 @@
 
 #include <hwinfo.h>
 
-ssize_t __weak _impl_hwinfo_get_device_id(u8_t *buffer, size_t length)
+ssize_t __weak z_impl_hwinfo_get_device_id(u8_t *buffer, size_t length)
 {
 	return -ENOTSUP;
 }
diff --git a/drivers/i2c/i2c_handlers.c b/drivers/i2c/i2c_handlers.c
index d431622..d754a26 100644
--- a/drivers/i2c/i2c_handlers.c
+++ b/drivers/i2c/i2c_handlers.c
@@ -11,7 +11,7 @@
 Z_SYSCALL_HANDLER(i2c_configure, dev, dev_config)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_I2C(dev, configure));
-	return _impl_i2c_configure((struct device *)dev, dev_config);
+	return z_impl_i2c_configure((struct device *)dev, dev_config);
 }
 
 static u32_t copy_msgs_and_transfer(struct device *dev,
@@ -34,7 +34,7 @@
 					copy[i].flags & I2C_MSG_READ));
 	}
 
-	return _impl_i2c_transfer(dev, copy, num_msgs, addr);
+	return z_impl_i2c_transfer(dev, copy, num_msgs, addr);
 }
 
 Z_SYSCALL_HANDLER(i2c_transfer, dev, msgs, num_msgs, addr)
diff --git a/drivers/i2s/i2s_common.c b/drivers/i2s/i2s_common.c
index c2100cf..c140311 100644
--- a/drivers/i2s/i2s_common.c
+++ b/drivers/i2s/i2s_common.c
@@ -8,7 +8,7 @@
 #include <string.h>
 #include <i2s.h>
 
-int _impl_i2s_buf_read(struct device *dev, void *buf, size_t *size)
+int z_impl_i2s_buf_read(struct device *dev, void *buf, size_t *size)
 {
 	void *mem_block;
 	int ret;
@@ -26,7 +26,7 @@
 	return ret;
 }
 
-int _impl_i2s_buf_write(struct device *dev, void *buf, size_t size)
+int z_impl_i2s_buf_write(struct device *dev, void *buf, size_t size)
 {
 	int ret;
 	struct i2s_config *tx_cfg;
diff --git a/drivers/i2s/i2s_handlers.c b/drivers/i2s/i2s_handlers.c
index 1fb3fbe..9776f05 100644
--- a/drivers/i2s/i2s_handlers.c
+++ b/drivers/i2s/i2s_handlers.c
@@ -35,7 +35,7 @@
 		goto out;
 	}
 
-	ret = _impl_i2s_configure((struct device *)dev, dir, &config);
+	ret = z_impl_i2s_configure((struct device *)dev, dir, &config);
 out:
 	return ret;
 }
@@ -110,5 +110,5 @@
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_I2S(dev, trigger));
 
-	return _impl_i2s_trigger((struct device *)dev, dir, cmd);
+	return z_impl_i2s_trigger((struct device *)dev, dir, cmd);
 }
diff --git a/drivers/interrupt_controller/arcv2_irq_unit.c b/drivers/interrupt_controller/arcv2_irq_unit.c
index 1382ede..ca5e999 100644
--- a/drivers/interrupt_controller/arcv2_irq_unit.c
+++ b/drivers/interrupt_controller/arcv2_irq_unit.c
@@ -71,38 +71,38 @@
 	 * values in this loop.
 	 */
 	for (irq = 16; irq < CONFIG_NUM_IRQS; irq++) {
-		_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
+		z_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
 #ifdef CONFIG_ARC_HAS_SECURE
-		_arc_v2_aux_reg_write(_ARC_V2_IRQ_PRIORITY,
+		z_arc_v2_aux_reg_write(_ARC_V2_IRQ_PRIORITY,
 			 (CONFIG_NUM_IRQ_PRIO_LEVELS-1) |
 			 _ARC_V2_IRQ_PRIORITY_SECURE); /* lowest priority */
 #else
-		_arc_v2_aux_reg_write(_ARC_V2_IRQ_PRIORITY,
+		z_arc_v2_aux_reg_write(_ARC_V2_IRQ_PRIORITY,
 			 (CONFIG_NUM_IRQ_PRIO_LEVELS-1)); /* lowest priority */
 #endif
-		_arc_v2_aux_reg_write(_ARC_V2_IRQ_ENABLE, _ARC_V2_INT_DISABLE);
-		_arc_v2_aux_reg_write(_ARC_V2_IRQ_TRIGGER, _ARC_V2_INT_LEVEL);
+		z_arc_v2_aux_reg_write(_ARC_V2_IRQ_ENABLE, _ARC_V2_INT_DISABLE);
+		z_arc_v2_aux_reg_write(_ARC_V2_IRQ_TRIGGER, _ARC_V2_INT_LEVEL);
 	}
 
 	return 0;
 }
 
-void _arc_v2_irq_unit_int_eoi(int irq)
+void z_arc_v2_irq_unit_int_eoi(int irq)
 {
-	_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
-	_arc_v2_aux_reg_write(_ARC_V2_IRQ_PULSE_CANCEL, 1);
+	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
+	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_PULSE_CANCEL, 1);
 }
 
-void _arc_v2_irq_unit_trigger_set(int irq, unsigned int trigger)
+void z_arc_v2_irq_unit_trigger_set(int irq, unsigned int trigger)
 {
-	_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
-	_arc_v2_aux_reg_write(_ARC_V2_IRQ_TRIGGER, trigger);
+	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
+	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_TRIGGER, trigger);
 }
 
-unsigned int _arc_v2_irq_unit_trigger_get(int irq)
+unsigned int z_arc_v2_irq_unit_trigger_get(int irq)
 {
-	_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
-	return _arc_v2_aux_reg_read(_ARC_V2_IRQ_TRIGGER);
+	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
+	return z_arc_v2_aux_reg_read(_ARC_V2_IRQ_TRIGGER);
 }
 
 #ifdef CONFIG_DEVICE_POWER_MANAGEMENT
@@ -118,17 +118,17 @@
 	 * values in this loop.
 	 */
 	for (irq = 16U; irq < CONFIG_NUM_IRQS; irq++) {
-		_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
+		z_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
 		ctx.irq_config[irq - 16] =
-			_arc_v2_aux_reg_read(_ARC_V2_IRQ_PRIORITY) << 2;
+			z_arc_v2_aux_reg_read(_ARC_V2_IRQ_PRIORITY) << 2;
 		ctx.irq_config[irq - 16] |=
-			_arc_v2_aux_reg_read(_ARC_V2_IRQ_TRIGGER) << 1;
+			z_arc_v2_aux_reg_read(_ARC_V2_IRQ_TRIGGER) << 1;
 		ctx.irq_config[irq - 16] |=
-			_arc_v2_aux_reg_read(_ARC_V2_IRQ_ENABLE);
+			z_arc_v2_aux_reg_read(_ARC_V2_IRQ_ENABLE);
 	}
 
-	ctx.irq_ctrl = _arc_v2_aux_reg_read(_ARC_V2_AUX_IRQ_CTRL);
-	ctx.irq_vect_base = _arc_v2_aux_reg_read(_ARC_V2_IRQ_VECT_BASE);
+	ctx.irq_ctrl = z_arc_v2_aux_reg_read(_ARC_V2_AUX_IRQ_CTRL);
+	ctx.irq_vect_base = z_arc_v2_aux_reg_read(_ARC_V2_IRQ_VECT_BASE);
 
 	_arc_v2_irq_unit_device_power_state = DEVICE_PM_SUSPEND_STATE;
 
@@ -147,25 +147,25 @@
 	 * values in this loop.
 	 */
 	for (irq = 16U; irq < CONFIG_NUM_IRQS; irq++) {
-		_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
+		z_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
 #ifdef CONFIG_ARC_HAS_SECURE
-		_arc_v2_aux_reg_write(_ARC_V2_IRQ_PRIORITY,
+		z_arc_v2_aux_reg_write(_ARC_V2_IRQ_PRIORITY,
 				ctx.irq_config[irq - 16] >> 2 |
 				_ARC_V2_IRQ_PRIORITY_SECURE);
 #else
-		_arc_v2_aux_reg_write(_ARC_V2_IRQ_PRIORITY,
+		z_arc_v2_aux_reg_write(_ARC_V2_IRQ_PRIORITY,
 				ctx.irq_config[irq - 16] >> 2);
 #endif
-		_arc_v2_aux_reg_write(_ARC_V2_IRQ_TRIGGER,
+		z_arc_v2_aux_reg_write(_ARC_V2_IRQ_TRIGGER,
 				(ctx.irq_config[irq - 16] >> 1) & BIT(0));
-		_arc_v2_aux_reg_write(_ARC_V2_IRQ_ENABLE,
+		z_arc_v2_aux_reg_write(_ARC_V2_IRQ_ENABLE,
 				ctx.irq_config[irq - 16] & BIT(0));
 	}
 
-	_arc_v2_aux_reg_write(_ARC_V2_AUX_IRQ_CTRL, ctx.irq_ctrl);
-	_arc_v2_aux_reg_write(_ARC_V2_IRQ_VECT_BASE, ctx.irq_vect_base);
+	z_arc_v2_aux_reg_write(_ARC_V2_AUX_IRQ_CTRL, ctx.irq_ctrl);
+	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_VECT_BASE, ctx.irq_vect_base);
 
-	status32 = _arc_v2_aux_reg_read(_ARC_V2_STATUS32);
+	status32 = z_arc_v2_aux_reg_read(_ARC_V2_STATUS32);
 	status32 |= _ARC_V2_STATUS32_E(_ARC_V2_DEF_IRQ_LEVEL);
 
 	__builtin_arc_kflag(status32);
diff --git a/drivers/interrupt_controller/ioapic_intr.c b/drivers/interrupt_controller/ioapic_intr.c
index 8117f4a..ea9e99c 100644
--- a/drivers/interrupt_controller/ioapic_intr.c
+++ b/drivers/interrupt_controller/ioapic_intr.c
@@ -133,7 +133,7 @@
  *
  * @return N/A
  */
-void _ioapic_irq_enable(unsigned int irq)
+void z_ioapic_irq_enable(unsigned int irq)
 {
 	_IoApicRedUpdateLo(irq, 0, IOAPIC_INT_MASK);
 }
@@ -147,7 +147,7 @@
  *
  * @return N/A
  */
-void _ioapic_irq_disable(unsigned int irq)
+void z_ioapic_irq_disable(unsigned int irq)
 {
 	_IoApicRedUpdateLo(irq, IOAPIC_INT_MASK, IOAPIC_INT_MASK);
 }
@@ -283,7 +283,7 @@
  *
  * @return N/A
  */
-void _ioapic_irq_set(unsigned int irq, unsigned int vector, u32_t flags)
+void z_ioapic_irq_set(unsigned int irq, unsigned int vector, u32_t flags)
 {
 	u32_t rteValue;   /* value to copy into redirection table entry */
 
@@ -304,7 +304,7 @@
  * @param vector Vector number
  * @return N/A
  */
-void _ioapic_int_vec_set(unsigned int irq, unsigned int vector)
+void z_ioapic_int_vec_set(unsigned int irq, unsigned int vector)
 {
 	_IoApicRedUpdateLo(irq, vector, IOAPIC_VEC_MASK);
 }
diff --git a/drivers/interrupt_controller/loapic_intr.c b/drivers/interrupt_controller/loapic_intr.c
index 5f61e7d..0c54023 100644
--- a/drivers/interrupt_controller/loapic_intr.c
+++ b/drivers/interrupt_controller/loapic_intr.c
@@ -275,7 +275,7 @@
 
 	/* discard a pending interrupt if any */
 #if CONFIG_EOI_FORWARDING_BUG
-	_lakemont_eoi();
+	z_lakemont_eoi();
 #else
 	LOAPIC_WRITE(LOAPIC_EOI, 0);
 #endif
@@ -292,7 +292,7 @@
  * @return N/A
  */
 
-void _loapic_int_vec_set(unsigned int irq, /* IRQ number of the interrupt */
+void z_loapic_int_vec_set(unsigned int irq, /* IRQ number of the interrupt */
 				  unsigned int vector /* vector to copy into the LVT */
 				  )
 {
@@ -331,7 +331,7 @@
  * @return N/A
  */
 
-void _loapic_irq_enable(unsigned int irq)
+void z_loapic_irq_enable(unsigned int irq)
 {
 	unsigned int oldLevel;   /* previous interrupt lock level */
 
@@ -360,7 +360,7 @@
  * @return N/A
  */
 
-void _loapic_irq_disable(unsigned int irq)
+void z_loapic_irq_disable(unsigned int irq)
 {
 	unsigned int oldLevel;   /* previous interrupt lock level */
 
@@ -467,12 +467,12 @@
 
 		if (_irq_to_interrupt_vector[LOAPIC_IRQ_BASE + loapic_irq]) {
 			/* Configure vector and enable the required ones*/
-			_loapic_int_vec_set(loapic_irq,
+			z_loapic_int_vec_set(loapic_irq,
 				_irq_to_interrupt_vector[LOAPIC_IRQ_BASE + loapic_irq]);
 
 			if (sys_bitfield_test_bit((mem_addr_t) loapic_suspend_buf,
 							loapic_irq)) {
-				_loapic_irq_enable(loapic_irq);
+				z_loapic_irq_enable(loapic_irq);
 			}
 		}
 	}
diff --git a/drivers/interrupt_controller/mvic.c b/drivers/interrupt_controller/mvic.c
index 2cddc7e..39b4ccf 100644
--- a/drivers/interrupt_controller/mvic.c
+++ b/drivers/interrupt_controller/mvic.c
@@ -154,7 +154,7 @@
 SYS_INIT(_mvic_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
 
 
-void _arch_irq_enable(unsigned int irq)
+void z_arch_irq_enable(unsigned int irq)
 {
 	if (irq == CONFIG_MVIC_TIMER_IRQ) {
 		sys_write32(sys_read32(MVIC_LVTTIMER) & ~MVIC_LVTTIMER_MASK,
@@ -165,7 +165,7 @@
 }
 
 
-void _arch_irq_disable(unsigned int irq)
+void z_arch_irq_disable(unsigned int irq)
 {
 	if (irq == CONFIG_MVIC_TIMER_IRQ) {
 		sys_write32(sys_read32(MVIC_LVTTIMER) | MVIC_LVTTIMER_MASK,
diff --git a/drivers/interrupt_controller/plic.c b/drivers/interrupt_controller/plic.c
index 3a0ee9c..3874a58 100644
--- a/drivers/interrupt_controller/plic.c
+++ b/drivers/interrupt_controller/plic.c
@@ -30,7 +30,7 @@
  *
  * This routine enables a RISCV PLIC-specific interrupt line.
  * riscv_plic_irq_enable is called by SOC_FAMILY_RISCV_PRIVILEGE
- * _arch_irq_enable function to enable external interrupts for
+ * z_arch_irq_enable function to enable external interrupts for
  * IRQS > RISCV_MAX_GENERIC_IRQ, whenever CONFIG_RISCV_HAS_PLIC
  * variable is set.
  * @param irq IRQ number to enable
@@ -56,7 +56,7 @@
  *
  * This routine disables a RISCV PLIC-specific interrupt line.
  * riscv_plic_irq_disable is called by SOC_FAMILY_RISCV_PRIVILEGE
- * _arch_irq_disable function to disable external interrupts, for
+ * z_arch_irq_disable function to disable external interrupts, for
  * IRQS > RISCV_MAX_GENERIC_IRQ, whenever CONFIG_RISCV_HAS_PLIC
  * variable is set.
  * @param irq IRQ number to disable
@@ -100,7 +100,7 @@
  * @brief Set priority of a riscv PLIC-specific interrupt line
  *
  * This routine set the priority of a RISCV PLIC-specific interrupt line.
- * riscv_plic_irq_set_prio is called by riscv32 _ARCH_IRQ_CONNECT to set
+ * riscv_plic_irq_set_prio is called by riscv32 Z_ARCH_IRQ_CONNECT to set
  * the priority of an interrupt whenever CONFIG_RISCV_HAS_PLIC variable is set.
  * @param irq IRQ number for which to set priority
  *
@@ -157,11 +157,11 @@
 	save_irq = irq;
 
 	/*
-	 * If the IRQ is out of range, call _irq_spurious.
-	 * A call to _irq_spurious will not return.
+	 * If the IRQ is out of range, call z_irq_spurious.
+	 * A call to z_irq_spurious will not return.
 	 */
 	if (irq == 0 || irq >= PLIC_IRQS)
-		_irq_spurious(NULL);
+		z_irq_spurious(NULL);
 
 	irq += RISCV_MAX_GENERIC_IRQ;
 
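The layering the comments above describe, sketched at the arch level
(riscv_generic_irq_enable is an illustrative stand-in; the actual
SOC_FAMILY_RISCV_PRIVILEGE code is not part of this patch):

	void z_arch_irq_enable(unsigned int irq)
	{
	#if defined(CONFIG_RISCV_HAS_PLIC)
		if (irq > RISCV_MAX_GENERIC_IRQ) {
			riscv_plic_irq_enable(irq);
			return;
		}
	#endif
		riscv_generic_irq_enable(irq); /* generic (mie CSR) path */
	}
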
diff --git a/drivers/interrupt_controller/system_apic.c b/drivers/interrupt_controller/system_apic.c
index 927980c..0e7ae7f 100644
--- a/drivers/interrupt_controller/system_apic.c
+++ b/drivers/interrupt_controller/system_apic.c
@@ -49,9 +49,9 @@
 	__ASSERT(irq <= HARDWARE_IRQ_LIMIT, "invalid irq line");
 
 	if (IS_IOAPIC_IRQ(irq)) {
-		_ioapic_irq_set(irq, vector, flags);
+		z_ioapic_irq_set(irq, vector, flags);
 	} else {
-		_loapic_int_vec_set(irq - LOAPIC_IRQ_BASE, vector);
+		z_loapic_int_vec_set(irq - LOAPIC_IRQ_BASE, vector);
 	}
 }
 
@@ -72,12 +72,12 @@
  *
  * @return N/A
  */
-void _arch_irq_enable(unsigned int irq)
+void z_arch_irq_enable(unsigned int irq)
 {
 	if (IS_IOAPIC_IRQ(irq)) {
-		_ioapic_irq_enable(irq);
+		z_ioapic_irq_enable(irq);
 	} else {
-		_loapic_irq_enable(irq - LOAPIC_IRQ_BASE);
+		z_loapic_irq_enable(irq - LOAPIC_IRQ_BASE);
 	}
 }
 
@@ -92,12 +92,12 @@
  *
  * @return N/A
  */
-void _arch_irq_disable(unsigned int irq)
+void z_arch_irq_disable(unsigned int irq)
 {
 	if (IS_IOAPIC_IRQ(irq)) {
-		_ioapic_irq_disable(irq);
+		z_ioapic_irq_disable(irq);
 	} else {
-		_loapic_irq_disable(irq - LOAPIC_IRQ_BASE);
+		z_loapic_irq_disable(irq - LOAPIC_IRQ_BASE);
 	}
 }
 
diff --git a/drivers/ipm/ipm_handlers.c b/drivers/ipm/ipm_handlers.c
index 48e4c4c..57838cb 100644
--- a/drivers/ipm/ipm_handlers.c
+++ b/drivers/ipm/ipm_handlers.c
@@ -11,24 +11,24 @@
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_IPM(dev, send));
 	Z_OOPS(Z_SYSCALL_MEMORY_READ(data, size));
-	return _impl_ipm_send((struct device *)dev, wait, id,
+	return z_impl_ipm_send((struct device *)dev, wait, id,
 			      (const void *)data, size);
 }
 
 Z_SYSCALL_HANDLER(ipm_max_data_size_get, dev)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_IPM(dev, max_data_size_get));
-	return _impl_max_data_size_get((struct device *)dev);
+	return z_impl_max_data_size_get((struct device *)dev);
 }
 
 Z_SYSCALL_HANDLER(ipm_max_id_val_get, dev)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_IPM(dev, max_id_val_get));
-	return _impl_max_id_val_get((struct device *)dev);
+	return z_impl_max_id_val_get((struct device *)dev);
 }
 
 Z_SYSCALL_HANDLER(ipm_set_enabled, dev, enable)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_IPM(dev, set_enabled));
-	return _impl_ipm_set_enabled((struct device *)dev, enable);
+	return z_impl_ipm_set_enabled((struct device *)dev, enable);
 }
diff --git a/drivers/ipm/ipm_quark_se.h b/drivers/ipm/ipm_quark_se.h
index e359a75..363e7f9 100644
--- a/drivers/ipm/ipm_quark_se.h
+++ b/drivers/ipm/ipm_quark_se.h
@@ -118,7 +118,7 @@
 		.direction = dir \
 	}; \
 	struct quark_se_ipm_driver_data quark_se_ipm_runtime_##name; \
-	DEVICE_AND_API_INIT(name, _STRINGIFY(name), quark_se_ipm_initialize, \
+	DEVICE_AND_API_INIT(name, Z_STRINGIFY(name), quark_se_ipm_initialize, \
 			    &quark_se_ipm_runtime_##name, \
 			    &quark_se_ipm_config_##name, \
 			    POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, \
diff --git a/drivers/led/led_handlers.c b/drivers/led/led_handlers.c
index dbcf33b..f5fa763 100644
--- a/drivers/led/led_handlers.c
+++ b/drivers/led/led_handlers.c
@@ -10,24 +10,24 @@
 Z_SYSCALL_HANDLER(led_blink, dev, led, delay_on, delay_off)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_LED(dev, blink));
-	return _impl_led_blink((struct device *)dev, led, delay_on,
+	return z_impl_led_blink((struct device *)dev, led, delay_on,
 					delay_off);
 }
 
 Z_SYSCALL_HANDLER(led_set_brightness, dev, led, value)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_LED(dev, set_brightness));
-	return _impl_led_set_brightness((struct device *)dev, led, value);
+	return z_impl_led_set_brightness((struct device *)dev, led, value);
 }
 
 Z_SYSCALL_HANDLER(led_on, dev, led)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_LED(dev, on));
-	return _impl_led_on((struct device *)dev, led);
+	return z_impl_led_on((struct device *)dev, led);
 }
 
 Z_SYSCALL_HANDLER(led_off, dev, led)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_LED(dev, off));
-	return _impl_led_off((struct device *)dev, led);
+	return z_impl_led_off((struct device *)dev, led);
 }
diff --git a/drivers/pwm/pwm_handlers.c b/drivers/pwm/pwm_handlers.c
index df2239a..51c48e4 100644
--- a/drivers/pwm/pwm_handlers.c
+++ b/drivers/pwm/pwm_handlers.c
@@ -10,7 +10,7 @@
 Z_SYSCALL_HANDLER(pwm_pin_set_cycles, dev, pwm, period, pulse)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_PWM(dev, pin_set));
-	return _impl_pwm_pin_set_cycles((struct device *)dev, pwm, period,
+	return z_impl_pwm_pin_set_cycles((struct device *)dev, pwm, period,
 					pulse);
 }
 
@@ -18,6 +18,6 @@
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_PWM(dev, get_cycles_per_sec));
 	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(cycles, sizeof(u64_t)));
-	return _impl_pwm_get_cycles_per_sec((struct device *)dev,
+	return z_impl_pwm_get_cycles_per_sec((struct device *)dev,
 					    pwm, (u64_t *)cycles);
 }
diff --git a/drivers/rtc/rtc_handlers.c b/drivers/rtc/rtc_handlers.c
index 716a831..8d4dbdc 100644
--- a/drivers/rtc/rtc_handlers.c
+++ b/drivers/rtc/rtc_handlers.c
@@ -10,31 +10,31 @@
 Z_SYSCALL_HANDLER(rtc_read, dev)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_RTC(dev, read));
-	return _impl_rtc_read((struct device *)dev);
+	return z_impl_rtc_read((struct device *)dev);
 }
 
 Z_SYSCALL_HANDLER(rtc_enable, dev)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_RTC(dev, enable));
-	_impl_rtc_enable((struct device *)dev);
+	z_impl_rtc_enable((struct device *)dev);
 	return 0;
 }
 
 Z_SYSCALL_HANDLER(rtc_disable, dev)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_RTC(dev, disable));
-	_impl_rtc_disable((struct device *)dev);
+	z_impl_rtc_disable((struct device *)dev);
 	return 0;
 }
 
 Z_SYSCALL_HANDLER(rtc_set_alarm, dev, alarm_val)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_RTC(dev, set_alarm));
-	return _impl_rtc_set_alarm((struct device *)dev, alarm_val);
+	return z_impl_rtc_set_alarm((struct device *)dev, alarm_val);
 }
 
 Z_SYSCALL_HANDLER(rtc_get_pending_int, dev)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_RTC(dev, get_pending_int));
-	return _impl_rtc_get_pending_int((struct device *)dev);
+	return z_impl_rtc_get_pending_int((struct device *)dev);
 }
diff --git a/drivers/sensor/sensor_handlers.c b/drivers/sensor/sensor_handlers.c
index b1e8d08..763c1c1 100644
--- a/drivers/sensor/sensor_handlers.c
+++ b/drivers/sensor/sensor_handlers.c
@@ -11,26 +11,26 @@
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_SENSOR(dev, attr_set));
 	Z_OOPS(Z_SYSCALL_MEMORY_READ(val, sizeof(struct sensor_value)));
-	return _impl_sensor_attr_set((struct device *)dev, chan, attr,
+	return z_impl_sensor_attr_set((struct device *)dev, chan, attr,
 				     (const struct sensor_value *)val);
 }
 
 Z_SYSCALL_HANDLER(sensor_sample_fetch, dev)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_SENSOR(dev, sample_fetch));
-	return _impl_sensor_sample_fetch((struct device *)dev);
+	return z_impl_sensor_sample_fetch((struct device *)dev);
 }
 
 Z_SYSCALL_HANDLER(sensor_sample_fetch_chan, dev, type)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_SENSOR(dev, sample_fetch));
-	return _impl_sensor_sample_fetch_chan((struct device *)dev, type);
+	return z_impl_sensor_sample_fetch_chan((struct device *)dev, type);
 }
 
 Z_SYSCALL_HANDLER(sensor_channel_get, dev, chan, val)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_SENSOR(dev, channel_get));
 	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(val, sizeof(struct sensor_value)));
-	return _impl_sensor_channel_get((struct device *)dev, chan,
+	return z_impl_sensor_channel_get((struct device *)dev, chan,
 					(struct sensor_value *)val);
 }
diff --git a/drivers/serial/uart_handlers.c b/drivers/serial/uart_handlers.c
index 01d9f5e..1b80d5c 100644
--- a/drivers/serial/uart_handlers.c
+++ b/drivers/serial/uart_handlers.c
@@ -10,13 +10,13 @@
 #define UART_SIMPLE(op_) \
 	Z_SYSCALL_HANDLER(uart_ ## op_, dev) { \
 		Z_OOPS(Z_SYSCALL_DRIVER_UART(dev, op_)); \
-		return _impl_uart_ ## op_((struct device *)dev); \
+		return z_impl_uart_ ## op_((struct device *)dev); \
 	}
 
 #define UART_SIMPLE_VOID(op_) \
 	Z_SYSCALL_HANDLER(uart_ ## op_, dev) { \
 		Z_OOPS(Z_SYSCALL_DRIVER_UART(dev, op_)); \
-		_impl_uart_ ## op_((struct device *)dev); \
+		z_impl_uart_ ## op_((struct device *)dev); \
 		return 0; \
 	}
 
@@ -26,14 +26,14 @@
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_UART(dev, poll_in));
 	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(p_char, sizeof(unsigned char)));
-	return _impl_uart_poll_in((struct device *)dev,
+	return z_impl_uart_poll_in((struct device *)dev,
 				  (unsigned char *)p_char);
 }
 
 Z_SYSCALL_HANDLER(uart_poll_out, dev, out_char)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_UART(dev, poll_out));
-	_impl_uart_poll_out((struct device *)dev, out_char);
+	z_impl_uart_poll_out((struct device *)dev, out_char);
 
 	return 0;
 }
@@ -53,14 +53,14 @@
 Z_SYSCALL_HANDLER(uart_line_ctrl_set, dev, ctrl, val)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_UART(dev, line_ctrl_set));
-	return _impl_uart_line_ctrl_set((struct device *)dev, ctrl, val);
+	return z_impl_uart_line_ctrl_set((struct device *)dev, ctrl, val);
 }
 
 Z_SYSCALL_HANDLER(uart_line_ctrl_get, dev, ctrl, val)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_UART(dev, line_ctrl_get));
 	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(val, sizeof(u32_t)));
-	return _impl_uart_line_ctrl_get((struct device *)dev, ctrl,
+	return z_impl_uart_line_ctrl_get((struct device *)dev, ctrl,
 					(u32_t *)val);
 }
 #endif /* CONFIG_UART_LINE_CTRL */
@@ -69,6 +69,6 @@
 Z_SYSCALL_HANDLER(uart_drv_cmd, dev, cmd, p)
 {
 	Z_OOPS(Z_SYSCALL_DRIVER_UART(dev, drv_cmd));
-	return _impl_uart_drv_cmd((struct device *)dev, cmd, p);
+	return z_impl_uart_drv_cmd((struct device *)dev, cmd, p);
 }
 #endif /* CONFIG_UART_DRV_CMD */
diff --git a/drivers/spi/spi_handlers.c b/drivers/spi/spi_handlers.c
index 6e33a74..5644e5c 100644
--- a/drivers/spi/spi_handlers.c
+++ b/drivers/spi/spi_handlers.c
@@ -62,7 +62,7 @@
 	copy_and_check(tx_bufs, tx_buf_copy, 0, ssf);
 	copy_and_check(rx_bufs, rx_buf_copy, 1, ssf);
 
-	return _impl_spi_transceive((struct device *)dev, config,
+	return z_impl_spi_transceive((struct device *)dev, config,
 				    tx_bufs, rx_bufs);
 }
 
@@ -126,5 +126,5 @@
 
 	Z_OOPS(Z_SYSCALL_MEMORY_READ(config, sizeof(*config)));
 	Z_OOPS(Z_SYSCALL_DRIVER_SPI(dev, release));
-	return _impl_spi_release((struct device *)dev, config);
+	return z_impl_spi_release((struct device *)dev, config);
 }
diff --git a/drivers/timer/altera_avalon_timer_hal.c b/drivers/timer/altera_avalon_timer_hal.c
index 04113b5..b465d79 100644
--- a/drivers/timer/altera_avalon_timer_hal.c
+++ b/drivers/timer/altera_avalon_timer_hal.c
@@ -59,7 +59,7 @@
 	return 0;
 }
 
-u32_t _timer_cycle_get_32(void)
+u32_t z_timer_cycle_get_32(void)
 {
 	/* Per the Altera Embedded IP Peripherals guide, you cannot
 	 * use a timer instance for both the system clock and timestamps
diff --git a/drivers/timer/arcv2_timer0.c b/drivers/timer/arcv2_timer0.c
index 2648b8d..189c1e6 100644
--- a/drivers/timer/arcv2_timer0.c
+++ b/drivers/timer/arcv2_timer0.c
@@ -48,7 +48,7 @@
  */
 static ALWAYS_INLINE u32_t timer0_count_register_get(void)
 {
-	return _arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT);
+	return z_arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT);
 }
 
 /**
@@ -59,7 +59,7 @@
  */
 static ALWAYS_INLINE void timer0_count_register_set(u32_t value)
 {
-	_arc_v2_aux_reg_write(_ARC_V2_TMR0_COUNT, value);
+	z_arc_v2_aux_reg_write(_ARC_V2_TMR0_COUNT, value);
 }
 
 /**
@@ -70,7 +70,7 @@
  */
 static ALWAYS_INLINE u32_t timer0_control_register_get(void)
 {
-	return _arc_v2_aux_reg_read(_ARC_V2_TMR0_CONTROL);
+	return z_arc_v2_aux_reg_read(_ARC_V2_TMR0_CONTROL);
 }
 
 /**
@@ -81,7 +81,7 @@
  */
 static ALWAYS_INLINE void timer0_control_register_set(u32_t value)
 {
-	_arc_v2_aux_reg_write(_ARC_V2_TMR0_CONTROL, value);
+	z_arc_v2_aux_reg_write(_ARC_V2_TMR0_CONTROL, value);
 }
 
 /**
@@ -92,7 +92,7 @@
  */
 static ALWAYS_INLINE u32_t timer0_limit_register_get(void)
 {
-	return _arc_v2_aux_reg_read(_ARC_V2_TMR0_LIMIT);
+	return z_arc_v2_aux_reg_read(_ARC_V2_TMR0_LIMIT);
 }
 
 /**
@@ -103,7 +103,7 @@
  */
 static ALWAYS_INLINE void timer0_limit_register_set(u32_t count)
 {
-	_arc_v2_aux_reg_write(_ARC_V2_TMR0_LIMIT, count);
+	z_arc_v2_aux_reg_write(_ARC_V2_TMR0_LIMIT, count);
 }
 
 static u32_t elapsed(void)
@@ -233,7 +233,7 @@
 	return cyc / CYC_PER_TICK;
 }
 
-u32_t _timer_cycle_get_32(void)
+u32_t z_timer_cycle_get_32(void)
 {
 	k_spinlock_key_t key = k_spin_lock(&lock);
 	u32_t ret = elapsed() + cycle_count;
diff --git a/drivers/timer/cortex_m_systick.c b/drivers/timer/cortex_m_systick.c
index b3b183c..b6aca76 100644
--- a/drivers/timer/cortex_m_systick.c
+++ b/drivers/timer/cortex_m_systick.c
@@ -8,7 +8,7 @@
 #include <spinlock.h>
 #include <arch/arm/cortex_m/cmsis.h>
 
-void _ExcExit(void);
+void z_ExcExit(void);
 
 /* Minimum cycles in the future to try to program. */
 #define MIN_DELAY 512
@@ -61,7 +61,7 @@
 	ctrl_cache = 0U;
 
 	z_clock_announce(TICKLESS ? dticks : 1);
-	_ExcExit();
+	z_ExcExit();
 }
 
 int z_clock_driver_init(struct device *device)
@@ -127,7 +127,7 @@
 	return cyc / CYC_PER_TICK;
 }
 
-u32_t _timer_cycle_get_32(void)
+u32_t z_timer_cycle_get_32(void)
 {
 	k_spinlock_key_t key = k_spin_lock(&lock);
 	u32_t ret = elapsed() + cycle_count;
diff --git a/drivers/timer/hpet.c b/drivers/timer/hpet.c
index 2e950cc..3ed789e 100644
--- a/drivers/timer/hpet.c
+++ b/drivers/timer/hpet.c
@@ -141,7 +141,7 @@
 	return ret;
 }
 
-u32_t _timer_cycle_get_32(void)
+u32_t z_timer_cycle_get_32(void)
 {
 	return MAIN_COUNTER_REG;
 }
diff --git a/drivers/timer/loapic_timer.c b/drivers/timer/loapic_timer.c
index 5203be4..d251fa4 100644
--- a/drivers/timer/loapic_timer.c
+++ b/drivers/timer/loapic_timer.c
@@ -151,7 +151,7 @@
 #endif
 
 #ifdef CONFIG_JAILHOUSE_X2APIC
-void _jailhouse_eoi(void)
+void z_jailhouse_eoi(void)
 {
 	write_x2apic(LOAPIC_EOI >> 4, 0);
 }
@@ -420,7 +420,7 @@
 	initial_count_register_set(programmed_cycles);
 }
 
-void _enable_sys_clock(void)
+void z_enable_sys_clock(void)
 {
 	if (!programmed_full_ticks) {
 		program_max_cycles();
@@ -764,13 +764,13 @@
  *
  * @return up counter of elapsed clock cycles
  */
-u32_t _timer_cycle_get_32(void)
+u32_t z_timer_cycle_get_32(void)
 {
 #if CONFIG_TSC_CYCLES_PER_SEC != 0
 	u64_t tsc;
 
 	/* 64-bit math to avoid overflows */
-	tsc = _tsc_read() * (u64_t)sys_clock_hw_cycles_per_sec() /
+	tsc = z_tsc_read() * (u64_t)sys_clock_hw_cycles_per_sec() /
 		(u64_t) CONFIG_TSC_CYCLES_PER_SEC;
 	return (u32_t)tsc;
 #else
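
The "64-bit math" comment deserves numbers. With illustrative magnitudes
(a 1.6 GHz TSC and a 150 MHz system clock, neither taken from this
patch) the intermediate product stays comfortably inside u64_t:

	/* after 3 s:  z_tsc_read() ~ 4.8e9   (already > UINT32_MAX)
	 *  * 150e6                 ~ 7.2e17  (< 2^63, fits in u64_t)
	 *  / 1.6e9                 ~ 4.5e8   (truncates safely to u32_t)
	 */

Doing the same multiply in 32 bits would overflow almost immediately
after boot.
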
diff --git a/drivers/timer/native_posix_timer.c b/drivers/timer/native_posix_timer.c
index b7f0a30..130b70f 100644
--- a/drivers/timer/native_posix_timer.c
+++ b/drivers/timer/native_posix_timer.c
@@ -30,7 +30,7 @@
  * Return the current HW cycle counter
  * (number of microseconds since boot in 32bits)
  */
-u32_t _timer_cycle_get_32(void)
+u32_t z_timer_cycle_get_32(void)
 {
 	return hwm_get_time();
 }
diff --git a/drivers/timer/nrf_rtc_timer.c b/drivers/timer/nrf_rtc_timer.c
index 3c504cd..d852994 100644
--- a/drivers/timer/nrf_rtc_timer.c
+++ b/drivers/timer/nrf_rtc_timer.c
@@ -193,7 +193,7 @@
 	return ret;
 }
 
-u32_t _timer_cycle_get_32(void)
+u32_t z_timer_cycle_get_32(void)
 {
 	u32_t key = irq_lock();
 	u32_t ret = counter_sub(counter(), last_count) + last_count;
diff --git a/drivers/timer/riscv_machine_timer.c b/drivers/timer/riscv_machine_timer.c
index 354a91a..bcc8dfb 100644
--- a/drivers/timer/riscv_machine_timer.c
+++ b/drivers/timer/riscv_machine_timer.c
@@ -129,7 +129,7 @@
 	return ret;
 }
 
-u32_t _timer_cycle_get_32(void)
+u32_t z_timer_cycle_get_32(void)
 {
 	return (u32_t)mtime();
 }
diff --git a/drivers/timer/rv32m1_lptmr_timer.c b/drivers/timer/rv32m1_lptmr_timer.c
index f992ff3..587e5ed 100644
--- a/drivers/timer/rv32m1_lptmr_timer.c
+++ b/drivers/timer/rv32m1_lptmr_timer.c
@@ -129,7 +129,7 @@
 	return 0;
 }
 
-u32_t _timer_cycle_get_32(void)
+u32_t z_timer_cycle_get_32(void)
 {
 	return cycle_count + SYSTEM_TIMER_INSTANCE->CNR;
 }
diff --git a/drivers/timer/sam0_rtc_timer.c b/drivers/timer/sam0_rtc_timer.c
index 76f55fd..0362b1f 100644
--- a/drivers/timer/sam0_rtc_timer.c
+++ b/drivers/timer/sam0_rtc_timer.c
@@ -256,7 +256,7 @@
 #endif
 }
 
-u32_t _timer_cycle_get_32(void)
+u32_t z_timer_cycle_get_32(void)
 {
 	/* Just return the absolute value of RTC cycle counter. */
 	return rtc_count();
diff --git a/drivers/timer/xtensa_sys_timer.c b/drivers/timer/xtensa_sys_timer.c
index c2055a6..ad7985e 100644
--- a/drivers/timer/xtensa_sys_timer.c
+++ b/drivers/timer/xtensa_sys_timer.c
@@ -114,7 +114,7 @@
 	return ret;
 }
 
-u32_t _timer_cycle_get_32(void)
+u32_t z_timer_cycle_get_32(void)
 {
 	return ccount();
 }
diff --git a/ext/hal/ti/simplelink/kernel/zephyr/dpl/ClockP_zephyr.c b/ext/hal/ti/simplelink/kernel/zephyr/dpl/ClockP_zephyr.c
index 34f5731..ccdc121 100644
--- a/ext/hal/ti/simplelink/kernel/zephyr/dpl/ClockP_zephyr.c
+++ b/ext/hal/ti/simplelink/kernel/zephyr/dpl/ClockP_zephyr.c
@@ -11,7 +11,7 @@
 
 uint32_t ClockP_getSystemTicks()
 {
-	return (uint32_t)_ms_to_ticks(k_uptime_get_32());
+	return (uint32_t)z_ms_to_ticks(k_uptime_get_32());
 }
 
 void ClockP_usleep(uint32_t usec)
diff --git a/include/aio_comparator.h b/include/aio_comparator.h
index 0493be8..db21c47 100644
--- a/include/aio_comparator.h
+++ b/include/aio_comparator.h
@@ -48,7 +48,7 @@
  */
 __syscall int aio_cmp_disable(struct device *dev, u8_t index);
 
-static inline int _impl_aio_cmp_disable(struct device *dev, u8_t index)
+static inline int z_impl_aio_cmp_disable(struct device *dev, u8_t index)
 {
 	const struct aio_cmp_driver_api *api = dev->driver_api;
 
@@ -95,7 +95,7 @@
  */
 __syscall int aio_cmp_get_pending_int(struct device *dev);
 
-static inline int _impl_aio_cmp_get_pending_int(struct device *dev)
+static inline int z_impl_aio_cmp_get_pending_int(struct device *dev)
 {
 	struct aio_cmp_driver_api *api;
 
diff --git a/include/app_memory/app_memdomain.h b/include/app_memory/app_memdomain.h
index bb4e47a..381482b 100644
--- a/include/app_memory/app_memdomain.h
+++ b/include/app_memory/app_memdomain.h
@@ -45,7 +45,7 @@
  *
  * @param id Name of the memory partition to associate this data
  */
-#define K_APP_DMEM(id) _GENERIC_SECTION(K_APP_DMEM_SECTION(id))
+#define K_APP_DMEM(id) Z_GENERIC_SECTION(K_APP_DMEM_SECTION(id))
 
 /**
  * @brief Place data in a partition's bss section
@@ -55,7 +55,7 @@
  *
  * @param id Name of the memory partition to associate this data
  */
-#define K_APP_BMEM(id) _GENERIC_SECTION(K_APP_BMEM_SECTION(id))
+#define K_APP_BMEM(id) Z_GENERIC_SECTION(K_APP_BMEM_SECTION(id))
 
 struct z_app_region {
 	void *bss_start;
@@ -120,7 +120,7 @@
 	}; \
 	extern char Z_APP_BSS_START(name)[]; \
 	extern char Z_APP_BSS_SIZE(name)[]; \
-	_GENERIC_SECTION(.app_regions.name) \
+	Z_GENERIC_SECTION(.app_regions.name) \
 	struct z_app_region name##_region = { \
 		.bss_start = &Z_APP_BSS_START(name), \
 		.bss_size = (size_t) &Z_APP_BSS_SIZE(name) \
diff --git a/include/arch/arc/arch.h b/include/arch/arc/arch.h
index 83bf65f..f35a7dd 100644
--- a/include/arch/arc/arch.h
+++ b/include/arch/arc/arch.h
@@ -83,23 +83,23 @@
 
 #if CONFIG_ARC_MPU_VER == 2
 
-#define _ARCH_THREAD_STACK_DEFINE(sym, size) \
+#define Z_ARCH_THREAD_STACK_DEFINE(sym, size) \
 	struct _k_thread_stack_element __noinit \
 		__aligned(POW2_CEIL(STACK_SIZE_ALIGN(size))) \
 		sym[POW2_CEIL(STACK_SIZE_ALIGN(size)) + \
 		+  STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE]
 
-#define _ARCH_THREAD_STACK_LEN(size) \
+#define Z_ARCH_THREAD_STACK_LEN(size) \
 	    (POW2_CEIL(STACK_SIZE_ALIGN(size)) + \
 	     MAX(POW2_CEIL(STACK_SIZE_ALIGN(size)), \
 		 POW2_CEIL(STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE)))
 
-#define _ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
+#define Z_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
 	struct _k_thread_stack_element __noinit \
 		__aligned(POW2_CEIL(STACK_SIZE_ALIGN(size))) \
-		sym[nmemb][_ARCH_THREAD_STACK_LEN(size)]
+		sym[nmemb][Z_ARCH_THREAD_STACK_LEN(size)]
 
-#define _ARCH_THREAD_STACK_MEMBER(sym, size) \
+#define Z_ARCH_THREAD_STACK_MEMBER(sym, size) \
 	struct _k_thread_stack_element \
 		__aligned(POW2_CEIL(STACK_SIZE_ALIGN(size))) \
 		sym[POW2_CEIL(size) + \
@@ -107,50 +107,50 @@
 
 #elif CONFIG_ARC_MPU_VER == 3
 
-#define _ARCH_THREAD_STACK_DEFINE(sym, size) \
+#define Z_ARCH_THREAD_STACK_DEFINE(sym, size) \
 	struct _k_thread_stack_element __noinit __aligned(STACK_ALIGN) \
 		sym[size + \
 		+ STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE]
 
-#define _ARCH_THREAD_STACK_LEN(size) \
+#define Z_ARCH_THREAD_STACK_LEN(size) \
 		((size) + STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE)
 
-#define _ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
+#define Z_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
 	struct _k_thread_stack_element __noinit __aligned(STACK_ALIGN) \
-		sym[nmemb][_ARCH_THREAD_STACK_LEN(size)]
+		sym[nmemb][Z_ARCH_THREAD_STACK_LEN(size)]
 
-#define _ARCH_THREAD_STACK_MEMBER(sym, size) \
+#define Z_ARCH_THREAD_STACK_MEMBER(sym, size) \
 	struct _k_thread_stack_element __aligned(STACK_ALIGN) \
 		sym[size + \
 		+ STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE]
 
 #endif /* CONFIG_ARC_MPU_VER */
 
-#define _ARCH_THREAD_STACK_SIZEOF(sym) \
+#define Z_ARCH_THREAD_STACK_SIZEOF(sym) \
 		(sizeof(sym) - CONFIG_PRIVILEGED_STACK_SIZE - STACK_GUARD_SIZE)
 
-#define _ARCH_THREAD_STACK_BUFFER(sym) \
+#define Z_ARCH_THREAD_STACK_BUFFER(sym) \
 		((char *)(sym))
 
 #else /* CONFIG_USERSPACE */
 
-#define _ARCH_THREAD_STACK_DEFINE(sym, size) \
+#define Z_ARCH_THREAD_STACK_DEFINE(sym, size) \
 	struct _k_thread_stack_element __noinit __aligned(STACK_ALIGN) \
 		sym[size + STACK_GUARD_SIZE]
 
-#define _ARCH_THREAD_STACK_LEN(size) ((size) + STACK_GUARD_SIZE)
+#define Z_ARCH_THREAD_STACK_LEN(size) ((size) + STACK_GUARD_SIZE)
 
-#define _ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
+#define Z_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
 	struct _k_thread_stack_element __noinit __aligned(STACK_ALIGN) \
-		sym[nmemb][_ARCH_THREAD_STACK_LEN(size)]
+		sym[nmemb][Z_ARCH_THREAD_STACK_LEN(size)]
 
-#define _ARCH_THREAD_STACK_MEMBER(sym, size) \
+#define Z_ARCH_THREAD_STACK_MEMBER(sym, size) \
 	struct _k_thread_stack_element __aligned(STACK_ALIGN) \
 		sym[size + STACK_GUARD_SIZE]
 
-#define _ARCH_THREAD_STACK_SIZEOF(sym) (sizeof(sym) - STACK_GUARD_SIZE)
+#define Z_ARCH_THREAD_STACK_SIZEOF(sym) (sizeof(sym) - STACK_GUARD_SIZE)
 
-#define _ARCH_THREAD_STACK_BUFFER(sym) ((char *)(sym + STACK_GUARD_SIZE))
+#define Z_ARCH_THREAD_STACK_BUFFER(sym) ((char *)(sym + STACK_GUARD_SIZE))
 
 #endif /* CONFIG_USERSPACE */
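
Nothing outside the arch layer spells these Z_ARCH_* names; the portable K_THREAD_STACK_DEFINE() family in kernel.h forwards to them, keeping the guard and privilege-stack padding an ARC detail. The intended call site, sketched:

    /* Expands to the Z_ARCH_THREAD_STACK_DEFINE() variant selected above. */
    K_THREAD_STACK_DEFINE(my_stack, 1024);

    /* Usable size, net of the guard/privilege regions the macro added. */
    size_t usable = K_THREAD_STACK_SIZEOF(my_stack);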
 
diff --git a/include/arch/arc/syscall.h b/include/arch/arc/syscall.h
index 0ad9da1..751ded8 100644
--- a/include/arch/arc/syscall.h
+++ b/include/arch/arc/syscall.h
@@ -38,7 +38,7 @@
  * just for enabling CONFIG_USERSPACE on arc w/o errors.
  */
 
-static inline u32_t _arch_syscall_invoke6(u32_t arg1, u32_t arg2, u32_t arg3,
+static inline u32_t z_arch_syscall_invoke6(u32_t arg1, u32_t arg2, u32_t arg3,
 					  u32_t arg4, u32_t arg5, u32_t arg6,
 					  u32_t call_id)
 {
@@ -62,7 +62,7 @@
 	return ret;
 }
 
-static inline u32_t _arch_syscall_invoke5(u32_t arg1, u32_t arg2, u32_t arg3,
+static inline u32_t z_arch_syscall_invoke5(u32_t arg1, u32_t arg2, u32_t arg3,
 					  u32_t arg4, u32_t arg5, u32_t call_id)
 {
 	register u32_t ret __asm__("r0") = arg1;
@@ -84,7 +84,7 @@
 	return ret;
 }
 
-static inline u32_t _arch_syscall_invoke4(u32_t arg1, u32_t arg2, u32_t arg3,
+static inline u32_t z_arch_syscall_invoke4(u32_t arg1, u32_t arg2, u32_t arg3,
 					  u32_t arg4, u32_t call_id)
 {
 	register u32_t ret __asm__("r0") = arg1;
@@ -105,7 +105,7 @@
 	return ret;
 }
 
-static inline u32_t _arch_syscall_invoke3(u32_t arg1, u32_t arg2, u32_t arg3,
+static inline u32_t z_arch_syscall_invoke3(u32_t arg1, u32_t arg2, u32_t arg3,
 					  u32_t call_id)
 {
 	register u32_t ret __asm__("r0") = arg1;
@@ -124,7 +124,7 @@
 	return ret;
 }
 
-static inline u32_t _arch_syscall_invoke2(u32_t arg1, u32_t arg2, u32_t call_id)
+static inline u32_t z_arch_syscall_invoke2(u32_t arg1, u32_t arg2, u32_t call_id)
 {
 	register u32_t ret __asm__("r0") = arg1;
 	register u32_t r1 __asm__("r1") = arg2;
@@ -141,7 +141,7 @@
 	return ret;
 }
 
-static inline u32_t _arch_syscall_invoke1(u32_t arg1, u32_t call_id)
+static inline u32_t z_arch_syscall_invoke1(u32_t arg1, u32_t call_id)
 {
 	register u32_t ret __asm__("r0") = arg1;
 	register u32_t r6 __asm__("r6") = call_id;
@@ -157,7 +157,7 @@
 	return ret;
 }
 
-static inline u32_t _arch_syscall_invoke0(u32_t call_id)
+static inline u32_t z_arch_syscall_invoke0(u32_t call_id)
 {
 	register u32_t ret __asm__("r0");
 	register u32_t r6 __asm__("r6") = call_id;
@@ -173,7 +173,7 @@
 	return ret;
 }
 
-static inline bool _arch_is_user_context(void)
+static inline bool z_arch_is_user_context(void)
 {
 	u32_t status;
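
All seven helpers share one shape: arguments are pinned to r0-r5 with register-asm variables, the call ID rides in r6, a trap switches privilege, and r0 carries the return value back. A hedged sketch of the user-mode wrapper that a __syscall declaration boils down to (foo and K_SYSCALL_FOO are illustrative names; real stubs are emitted by scripts/gen_syscall_header.py):

    static inline int foo(u32_t a, u32_t b)
    {
    	/* Two arguments, so the two-argument invoker is selected. */
    	return (int)z_arch_syscall_invoke2(a, b, K_SYSCALL_FOO);
    }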
 
diff --git a/include/arch/arc/v2/arcv2_irq_unit.h b/include/arch/arc/v2/arcv2_irq_unit.h
index cb540ce..99cd5fc 100644
--- a/include/arch/arc/v2/arcv2_irq_unit.h
+++ b/include/arch/arc/v2/arcv2_irq_unit.h
@@ -49,13 +49,13 @@
  */
 
 static ALWAYS_INLINE
-void _arc_v2_irq_unit_irq_enable_set(
+void z_arc_v2_irq_unit_irq_enable_set(
 	int irq,
 	unsigned char enable
 	)
 {
-	_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
-	_arc_v2_aux_reg_write(_ARC_V2_IRQ_ENABLE, enable);
+	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
+	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_ENABLE, enable);
 }
 
 /*
@@ -69,7 +69,7 @@
 static ALWAYS_INLINE
 void _arc_v2_irq_unit_int_enable(int irq)
 {
-	_arc_v2_irq_unit_irq_enable_set(irq, _ARC_V2_INT_ENABLE);
+	z_arc_v2_irq_unit_irq_enable_set(irq, _ARC_V2_INT_ENABLE);
 }
 
 /*
@@ -83,7 +83,7 @@
 static ALWAYS_INLINE
 void _arc_v2_irq_unit_int_disable(int irq)
 {
-	_arc_v2_irq_unit_irq_enable_set(irq, _ARC_V2_INT_DISABLE);
+	z_arc_v2_irq_unit_irq_enable_set(irq, _ARC_V2_INT_DISABLE);
 }
 
 /*
@@ -97,13 +97,13 @@
 static ALWAYS_INLINE
 void _arc_v2_irq_unit_prio_set(int irq, unsigned char prio)
 {
-	_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
+	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
 #ifdef CONFIG_ARC_HAS_SECURE
 /* if ARC has secure mode, all interrupts should be secure */
-	_arc_v2_aux_reg_write(_ARC_V2_IRQ_PRIORITY, prio |
+	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_PRIORITY, prio |
 			 _ARC_V2_IRQ_PRIORITY_SECURE);
 #else
-	_arc_v2_aux_reg_write(_ARC_V2_IRQ_PRIORITY, prio);
+	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_PRIORITY, prio);
 #endif
 }
 
@@ -121,8 +121,8 @@
 static ALWAYS_INLINE
 void _arc_v2_irq_unit_sensitivity_set(int irq, int s)
 {
-	_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
-	_arc_v2_aux_reg_write(_ARC_V2_IRQ_TRIGGER, s);
+	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
+	z_arc_v2_aux_reg_write(_ARC_V2_IRQ_TRIGGER, s);
 }
 
 /*
@@ -133,12 +133,12 @@
  * @return N/A
  */
 static ALWAYS_INLINE
-bool _arc_v2_irq_unit_is_in_isr(void)
+bool z_arc_v2_irq_unit_is_in_isr(void)
 {
-	u32_t act = _arc_v2_aux_reg_read(_ARC_V2_AUX_IRQ_ACT);
+	u32_t act = z_arc_v2_aux_reg_read(_ARC_V2_AUX_IRQ_ACT);
 
 	/* in exception? */
-	if (_arc_v2_aux_reg_read(_ARC_V2_STATUS32) & _ARC_V2_STATUS32_AE) {
+	if (z_arc_v2_aux_reg_read(_ARC_V2_STATUS32) & _ARC_V2_STATUS32_AE) {
 		return true;
 	}
 
@@ -155,7 +155,7 @@
  * @return N/A
  */
 
-void _arc_v2_irq_unit_trigger_set(int irq, unsigned int trigger);
+void z_arc_v2_irq_unit_trigger_set(int irq, unsigned int trigger);
 
 /*
  * @brief Returns an IRQ line trigger type
@@ -166,7 +166,7 @@
  * @return N/A
  */
 
-unsigned int _arc_v2_irq_unit_trigger_get(int irq);
+unsigned int z_arc_v2_irq_unit_trigger_get(int irq);
 
 /*
  * @brief Send EOI signal to interrupt unit
@@ -180,7 +180,7 @@
  * @return N/A
  */
 
-void _arc_v2_irq_unit_int_eoi(int irq);
+void z_arc_v2_irq_unit_int_eoi(int irq);
 
 #endif /* _ASMLANGUAGE */
 
diff --git a/include/arch/arc/v2/asm_inline_gcc.h b/include/arch/arc/v2/asm_inline_gcc.h
index 4db2f1c..593d70e 100644
--- a/include/arch/arc/v2/asm_inline_gcc.h
+++ b/include/arch/arc/v2/asm_inline_gcc.h
@@ -24,7 +24,7 @@
 /**
  *  @brief read timestamp register (CPU frequency)
  */
-extern u64_t _tsc_read(void);
+extern u64_t z_tsc_read(void);
 
 
 /* Implementation of sys_io.h's documented functions */
@@ -32,37 +32,37 @@
 static ALWAYS_INLINE
 	void sys_out8(u8_t data, io_port_t port)
 {
-	_arc_v2_aux_reg_write(port, data);
+	z_arc_v2_aux_reg_write(port, data);
 }
 
 static ALWAYS_INLINE
 	u8_t sys_in8(io_port_t port)
 {
-	return (u8_t)(_arc_v2_aux_reg_read(port) & 0x000000ff);
+	return (u8_t)(z_arc_v2_aux_reg_read(port) & 0x000000ff);
 }
 
 static ALWAYS_INLINE
 	void sys_out16(u16_t data, io_port_t port)
 {
-	_arc_v2_aux_reg_write(port, data);
+	z_arc_v2_aux_reg_write(port, data);
 }
 
 static ALWAYS_INLINE
 	u16_t sys_in16(io_port_t port)
 {
-	return (u16_t)(_arc_v2_aux_reg_read(port) & 0x0000ffff);
+	return (u16_t)(z_arc_v2_aux_reg_read(port) & 0x0000ffff);
 }
 
 static ALWAYS_INLINE
 	void sys_out32(u32_t data, io_port_t port)
 {
-	_arc_v2_aux_reg_write(port, data);
+	z_arc_v2_aux_reg_write(port, data);
 }
 
 static ALWAYS_INLINE
 	u32_t sys_in32(io_port_t port)
 {
-	return _arc_v2_aux_reg_read(port);
+	return z_arc_v2_aux_reg_read(port);
 }
 
 static ALWAYS_INLINE
diff --git a/include/arch/arc/v2/aux_regs.h b/include/arch/arc/v2/aux_regs.h
index 24b279e..b4db266 100644
--- a/include/arch/arc/v2/aux_regs.h
+++ b/include/arch/arc/v2/aux_regs.h
@@ -154,12 +154,12 @@
 #if defined(__GNUC__)
 
 #include <zephyr/types.h>
-#define _arc_v2_aux_reg_read(reg) __builtin_arc_lr((volatile u32_t)reg)
-#define _arc_v2_aux_reg_write(reg, val) __builtin_arc_sr((unsigned int)val, (volatile u32_t)reg)
+#define z_arc_v2_aux_reg_read(reg) __builtin_arc_lr((volatile u32_t)reg)
+#define z_arc_v2_aux_reg_write(reg, val) __builtin_arc_sr((unsigned int)val, (volatile u32_t)reg)
 
 #else /* ! __GNUC__ */
 
-#define _arc_v2_aux_reg_read(reg)                                \
+#define z_arc_v2_aux_reg_read(reg)                                \
 	({                                               \
 		unsigned int __ret;                      \
 		__asm__ __volatile__("       lr %0, [%1]" \
@@ -168,7 +168,7 @@
 		__ret;                                   \
 	})
 
-#define _arc_v2_aux_reg_write(reg, val)                              \
+#define z_arc_v2_aux_reg_write(reg, val)                              \
 	({                                                   \
 		__asm__ __volatile__("       sr %0, [%1]"    \
 				     :                       \
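
Both the builtin and the inline-asm variants compile to single LR/SR instructions, so the rename is purely lexical at call sites. For example, sampling and restarting TIMER0 (register name as defined in this header):

    static u32_t timer0_restart(void)
    {
    	u32_t old = z_arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT);	/* LR */

    	z_arc_v2_aux_reg_write(_ARC_V2_TMR0_COUNT, 0);		/* SR */
    	return old;
    }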
diff --git a/include/arch/arc/v2/error.h b/include/arch/arc/v2/error.h
index 292bcc9..ec08f98 100644
--- a/include/arch/arc/v2/error.h
+++ b/include/arch/arc/v2/error.h
@@ -24,8 +24,8 @@
 
 #ifndef _ASMLANGUAGE
 #include <toolchain/gcc.h>
-extern void _NanoFatalErrorHandler(unsigned int, const NANO_ESF*);
-extern void _SysFatalErrorHandler(unsigned int cause, const NANO_ESF *esf);
+extern void z_NanoFatalErrorHandler(unsigned int, const NANO_ESF*);
+extern void z_SysFatalErrorHandler(unsigned int cause, const NANO_ESF *esf);
 #endif
 
 #define _NANO_ERR_HW_EXCEPTION (0)      /* MPU/Bus/Usage fault */
@@ -41,10 +41,10 @@
  * a new exception; when the processor is in thread context, the exception
  * will be raised
  */
-#define _ARCH_EXCEPT(reason_p)	do { \
-	if (_arc_v2_irq_unit_is_in_isr()) { \
+#define Z_ARCH_EXCEPT(reason_p)	do { \
+	if (z_arc_v2_irq_unit_is_in_isr()) { \
 		printk("@ %s:%d:\n", __FILE__,  __LINE__); \
-		_NanoFatalErrorHandler(reason_p, 0); \
+		z_NanoFatalErrorHandler(reason_p, 0); \
 	} else {\
 		__asm__ volatile ( \
 		"mov r0, %[reason]\n\t" \
diff --git a/include/arch/arc/v2/irq.h b/include/arch/arc/v2/irq.h
index b4f7555..61a4d11 100644
--- a/include/arch/arc/v2/irq.h
+++ b/include/arch/arc/v2/irq.h
@@ -26,25 +26,25 @@
 
 #ifdef _ASMLANGUAGE
 GTEXT(_irq_exit);
-GTEXT(_arch_irq_enable)
-GTEXT(_arch_irq_disable)
+GTEXT(z_arch_irq_enable)
+GTEXT(z_arch_irq_disable)
 #else
 
-extern void _arch_irq_enable(unsigned int irq);
-extern void _arch_irq_disable(unsigned int irq);
+extern void z_arch_irq_enable(unsigned int irq);
+extern void z_arch_irq_disable(unsigned int irq);
 
 extern void _irq_exit(void);
-extern void _irq_priority_set(unsigned int irq, unsigned int prio,
+extern void z_irq_priority_set(unsigned int irq, unsigned int prio,
 			      u32_t flags);
 extern void _isr_wrapper(void);
-extern void _irq_spurious(void *unused);
+extern void z_irq_spurious(void *unused);
 
 /**
  * Configure a static interrupt.
  *
  * All arguments must be computable by the compiler at build time.
  *
- * _ISR_DECLARE will populate the .intList section with the interrupt's
+ * Z_ISR_DECLARE will populate the .intList section with the interrupt's
  * parameters, which will then be used by gen_irq_tables.py to create
  * the vector table and the software ISR table. This is all done at
  * build-time.
@@ -60,10 +60,10 @@
  *
  * @return The vector assigned to this interrupt
  */
-#define _ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
+#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
 ({ \
-	_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
-	_irq_priority_set(irq_p, priority_p, flags_p); \
+	Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
+	z_irq_priority_set(irq_p, priority_p, flags_p); \
 	irq_p; \
 })
 
@@ -101,7 +101,7 @@
  * "interrupt disable state" prior to the call.
  */
 
-static ALWAYS_INLINE unsigned int _arch_irq_lock(void)
+static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
 {
 	unsigned int key;
 
@@ -122,7 +122,7 @@
  * @return N/A
  */
 
-static ALWAYS_INLINE void _arch_irq_unlock(unsigned int key)
+static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
 {
 	__asm__ volatile("seti %0" : : "ir"(key) : "memory");
 }
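
As the comment above stresses, every argument must be a build-time constant; drivers reach Z_ARCH_IRQ_CONNECT() through the portable IRQ_CONNECT() wrapper. A hedged sketch with MY_IRQ, MY_IRQ_PRIO and my_isr as placeholder names:

    static void my_isr(void *arg)
    {
    	ARG_UNUSED(arg);
    	/* acknowledge and service the device */
    }

    static int my_dev_init(struct device *dev)
    {
    	ARG_UNUSED(dev);
    	/* Registers my_isr in the build-time ISR table, then sets the
    	 * priority through z_irq_priority_set().
    	 */
    	IRQ_CONNECT(MY_IRQ, MY_IRQ_PRIO, my_isr, NULL, 0);
    	irq_enable(MY_IRQ);
    	return 0;
    }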
diff --git a/include/arch/arc/v2/misc.h b/include/arch/arc/v2/misc.h
index 8aee8ac..00fa67d 100644
--- a/include/arch/arc/v2/misc.h
+++ b/include/arch/arc/v2/misc.h
@@ -23,8 +23,8 @@
 extern void k_cpu_idle(void);
 extern void k_cpu_atomic_idle(unsigned int key);
 
-extern u32_t _timer_cycle_get_32(void);
-#define _arch_k_cycle_get_32()	_timer_cycle_get_32()
+extern u32_t z_timer_cycle_get_32(void);
+#define z_arch_k_cycle_get_32()	z_timer_cycle_get_32()
 #endif
 
 #ifdef __cplusplus
diff --git a/include/arch/arm/arch.h b/include/arch/arm/arch.h
index cf973cb..1b14a4d 100644
--- a/include/arch/arm/arch.h
+++ b/include/arch/arm/arch.h
@@ -142,11 +142,11 @@
  */
 #if defined(CONFIG_USERSPACE) && \
 	defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
-#define _ARCH_THREAD_STACK_DEFINE(sym, size) \
+#define Z_ARCH_THREAD_STACK_DEFINE(sym, size) \
 	struct _k_thread_stack_element __noinit \
 		__aligned(POW2_CEIL(size)) sym[POW2_CEIL(size)]
 #else
-#define _ARCH_THREAD_STACK_DEFINE(sym, size) \
+#define Z_ARCH_THREAD_STACK_DEFINE(sym, size) \
 	struct _k_thread_stack_element __noinit __aligned(STACK_ALIGN) \
 		sym[size+MPU_GUARD_ALIGN_AND_SIZE]
 #endif
@@ -163,9 +163,9 @@
  */
 #if defined(CONFIG_USERSPACE) && \
 	defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
-#define _ARCH_THREAD_STACK_LEN(size) (POW2_CEIL(size))
+#define Z_ARCH_THREAD_STACK_LEN(size) (POW2_CEIL(size))
 #else
-#define _ARCH_THREAD_STACK_LEN(size) ((size)+MPU_GUARD_ALIGN_AND_SIZE)
+#define Z_ARCH_THREAD_STACK_LEN(size) ((size)+MPU_GUARD_ALIGN_AND_SIZE)
 #endif
 
 /**
@@ -183,15 +183,15 @@
  */
 #if defined(CONFIG_USERSPACE) && \
 	defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
-#define _ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
+#define Z_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
 	struct _k_thread_stack_element __noinit \
 		__aligned(POW2_CEIL(size)) \
-		sym[nmemb][_ARCH_THREAD_STACK_LEN(size)]
+		sym[nmemb][Z_ARCH_THREAD_STACK_LEN(size)]
 #else
-#define _ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
+#define Z_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
 	struct _k_thread_stack_element __noinit \
 		__aligned(STACK_ALIGN) \
-		sym[nmemb][_ARCH_THREAD_STACK_LEN(size)]
+		sym[nmemb][Z_ARCH_THREAD_STACK_LEN(size)]
 #endif
 
 /**
@@ -208,11 +208,11 @@
  */
 #if defined(CONFIG_USERSPACE) && \
 	defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
-#define _ARCH_THREAD_STACK_MEMBER(sym, size) \
+#define Z_ARCH_THREAD_STACK_MEMBER(sym, size) \
 	struct _k_thread_stack_element __aligned(POW2_CEIL(size)) \
 		sym[POW2_CEIL(size)]
 #else
-#define _ARCH_THREAD_STACK_MEMBER(sym, size) \
+#define Z_ARCH_THREAD_STACK_MEMBER(sym, size) \
 	struct _k_thread_stack_element __aligned(STACK_ALIGN) \
 		sym[size+MPU_GUARD_ALIGN_AND_SIZE]
 #endif
@@ -240,7 +240,7 @@
  * @param sym Stack memory symbol
  * @return Actual size of the stack available for the thread
  */
-#define _ARCH_THREAD_STACK_SIZEOF(sym) (sizeof(sym) - MPU_GUARD_ALIGN_AND_SIZE)
+#define Z_ARCH_THREAD_STACK_SIZEOF(sym) (sizeof(sym) - MPU_GUARD_ALIGN_AND_SIZE)
 
 /**
  * @brief Get a pointer to the physical stack buffer
@@ -253,7 +253,7 @@
  * @param sym Declared stack symbol name
  * @return The buffer itself, a char *
  */
-#define _ARCH_THREAD_STACK_BUFFER(sym) \
+#define Z_ARCH_THREAD_STACK_BUFFER(sym) \
 		((char *)(sym) + MPU_GUARD_ALIGN_AND_SIZE)
 
 #ifdef CONFIG_ARM_MPU
diff --git a/include/arch/arm/cortex_m/asm_inline_gcc.h b/include/arch/arm/cortex_m/asm_inline_gcc.h
index 0625233..2384614 100644
--- a/include/arch/arm/cortex_m/asm_inline_gcc.h
+++ b/include/arch/arm/cortex_m/asm_inline_gcc.h
@@ -116,7 +116,7 @@
  * except NMI.
  */
 
-static ALWAYS_INLINE unsigned int _arch_irq_lock(void)
+static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
 {
 	unsigned int key;
 
@@ -163,7 +163,7 @@
  *
  */
 
-static ALWAYS_INLINE void _arch_irq_unlock(unsigned int key)
+static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
 {
 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
 	if (key) {
diff --git a/include/arch/arm/cortex_m/error.h b/include/arch/arm/cortex_m/error.h
index 9ea95a5..f0215a9 100644
--- a/include/arch/arm/cortex_m/error.h
+++ b/include/arch/arm/cortex_m/error.h
@@ -23,8 +23,8 @@
 #endif
 
 #ifndef _ASMLANGUAGE
-extern void _NanoFatalErrorHandler(unsigned int reason, const NANO_ESF *esf);
-extern void _SysFatalErrorHandler(unsigned int reason, const NANO_ESF *esf);
+extern void z_NanoFatalErrorHandler(unsigned int reason, const NANO_ESF *esf);
+extern void z_SysFatalErrorHandler(unsigned int reason, const NANO_ESF *esf);
 #endif
 
 #define _NANO_ERR_HW_EXCEPTION (0)      /* MPU/Bus/Usage fault */
@@ -43,7 +43,7 @@
  * schedule a new thread until they are unlocked which is not what we want.
  * Force them unlocked as well.
  */
-#define _ARCH_EXCEPT(reason_p) do { \
+#define Z_ARCH_EXCEPT(reason_p) do { \
 	__asm__ volatile ( \
 		"cpsie i\n\t" \
 		"mov r0, %[reason]\n\t" \
@@ -54,7 +54,7 @@
 	CODE_UNREACHABLE; \
 } while (false)
 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
-#define _ARCH_EXCEPT(reason_p) do { \
+#define Z_ARCH_EXCEPT(reason_p) do { \
 	__asm__ volatile ( \
 		"eors.n r0, r0\n\t" \
 		"msr BASEPRI, r0\n\t" \
diff --git a/include/arch/arm/cortex_m/exc.h b/include/arch/arm/cortex_m/exc.h
index 23e4341..87ce548 100644
--- a/include/arch/arm/cortex_m/exc.h
+++ b/include/arch/arm/cortex_m/exc.h
@@ -19,7 +19,7 @@
 #endif
 
 /* for assembler, only works with constants */
-#define _EXC_PRIO(pri) (((pri) << (8 - DT_NUM_IRQ_PRIO_BITS)) & 0xff)
+#define Z_EXC_PRIO(pri) (((pri) << (8 - DT_NUM_IRQ_PRIO_BITS)) & 0xff)
 
 #if defined(CONFIG_CPU_CORTEX_M_HAS_PROGRAMMABLE_FAULT_PRIOS)
 #define _EXCEPTION_RESERVED_PRIO 1
@@ -37,10 +37,10 @@
 #define _IRQ_PRIO_OFFSET (_EXCEPTION_RESERVED_PRIO)
 #endif
 
-#define _EXC_IRQ_DEFAULT_PRIO _EXC_PRIO(_IRQ_PRIO_OFFSET)
+#define _EXC_IRQ_DEFAULT_PRIO Z_EXC_PRIO(_IRQ_PRIO_OFFSET)
 
 #ifdef _ASMLANGUAGE
-GTEXT(_ExcExit);
+GTEXT(z_ExcExit);
 #else
 #include <zephyr/types.h>
 
@@ -62,7 +62,7 @@
 
 typedef struct __esf NANO_ESF;
 
-extern void _ExcExit(void);
+extern void z_ExcExit(void);
 
 /**
  * @brief Display the contents of an exception stack frame
diff --git a/include/arch/arm/cortex_m/irq.h b/include/arch/arm/cortex_m/irq.h
index 6b92423..3ae46f9 100644
--- a/include/arch/arm/cortex_m/irq.h
+++ b/include/arch/arm/cortex_m/irq.h
@@ -24,13 +24,13 @@
 
 #ifdef _ASMLANGUAGE
 GTEXT(_IntExit);
-GTEXT(_arch_irq_enable)
-GTEXT(_arch_irq_disable)
-GTEXT(_arch_irq_is_enabled)
+GTEXT(z_arch_irq_enable)
+GTEXT(z_arch_irq_disable)
+GTEXT(z_arch_irq_is_enabled)
 #else
-extern void _arch_irq_enable(unsigned int irq);
-extern void _arch_irq_disable(unsigned int irq);
-extern int _arch_irq_is_enabled(unsigned int irq);
+extern void z_arch_irq_enable(unsigned int irq);
+extern void z_arch_irq_disable(unsigned int irq);
+extern int z_arch_irq_is_enabled(unsigned int irq);
 
 extern void _IntExit(void);
 
@@ -43,7 +43,7 @@
 #define CONCAT(x, y) DO_CONCAT(x, y)
 
 /* internal routine documented in C file, needed by IRQ_CONNECT() macro */
-extern void _irq_priority_set(unsigned int irq, unsigned int prio,
+extern void z_irq_priority_set(unsigned int irq, unsigned int prio,
 			      u32_t flags);
 
 
@@ -63,7 +63,7 @@
  *
  * All arguments must be computable by the compiler at build time.
  *
- * _ISR_DECLARE will populate the .intList section with the interrupt's
+ * Z_ISR_DECLARE will populate the .intList section with the interrupt's
  * parameters, which will then be used by gen_irq_tables.py to create
  * the vector table and the software ISR table. This is all done at
  * build-time.
@@ -79,10 +79,10 @@
  *
  * @return The vector assigned to this interrupt
  */
-#define _ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
+#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
 ({ \
-	_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
-	_irq_priority_set(irq_p, priority_p, flags_p); \
+	Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
+	z_irq_priority_set(irq_p, priority_p, flags_p); \
 	irq_p; \
 })
 
@@ -93,25 +93,25 @@
  * See include/irq.h for details.
  * All arguments must be computable at build time.
  */
-#define _ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
+#define Z_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
 ({ \
-	_ISR_DECLARE(irq_p, ISR_FLAG_DIRECT, isr_p, NULL); \
-	_irq_priority_set(irq_p, priority_p, flags_p); \
+	Z_ISR_DECLARE(irq_p, ISR_FLAG_DIRECT, isr_p, NULL); \
+	z_irq_priority_set(irq_p, priority_p, flags_p); \
 	irq_p; \
 })
 
 /* FIXME prefer these inline, but see GH-3056 */
 #ifdef CONFIG_SYS_POWER_MANAGEMENT
 extern void _arch_isr_direct_pm(void);
-#define _ARCH_ISR_DIRECT_PM() _arch_isr_direct_pm()
+#define Z_ARCH_ISR_DIRECT_PM() _arch_isr_direct_pm()
 #else
-#define _ARCH_ISR_DIRECT_PM() do { } while (false)
+#define Z_ARCH_ISR_DIRECT_PM() do { } while (false)
 #endif
 
-#define _ARCH_ISR_DIRECT_HEADER() _arch_isr_direct_header()
-extern void _arch_isr_direct_header(void);
+#define Z_ARCH_ISR_DIRECT_HEADER() z_arch_isr_direct_header()
+extern void z_arch_isr_direct_header(void);
 
-#define _ARCH_ISR_DIRECT_FOOTER(swap) _arch_isr_direct_footer(swap)
+#define Z_ARCH_ISR_DIRECT_FOOTER(swap) z_arch_isr_direct_footer(swap)
 
 /* arch/arm/core/exc_exit.S */
 extern void _IntExit(void);
@@ -120,7 +120,7 @@
 extern void z_sys_trace_isr_exit(void);
 #endif
 
-static inline void _arch_isr_direct_footer(int maybe_swap)
+static inline void z_arch_isr_direct_footer(int maybe_swap)
 {
 
 #ifdef CONFIG_TRACING
@@ -131,7 +131,7 @@
 	}
 }
 
-#define _ARCH_ISR_DIRECT_DECLARE(name) \
+#define Z_ARCH_ISR_DIRECT_DECLARE(name) \
 	static inline int name##_body(void); \
 	__attribute__ ((interrupt ("IRQ"))) void name(void) \
 	{ \
@@ -143,7 +143,7 @@
 	static inline int name##_body(void)
 
 /* Spurious interrupt handler. Throws an error if called */
-extern void _irq_spurious(void *unused);
+extern void z_irq_spurious(void *unused);
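
The direct-interrupt path keeps its shape under the new names: Z_ARCH_ISR_DIRECT_DECLARE() emits the vector-attached handler, and the footer reschedules only if the body asks for it. It is reached through the portable ISR_DIRECT_DECLARE(); a hedged sketch, with do_urgent_work() as a placeholder:

    ISR_DIRECT_DECLARE(my_fast_isr)
    {
    	do_urgent_work();
    	return 1;	/* nonzero: check for reschedule on exit */
    }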
 
 #ifdef CONFIG_GEN_SW_ISR_TABLE
 /* Architecture-specific common entry point for interrupts from the vector
diff --git a/include/arch/arm/cortex_m/misc.h b/include/arch/arm/cortex_m/misc.h
index 76945a1..82cedab 100644
--- a/include/arch/arm/cortex_m/misc.h
+++ b/include/arch/arm/cortex_m/misc.h
@@ -21,8 +21,8 @@
 #ifndef _ASMLANGUAGE
 extern void k_cpu_idle(void);
 
-extern u32_t _timer_cycle_get_32(void);
-#define _arch_k_cycle_get_32()	_timer_cycle_get_32()
+extern u32_t z_timer_cycle_get_32(void);
+#define z_arch_k_cycle_get_32()	z_timer_cycle_get_32()
 
 /**
  * @brief Explicit nop operation.
diff --git a/include/arch/arm/cortex_m/mpu/arm_core_mpu.h b/include/arch/arm/cortex_m/mpu/arm_core_mpu.h
index 2e668a6..83b1ebe 100644
--- a/include/arch/arm/cortex_m/mpu/arm_core_mpu.h
+++ b/include/arch/arm/cortex_m/mpu/arm_core_mpu.h
@@ -20,7 +20,7 @@
  * MPU driver. The function is meant to be invoked once,
  * during system initialization.
  */
-void _arch_configure_static_mpu_regions(void);
+void z_arch_configure_static_mpu_regions(void);
 
 /**
  * @brief Use the HW-specific MPU driver to program
@@ -33,7 +33,7 @@
  *
  * @param thread pointer to the current k_thread context
  */
-void _arch_configure_dynamic_mpu_regions(struct k_thread *thread);
+void z_arch_configure_dynamic_mpu_regions(struct k_thread *thread);
 
 #ifdef __cplusplus
 }
diff --git a/include/arch/arm/cortex_m/nmi.h b/include/arch/arm/cortex_m/nmi.h
index 696deba..ec718ab 100644
--- a/include/arch/arm/cortex_m/nmi.h
+++ b/include/arch/arm/cortex_m/nmi.h
@@ -15,8 +15,8 @@
 
 #ifndef _ASMLANGUAGE
 #ifdef CONFIG_RUNTIME_NMI
-extern void _NmiInit(void);
-#define NMI_INIT() _NmiInit()
+extern void z_NmiInit(void);
+#define NMI_INIT() z_NmiInit()
 #else
 #define NMI_INIT()
 #endif
diff --git a/include/arch/arm/syscall.h b/include/arch/arm/syscall.h
index c240962..d774a4a 100644
--- a/include/arch/arm/syscall.h
+++ b/include/arch/arm/syscall.h
@@ -34,7 +34,7 @@
 /* Syscall invocation macros. arm-specific machine constraints used to ensure
  * args land in the proper registers.
  */
-static inline u32_t _arch_syscall_invoke6(u32_t arg1, u32_t arg2, u32_t arg3,
+static inline u32_t z_arch_syscall_invoke6(u32_t arg1, u32_t arg2, u32_t arg3,
 					  u32_t arg4, u32_t arg5, u32_t arg6,
 					  u32_t call_id)
 {
@@ -56,7 +56,7 @@
 	return ret;
 }
 
-static inline u32_t _arch_syscall_invoke5(u32_t arg1, u32_t arg2, u32_t arg3,
+static inline u32_t z_arch_syscall_invoke5(u32_t arg1, u32_t arg2, u32_t arg3,
 					  u32_t arg4, u32_t arg5, u32_t call_id)
 {
 	register u32_t ret __asm__("r0") = arg1;
@@ -76,7 +76,7 @@
 	return ret;
 }
 
-static inline u32_t _arch_syscall_invoke4(u32_t arg1, u32_t arg2, u32_t arg3,
+static inline u32_t z_arch_syscall_invoke4(u32_t arg1, u32_t arg2, u32_t arg3,
 					  u32_t arg4, u32_t call_id)
 {
 	register u32_t ret __asm__("r0") = arg1;
@@ -95,7 +95,7 @@
 	return ret;
 }
 
-static inline u32_t _arch_syscall_invoke3(u32_t arg1, u32_t arg2, u32_t arg3,
+static inline u32_t z_arch_syscall_invoke3(u32_t arg1, u32_t arg2, u32_t arg3,
 					  u32_t call_id)
 {
 	register u32_t ret __asm__("r0") = arg1;
@@ -112,7 +112,7 @@
 	return ret;
 }
 
-static inline u32_t _arch_syscall_invoke2(u32_t arg1, u32_t arg2, u32_t call_id)
+static inline u32_t z_arch_syscall_invoke2(u32_t arg1, u32_t arg2, u32_t call_id)
 {
 	register u32_t ret __asm__("r0") = arg1;
 	register u32_t r1 __asm__("r1") = arg2;
@@ -127,7 +127,7 @@
 	return ret;
 }
 
-static inline u32_t _arch_syscall_invoke1(u32_t arg1, u32_t call_id)
+static inline u32_t z_arch_syscall_invoke1(u32_t arg1, u32_t call_id)
 {
 	register u32_t ret __asm__("r0") = arg1;
 	register u32_t r6 __asm__("r6") = call_id;
@@ -140,7 +140,7 @@
 	return ret;
 }
 
-static inline u32_t _arch_syscall_invoke0(u32_t call_id)
+static inline u32_t z_arch_syscall_invoke0(u32_t call_id)
 {
 	register u32_t ret __asm__("r0");
 	register u32_t r6 __asm__("r6") = call_id;
@@ -154,7 +154,7 @@
 	return ret;
 }
 
-static inline bool _arch_is_user_context(void)
+static inline bool z_arch_is_user_context(void)
 {
 	u32_t value;
 
diff --git a/include/arch/nios2/arch.h b/include/arch/nios2/arch.h
index 9dd0d4e..3f9f3be 100644
--- a/include/arch/nios2/arch.h
+++ b/include/arch/nios2/arch.h
@@ -69,15 +69,15 @@
  *
  * @return The vector assigned to this interrupt
  */
-#define _ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
+#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
 ({ \
-	_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
+	Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
 	irq_p; \
 })
 
-extern void _irq_spurious(void *unused);
+extern void z_irq_spurious(void *unused);
 
-static ALWAYS_INLINE unsigned int _arch_irq_lock(void)
+static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
 {
 	unsigned int key, tmp;
 
@@ -92,7 +92,7 @@
 	return key;
 }
 
-static ALWAYS_INLINE void _arch_irq_unlock(unsigned int key)
+static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
 {
 	/* If the CPU is built without certain features, then
 	 * the only writable bit in the status register is PIE
@@ -124,8 +124,8 @@
 #endif
 }
 
-void _arch_irq_enable(unsigned int irq);
-void _arch_irq_disable(unsigned int irq);
+void z_arch_irq_enable(unsigned int irq);
+void z_arch_irq_disable(unsigned int irq);
 
 struct __esf {
 	u32_t ra; /* return address r31 */
@@ -151,10 +151,10 @@
 typedef struct __esf NANO_ESF;
 extern const NANO_ESF _default_esf;
 
-FUNC_NORETURN void _SysFatalErrorHandler(unsigned int reason,
+FUNC_NORETURN void z_SysFatalErrorHandler(unsigned int reason,
 					 const NANO_ESF *esf);
 
-FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
+FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
 					  const NANO_ESF *esf);
 
 enum nios2_exception_cause {
@@ -198,8 +198,8 @@
 	 BIT(NIOS2_EXCEPTION_ECC_DATA_ERR))
 
 
-extern u32_t _timer_cycle_get_32(void);
-#define _arch_k_cycle_get_32()	_timer_cycle_get_32()
+extern u32_t z_timer_cycle_get_32(void);
+#define z_arch_k_cycle_get_32()	z_timer_cycle_get_32()
 
 /**
  * @brief Explicit nop operation.
diff --git a/include/arch/nios2/nios2.h b/include/arch/nios2/nios2.h
index 037d93e..d4a1e34 100644
--- a/include/arch/nios2/nios2.h
+++ b/include/arch/nios2/nios2.h
@@ -155,17 +155,18 @@
 #define _nios2_creg_read(reg) __builtin_rdctl(reg)
 #define _nios2_creg_write(reg, val) __builtin_wrctl(reg, val)
 
-#define _nios2_get_register_address(base, regnum) \
+#define z_nios2_get_register_address(base, regnum) \
 	((void *)(((u8_t *)base) + ((regnum) * (SYSTEM_BUS_WIDTH / 8))))
 
 static inline void _nios2_reg_write(void *base, int regnum, u32_t data)
 {
-	sys_write32(data, (mm_reg_t)_nios2_get_register_address(base, regnum));
+	sys_write32(data,
+		    (mm_reg_t)z_nios2_get_register_address(base, regnum));
 }
 
 static inline u32_t _nios2_reg_read(void *base, int regnum)
 {
-	return sys_read32((mm_reg_t)_nios2_get_register_address(base, regnum));
+	return sys_read32((mm_reg_t)z_nios2_get_register_address(base, regnum));
 }
 
 #endif /* _ASMLANGUAGE */
diff --git a/include/arch/posix/arch.h b/include/arch/posix/arch.h
index 48f96a0..60bf659 100644
--- a/include/arch/posix/arch.h
+++ b/include/arch/posix/arch.h
@@ -45,13 +45,13 @@
 typedef struct __esf NANO_ESF;
 extern const NANO_ESF _default_esf;
 
-extern u32_t _timer_cycle_get_32(void);
-#define _arch_k_cycle_get_32()  _timer_cycle_get_32()
+extern u32_t z_timer_cycle_get_32(void);
+#define z_arch_k_cycle_get_32()  z_timer_cycle_get_32()
 
-FUNC_NORETURN void _SysFatalErrorHandler(unsigned int reason,
+FUNC_NORETURN void z_SysFatalErrorHandler(unsigned int reason,
 					 const NANO_ESF *esf);
 
-FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
+FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
 					  const NANO_ESF *esf);
 
 /**
diff --git a/include/arch/riscv32/arch.h b/include/arch/riscv32/arch.h
index 9e1f0fa..fb9f501 100644
--- a/include/arch/riscv32/arch.h
+++ b/include/arch/riscv32/arch.h
@@ -50,10 +50,10 @@
  */
 extern u32_t __soc_get_irq(void);
 
-void _arch_irq_enable(unsigned int irq);
-void _arch_irq_disable(unsigned int irq);
-int _arch_irq_is_enabled(unsigned int irq);
-void _irq_spurious(void *unused);
+void z_arch_irq_enable(unsigned int irq);
+void z_arch_irq_disable(unsigned int irq);
+int z_arch_irq_is_enabled(unsigned int irq);
+void z_irq_spurious(void *unused);
 
 
 /**
@@ -70,16 +70,16 @@
  * @return The vector assigned to this interrupt
  */
 #if defined(CONFIG_RISCV_HAS_PLIC)
-#define _ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
+#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
 ({ \
-	_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
+	Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
 	riscv_plic_set_priority(irq_p, priority_p); \
 	irq_p; \
 })
 #else
-#define _ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
+#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
 ({ \
-	_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
+	Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
 	irq_p; \
 })
 #endif
@@ -88,7 +88,7 @@
 * Use the atomic csrrc instruction to lock global interrupts.
 * csrrc: atomically read and clear bits in a CSR register
  */
-static ALWAYS_INLINE unsigned int _arch_irq_lock(void)
+static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
 {
 	unsigned int key, mstatus;
 
@@ -105,7 +105,7 @@
 * Use the atomic csrrs instruction to unlock global interrupts.
 * csrrs: atomically read and set bits in a CSR register
  */
-static ALWAYS_INLINE void _arch_irq_unlock(unsigned int key)
+static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
 {
 	unsigned int mstatus;
 
@@ -124,8 +124,8 @@
 }
 
 
-extern u32_t _timer_cycle_get_32(void);
-#define _arch_k_cycle_get_32()	_timer_cycle_get_32()
+extern u32_t z_timer_cycle_get_32(void);
+#define z_arch_k_cycle_get_32()	z_timer_cycle_get_32()
 
 #endif /*_ASMLANGUAGE */
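
Because csrrc/csrrs are atomic read-modify-writes of mstatus, the saved key round-trips safely and nested critical sections compose. Callers use the portable pair, e.g.:

    static void update_shared_state(void)
    {
    	/* The key preserves the pre-lock MIE bit, so nested
    	 * lock/unlock pairs restore the right state.
    	 */
    	unsigned int key = irq_lock();

    	/* ...touch data shared with ISRs... */
    	irq_unlock(key);
    }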
 
diff --git a/include/arch/riscv32/exp.h b/include/arch/riscv32/exp.h
index 7d91e2b..4ad586b 100644
--- a/include/arch/riscv32/exp.h
+++ b/include/arch/riscv32/exp.h
@@ -77,9 +77,9 @@
 #endif
 extern const NANO_ESF _default_esf;
 
-extern FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
+extern FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
 						 const NANO_ESF *esf);
-extern void _SysFatalErrorHandler(unsigned int reason,
+extern void z_SysFatalErrorHandler(unsigned int reason,
 				  const NANO_ESF *esf);
 
 #endif /* _ASMLANGUAGE */
diff --git a/include/arch/x86/arch.h b/include/arch/x86/arch.h
index e5c07cd..c3b71ec 100644
--- a/include/arch/x86/arch.h
+++ b/include/arch/x86/arch.h
@@ -52,11 +52,11 @@
 #ifndef _ASMLANGUAGE
 
 #ifdef CONFIG_INT_LATENCY_BENCHMARK
-void _int_latency_start(void);
-void _int_latency_stop(void);
+void z_int_latency_start(void);
+void z_int_latency_stop(void);
 #else
-#define _int_latency_start()  do { } while (false)
-#define _int_latency_stop()   do { } while (false)
+#define z_int_latency_start()  do { } while (false)
+#define z_int_latency_stop()   do { } while (false)
 #endif
 
 /* interrupt/exception/error related definitions */
@@ -175,7 +175,7 @@
  * These macros are only intended to be used by IRQ_CONNECT() macro.
  */
 #if CONFIG_X86_FIXED_IRQ_MAPPING
-#define _VECTOR_ARG(irq_p)	_IRQ_CONTROLLER_VECTOR_MAPPING(irq_p)
+#define _VECTOR_ARG(irq_p)	Z_IRQ_CONTROLLER_VECTOR_MAPPING(irq_p)
 #else
 #define _VECTOR_ARG(irq_p)	(-1)
 #endif /* CONFIG_X86_FIXED_IRQ_MAPPING */
@@ -200,7 +200,7 @@
  * 3. The IRQ stub pushes the ISR routine and its argument onto the stack
  * and then jumps to the common interrupt handling code in _interrupt_enter().
  *
- * 4. _irq_controller_irq_config() is called at runtime to set the mapping
+ * 4. z_irq_controller_irq_config() is called at runtime to set the mapping
  * between the vector and the IRQ line, as well as the triggering flags
  *
  * @param irq_p IRQ line number
@@ -211,7 +211,7 @@
  *
  * @return The vector assigned to this interrupt
  */
-#define _ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
+#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
 ({ \
 	__asm__ __volatile__(							\
 		".pushsection .intList\n\t" \
@@ -235,9 +235,9 @@
 		  [priority] "i" (priority_p), \
 		  [vector] "i" _VECTOR_ARG(irq_p), \
 		  [irq] "i" (irq_p)); \
-	_irq_controller_irq_config(_IRQ_TO_INTERRUPT_VECTOR(irq_p), (irq_p), \
+	z_irq_controller_irq_config(Z_IRQ_TO_INTERRUPT_VECTOR(irq_p), (irq_p), \
 				   (flags_p)); \
-	_IRQ_TO_INTERRUPT_VECTOR(irq_p); \
+	Z_IRQ_TO_INTERRUPT_VECTOR(irq_p); \
 })
 
 /** Configure a 'direct' static interrupt
@@ -245,12 +245,12 @@
  * All arguments must be computable by the compiler at build time
  *
  */
-#define _ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
+#define Z_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
 ({ \
 	NANO_CPU_INT_REGISTER(isr_p, irq_p, priority_p, -1, 0); \
-	_irq_controller_irq_config(_IRQ_TO_INTERRUPT_VECTOR(irq_p), (irq_p), \
+	z_irq_controller_irq_config(Z_IRQ_TO_INTERRUPT_VECTOR(irq_p), (irq_p), \
 				   (flags_p)); \
-	_IRQ_TO_INTERRUPT_VECTOR(irq_p); \
+	Z_IRQ_TO_INTERRUPT_VECTOR(irq_p); \
 })
 
 
@@ -258,7 +258,7 @@
 /* Fixed vector-to-irq association mapping.
  * No need for the table at all.
  */
-#define _IRQ_TO_INTERRUPT_VECTOR(irq) _IRQ_CONTROLLER_VECTOR_MAPPING(irq)
+#define Z_IRQ_TO_INTERRUPT_VECTOR(irq) Z_IRQ_CONTROLLER_VECTOR_MAPPING(irq)
 #else
 /**
  * @brief Convert a statically connected IRQ to its interrupt vector number
@@ -266,25 +266,25 @@
  * @param irq IRQ number
  */
 extern unsigned char _irq_to_interrupt_vector[];
-#define _IRQ_TO_INTERRUPT_VECTOR(irq)                       \
+#define Z_IRQ_TO_INTERRUPT_VECTOR(irq)                       \
 			((unsigned int) _irq_to_interrupt_vector[irq])
 #endif
 
 #ifdef CONFIG_SYS_POWER_MANAGEMENT
-extern void _arch_irq_direct_pm(void);
-#define _ARCH_ISR_DIRECT_PM() _arch_irq_direct_pm()
+extern void z_arch_irq_direct_pm(void);
+#define Z_ARCH_ISR_DIRECT_PM() z_arch_irq_direct_pm()
 #else
-#define _ARCH_ISR_DIRECT_PM() do { } while (false)
+#define Z_ARCH_ISR_DIRECT_PM() do { } while (false)
 #endif
 
-#define _ARCH_ISR_DIRECT_HEADER() _arch_isr_direct_header()
-#define _ARCH_ISR_DIRECT_FOOTER(swap) _arch_isr_direct_footer(swap)
+#define Z_ARCH_ISR_DIRECT_HEADER() z_arch_isr_direct_header()
+#define Z_ARCH_ISR_DIRECT_FOOTER(swap) z_arch_isr_direct_footer(swap)
 
 /* FIXME prefer these inline, but see GH-3056 */
-extern void _arch_isr_direct_header(void);
-extern void _arch_isr_direct_footer(int maybe_swap);
+extern void z_arch_isr_direct_header(void);
+extern void z_arch_isr_direct_footer(int maybe_swap);
 
-#define _ARCH_ISR_DIRECT_DECLARE(name) \
+#define Z_ARCH_ISR_DIRECT_DECLARE(name) \
 	static inline int name##_body(void); \
 	__attribute__ ((interrupt)) void name(void *stack_frame) \
 	{ \
@@ -363,8 +363,8 @@
 #endif /* !_ASMLANGUAGE */
 
 /*
- * Reason codes passed to both _NanoFatalErrorHandler()
- * and _SysFatalErrorHandler().
+ * Reason codes passed to both z_NanoFatalErrorHandler()
+ * and z_SysFatalErrorHandler().
  */
 
 /** Unhandled exception/interrupt */
@@ -418,11 +418,11 @@
  *
  */
 
-static ALWAYS_INLINE unsigned int _arch_irq_lock(void)
+static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
 {
 	unsigned int key = _do_irq_lock();
 
-	_int_latency_start();
+	z_int_latency_start();
 
 	return key;
 }
@@ -442,15 +442,15 @@
  *
  */
 
-static ALWAYS_INLINE void _arch_irq_unlock(unsigned int key)
+static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
 {
 	if (!(key & 0x200)) {
 		return;
 	}
 
-	_int_latency_stop();
+	z_int_latency_stop();
 
-	_do_irq_unlock();
+	z_do_irq_unlock();
 }
 
 /**
@@ -473,12 +473,12 @@
  * @brief Enable a specific IRQ
  * @param irq IRQ
  */
-extern void	_arch_irq_enable(unsigned int irq);
+extern void	z_arch_irq_enable(unsigned int irq);
 /**
  * @brief Disable a specific IRQ
  * @param irq IRQ
  */
-extern void	_arch_irq_disable(unsigned int irq);
+extern void	z_arch_irq_disable(unsigned int irq);
 
 /**
  * @defgroup float_apis Floating Point APIs
@@ -501,7 +501,7 @@
  *
  * Invoking this routine initializes the thread's floating point context info
  * to that of an FPU that has been reset. The next time the thread is scheduled
- * by _Swap() it will either inherit an FPU that is guaranteed to be in a "sane"
+ * by z_swap() it will either inherit an FPU that is guaranteed to be in a "sane"
  * state (if the most recent user of the FPU was cooperatively swapped out)
  * or the thread's own floating point context will be loaded (if the most
  * recent user of the FPU was preempted, or if this thread is the first user
@@ -543,15 +543,15 @@
 
 extern void	k_cpu_idle(void);
 
-extern u32_t _timer_cycle_get_32(void);
-#define _arch_k_cycle_get_32()	_timer_cycle_get_32()
+extern u32_t z_timer_cycle_get_32(void);
+#define z_arch_k_cycle_get_32()	z_timer_cycle_get_32()
 
 /** kernel provided routine to report any detected fatal error. */
-extern FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
+extern FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
 						 const NANO_ESF * pEsf);
 
 /** User provided routine to handle any detected fatal error post reporting. */
-extern FUNC_NORETURN void _SysFatalErrorHandler(unsigned int reason,
+extern FUNC_NORETURN void z_SysFatalErrorHandler(unsigned int reason,
 						const NANO_ESF * pEsf);
 
 
@@ -606,33 +606,33 @@
 #define _STACK_SIZE_ALIGN	1
 #endif
 
-#define _ARCH_THREAD_STACK_DEFINE(sym, size) \
+#define Z_ARCH_THREAD_STACK_DEFINE(sym, size) \
 	struct _k_thread_stack_element __noinit \
 		__aligned(_STACK_BASE_ALIGN) \
 		sym[ROUND_UP((size), _STACK_SIZE_ALIGN) + _STACK_GUARD_SIZE]
 
-#define _ARCH_THREAD_STACK_LEN(size) \
+#define Z_ARCH_THREAD_STACK_LEN(size) \
 		(ROUND_UP((size), \
 			  MAX(_STACK_BASE_ALIGN, _STACK_SIZE_ALIGN)) + \
 		_STACK_GUARD_SIZE)
 
-#define _ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
+#define Z_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
 	struct _k_thread_stack_element __noinit \
 		__aligned(_STACK_BASE_ALIGN) \
-		sym[nmemb][_ARCH_THREAD_STACK_LEN(size)]
+		sym[nmemb][Z_ARCH_THREAD_STACK_LEN(size)]
 
-#define _ARCH_THREAD_STACK_MEMBER(sym, size) \
+#define Z_ARCH_THREAD_STACK_MEMBER(sym, size) \
 	struct _k_thread_stack_element __aligned(_STACK_BASE_ALIGN) \
 		sym[ROUND_UP((size), _STACK_SIZE_ALIGN) + _STACK_GUARD_SIZE]
 
-#define _ARCH_THREAD_STACK_SIZEOF(sym) \
+#define Z_ARCH_THREAD_STACK_SIZEOF(sym) \
 	(sizeof(sym) - _STACK_GUARD_SIZE)
 
-#define _ARCH_THREAD_STACK_BUFFER(sym) \
+#define Z_ARCH_THREAD_STACK_BUFFER(sym) \
 	((char *)((sym) + _STACK_GUARD_SIZE))
 
 #if CONFIG_X86_KERNEL_OOPS
-#define _ARCH_EXCEPT(reason_p) do { \
+#define Z_ARCH_EXCEPT(reason_p) do { \
 	__asm__ volatile( \
 		"push %[reason]\n\t" \
 		"int %[vector]\n\t" \
@@ -666,7 +666,7 @@
  * @param pde_flags Output parameter for page directory entry flags
  * @param pte_flags Output parameter for page table entry flags
  */
-void _x86_mmu_get_flags(struct x86_mmu_pdpt *pdpt, void *addr,
+void z_x86_mmu_get_flags(struct x86_mmu_pdpt *pdpt, void *addr,
 			x86_page_entry_data_t *pde_flags,
 			x86_page_entry_data_t *pte_flags);
 
@@ -684,7 +684,7 @@
  * @param mask Mask indicating which particular bits in the page table entries to
  *	 modify
  */
-void _x86_mmu_set_flags(struct x86_mmu_pdpt *pdpt, void *ptr,
+void z_x86_mmu_set_flags(struct x86_mmu_pdpt *pdpt, void *ptr,
 			size_t size,
 			x86_page_entry_data_t flags,
 			x86_page_entry_data_t mask);
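
The CONFIG_X86_KERNEL_OOPS flavor of Z_ARCH_EXCEPT() pushes the reason code and raises a software interrupt, so the oops path runs with a genuine exception frame. Portable code never names the macro; k_oops() and k_panic() land here. A brief sketch, with bad_state() as a hypothetical check:

    void validate(void)
    {
    	if (bad_state()) {
    		/* On x86 this reaches Z_ARCH_EXCEPT() with the
    		 * kernel-oops reason code.
    		 */
    		k_oops();
    	}
    }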
diff --git a/include/arch/x86/asm_inline_gcc.h b/include/arch/x86/asm_inline_gcc.h
index 3420dbe..8851d22 100644
--- a/include/arch/x86/asm_inline_gcc.h
+++ b/include/arch/x86/asm_inline_gcc.h
@@ -68,7 +68,7 @@
  * @return N/A
  */
 
-static ALWAYS_INLINE void _do_irq_unlock(void)
+static ALWAYS_INLINE void z_do_irq_unlock(void)
 {
 	__asm__ volatile (
 		"sti;\n\t"
@@ -180,7 +180,7 @@
  *  @brief read timestamp register ensuring serialization
  */
 
-static inline u64_t _tsc_read(void)
+static inline u64_t z_tsc_read(void)
 {
 	union {
 		struct  {
diff --git a/include/arch/x86/irq_controller.h b/include/arch/x86/irq_controller.h
index 80edca6..baf15c5 100644
--- a/include/arch/x86/irq_controller.h
+++ b/include/arch/x86/irq_controller.h
@@ -42,7 +42,7 @@
  * @param irq Interrupt line
  * @return Vector this interrupt has been assigned to
  */
-#define _IRQ_CONTROLLER_VECTOR_MAPPING(irq) \
+#define Z_IRQ_CONTROLLER_VECTOR_MAPPING(irq) \
 	__IRQ_CONTROLLER_VECTOR_MAPPING(irq)
 #endif
 
@@ -63,7 +63,7 @@
  *
  * @return N/A
  */
-static inline void _irq_controller_irq_config(unsigned int vector,
+static inline void z_irq_controller_irq_config(unsigned int vector,
 					      unsigned int irq, u32_t flags)
 {
 	__irq_controller_irq_config(vector, irq, flags);
diff --git a/include/arch/x86/segmentation.h b/include/arch/x86/segmentation.h
index 0ab50c4..c3b0fe6 100644
--- a/include/arch/x86/segmentation.h
+++ b/include/arch/x86/segmentation.h
@@ -388,7 +388,7 @@
  * @param offset Offset within segment
  * @param segment_selector Segment selector
  */
-static inline void _sd_set_seg_offset(struct segment_descriptor *sd,
+static inline void z_sd_set_seg_offset(struct segment_descriptor *sd,
 				      u16_t segment_selector,
 				      u32_t offset)
 {
@@ -411,7 +411,7 @@
 				  u16_t seg_selector, u32_t offset,
 				  u32_t dpl)
 {
-	_sd_set_seg_offset(sd, seg_selector, offset);
+	z_sd_set_seg_offset(sd, seg_selector, offset);
 	sd->dpl = dpl;
 	sd->descriptor_type = DT_TYPE_SYSTEM;
 	sd->present = 1;
diff --git a/include/arch/x86/syscall.h b/include/arch/x86/syscall.h
index 3a6f8e2..898669b 100644
--- a/include/arch/x86/syscall.h
+++ b/include/arch/x86/syscall.h
@@ -36,7 +36,7 @@
  * the entry stub clobbers EDX and ECX on IAMCU systems
  */
 
-static inline u32_t _arch_syscall_invoke6(u32_t arg1, u32_t arg2, u32_t arg3,
+static inline u32_t z_arch_syscall_invoke6(u32_t arg1, u32_t arg2, u32_t arg3,
 					  u32_t arg4, u32_t arg5, u32_t arg6,
 					  u32_t call_id)
 {
@@ -57,7 +57,7 @@
 	return ret;
 }
 
-static inline u32_t _arch_syscall_invoke5(u32_t arg1, u32_t arg2, u32_t arg3,
+static inline u32_t z_arch_syscall_invoke5(u32_t arg1, u32_t arg2, u32_t arg3,
 					  u32_t arg4, u32_t arg5, u32_t call_id)
 {
 	u32_t ret;
@@ -73,7 +73,7 @@
 	return ret;
 }
 
-static inline u32_t _arch_syscall_invoke4(u32_t arg1, u32_t arg2, u32_t arg3,
+static inline u32_t z_arch_syscall_invoke4(u32_t arg1, u32_t arg2, u32_t arg3,
 					  u32_t arg4, u32_t call_id)
 {
 	u32_t ret;
@@ -89,7 +89,7 @@
 	return ret;
 }
 
-static inline u32_t _arch_syscall_invoke3(u32_t arg1, u32_t arg2, u32_t arg3,
+static inline u32_t z_arch_syscall_invoke3(u32_t arg1, u32_t arg2, u32_t arg3,
 					  u32_t call_id)
 {
 	u32_t ret;
@@ -104,7 +104,7 @@
 	return ret;
 }
 
-static inline u32_t _arch_syscall_invoke2(u32_t arg1, u32_t arg2, u32_t call_id)
+static inline u32_t z_arch_syscall_invoke2(u32_t arg1, u32_t arg2, u32_t call_id)
 {
 	u32_t ret;
 
@@ -122,7 +122,7 @@
 	return ret;
 }
 
-static inline u32_t _arch_syscall_invoke1(u32_t arg1, u32_t call_id)
+static inline u32_t z_arch_syscall_invoke1(u32_t arg1, u32_t call_id)
 {
 	u32_t ret;
 
@@ -137,7 +137,7 @@
 	return ret;
 }
 
-static inline u32_t _arch_syscall_invoke0(u32_t call_id)
+static inline u32_t z_arch_syscall_invoke0(u32_t call_id)
 {
 	u32_t ret;
 
@@ -152,7 +152,7 @@
 	return ret;
 }
 
-static inline bool _arch_is_user_context(void)
+static inline bool z_arch_is_user_context(void)
 {
 	int cs;
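
z_arch_is_user_context() reads CS because the selector's privilege bits already encode the answer, and the generated syscall stubs branch on it. Extending the illustrative foo() wrapper from the ARC sketch earlier, the full shape a stub takes (a reconstruction, not verbatim generator output):

    static inline int foo(u32_t arg)
    {
    	if (z_arch_is_user_context()) {
    		/* user mode: trap into the kernel */
    		return (int)z_arch_syscall_invoke1(arg, K_SYSCALL_FOO);
    	}
    	/* kernel mode: call the implementation directly */
    	return z_impl_foo(arg);
    }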
 
diff --git a/include/arch/x86_64/arch.h b/include/arch/x86_64/arch.h
index f3b52ae..4c3330a 100644
--- a/include/arch/x86_64/arch.h
+++ b/include/arch/x86_64/arch.h
@@ -13,8 +13,8 @@
 
 typedef struct NANO_ESF NANO_ESF;
 extern const NANO_ESF _default_esf;
-void _SysFatalErrorHandler(unsigned int reason, const NANO_ESF *esf);
-void _NanoFatalErrorHandler(unsigned int reason, const NANO_ESF *esf);
+void z_SysFatalErrorHandler(unsigned int reason, const NANO_ESF *esf);
+void z_NanoFatalErrorHandler(unsigned int reason, const NANO_ESF *esf);
 
 /* Existing code requires only these particular symbols be defined,
  * but doesn't put them in a global header.  Needs cleaner
diff --git a/include/arch/xtensa/arch.h b/include/arch/xtensa/arch.h
index 021afcc..9da0f0b 100644
--- a/include/arch/xtensa/arch.h
+++ b/include/arch/xtensa/arch.h
@@ -78,7 +78,7 @@
 }
 
 /* internal routine documented in C file, needed by IRQ_CONNECT() macro */
-extern void _irq_priority_set(u32_t irq, u32_t prio, u32_t flags);
+extern void z_irq_priority_set(u32_t irq, u32_t prio, u32_t flags);
 
 
 /**
@@ -100,7 +100,7 @@
  * spurious IRQ handler) with what was supplied here.
  *
  * 3. The priority level for the interrupt is configured by a call to
- * _irq_priority_set()
+ * z_irq_priority_set()
  *
  * @param irq_p IRQ line number
  * @param priority_p Interrupt priority
@@ -110,14 +110,14 @@
  *
  * @return The vector assigned to this interrupt
  */
-#define _ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
+#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
 ({ \
-	_ISR_DECLARE(irq_p, flags_p, isr_p, isr_param_p); \
+	Z_ISR_DECLARE(irq_p, flags_p, isr_p, isr_param_p); \
 	irq_p; \
 })
 
 /* Spurious interrupt handler. Throws an error if called */
-extern void _irq_spurious(void *unused);
+extern void z_irq_spurious(void *unused);
 
 #ifdef CONFIG_XTENSA_ASM2
 #define XTENSA_ERR_NORET /**/
@@ -125,14 +125,14 @@
 #define XTENSA_ERR_NORET FUNC_NORETURN
 #endif
 
-XTENSA_ERR_NORET void _SysFatalErrorHandler(unsigned int reason,
+XTENSA_ERR_NORET void z_SysFatalErrorHandler(unsigned int reason,
 					    const NANO_ESF *esf);
 
-XTENSA_ERR_NORET void _NanoFatalErrorHandler(unsigned int reason,
+XTENSA_ERR_NORET void z_NanoFatalErrorHandler(unsigned int reason,
 					     const NANO_ESF *pEsf);
 
-extern u32_t _timer_cycle_get_32(void);
-#define _arch_k_cycle_get_32()	_timer_cycle_get_32()
+extern u32_t z_timer_cycle_get_32(void);
+#define z_arch_k_cycle_get_32()	z_timer_cycle_get_32()
 
 /**
  * @brief Explicit nop operation.
diff --git a/include/arch/xtensa/xtensa_irq.h b/include/arch/xtensa/xtensa_irq.h
index bed1953..b49c61b 100644
--- a/include/arch/xtensa/xtensa_irq.h
+++ b/include/arch/xtensa/xtensa_irq.h
@@ -21,15 +21,15 @@
 			CONFIG_NUM_3RD_LEVEL_AGGREGATORS) *\
 			CONFIG_MAX_IRQ_PER_AGGREGATOR)
 
-#define _arch_irq_enable(irq)	_soc_irq_enable(irq)
-#define _arch_irq_disable(irq)	_soc_irq_disable(irq)
+#define z_arch_irq_enable(irq)	z_soc_irq_enable(irq)
+#define z_arch_irq_disable(irq)	z_soc_irq_disable(irq)
 
 #else
 
 #define CONFIG_NUM_IRQS XCHAL_NUM_INTERRUPTS
 
-#define _arch_irq_enable(irq)	_xtensa_irq_enable(irq)
-#define _arch_irq_disable(irq)	_xtensa_irq_disable(irq)
+#define z_arch_irq_enable(irq)	z_xtensa_irq_enable(irq)
+#define z_arch_irq_disable(irq)	z_xtensa_irq_disable(irq)
 
 #endif
 
@@ -43,9 +43,9 @@
  *
  * @return N/A
  */
-static ALWAYS_INLINE void _xtensa_irq_enable(u32_t irq)
+static ALWAYS_INLINE void z_xtensa_irq_enable(u32_t irq)
 {
-	_xt_ints_on(1 << irq);
+	z_xt_ints_on(1 << irq);
 }
 
 /**
@@ -57,18 +57,18 @@
  *
  * @return N/A
  */
-static ALWAYS_INLINE void _xtensa_irq_disable(u32_t irq)
+static ALWAYS_INLINE void z_xtensa_irq_disable(u32_t irq)
 {
-	_xt_ints_off(1 << irq);
+	z_xt_ints_off(1 << irq);
 }
 
-static ALWAYS_INLINE unsigned int _arch_irq_lock(void)
+static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
 {
 	unsigned int key = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);
 	return key;
 }
 
-static ALWAYS_INLINE void _arch_irq_unlock(unsigned int key)
+static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
 {
 	XTOS_RESTORE_INTLEVEL(key);
 }
diff --git a/include/can.h b/include/can.h
index d8e35bf..576d022 100644
--- a/include/can.h
+++ b/include/can.h
@@ -265,7 +265,7 @@
 __syscall int can_send(struct device *dev, const struct zcan_frame *msg,
 		       s32_t timeout, can_tx_callback_t callback_isr);
 
-static inline int _impl_can_send(struct device *dev,
+static inline int z_impl_can_send(struct device *dev,
 				 const struct zcan_frame *msg,
 				 s32_t timeout, can_tx_callback_t callback_isr)
 {
@@ -340,7 +340,7 @@
 __syscall int can_attach_msgq(struct device *dev, struct k_msgq *msg_q,
 			      const struct zcan_filter *filter);
 
-static inline int _impl_can_attach_msgq(struct device *dev,
+static inline int z_impl_can_attach_msgq(struct device *dev,
 					struct k_msgq *msg_q,
 					const struct zcan_filter *filter)
 {
@@ -389,7 +389,7 @@
  */
 __syscall void can_detach(struct device *dev, int filter_id);
 
-static inline void _impl_can_detach(struct device *dev, int filter_id)
+static inline void z_impl_can_detach(struct device *dev, int filter_id)
 {
 	const struct can_driver_api *api = dev->driver_api;
 
@@ -409,7 +409,7 @@
 __syscall int can_configure(struct device *dev, enum can_mode mode,
 			    u32_t bitrate);
 
-static inline int _impl_can_configure(struct device *dev, enum can_mode mode,
+static inline int z_impl_can_configure(struct device *dev, enum can_mode mode,
 				      u32_t bitrate)
 {
 	const struct can_driver_api *api = dev->driver_api;
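
The z_impl_ prefix is the name the syscall machinery resolves each __syscall to, so these renames must stay in lockstep with scripts/gen_syscall_header.py. Usage is unchanged; a hedged sketch of draining matched frames through a queue (the filter contents are whatever struct zcan_filter in this header describes):

    K_MSGQ_DEFINE(can_msgq, sizeof(struct zcan_frame), 8, 4);

    static void receive_one(struct device *can_dev,
    			    const struct zcan_filter *filter)
    {
    	struct zcan_frame frame;
    	int filter_id = can_attach_msgq(can_dev, &can_msgq, filter);

    	if (filter_id < 0) {
    		return;	/* no free filter slot */
    	}
    	if (k_msgq_get(&can_msgq, &frame, K_FOREVER) == 0) {
    		/* process frame */
    	}
    	can_detach(can_dev, filter_id);
    }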
diff --git a/include/counter.h b/include/counter.h
index 2ddaf06..3d48ba5 100644
--- a/include/counter.h
+++ b/include/counter.h
@@ -215,7 +215,7 @@
  */
 __syscall int counter_start(struct device *dev);
 
-static inline int _impl_counter_start(struct device *dev)
+static inline int z_impl_counter_start(struct device *dev)
 {
 	const struct counter_driver_api *api = dev->driver_api;
 
@@ -233,7 +233,7 @@
  */
 __syscall int counter_stop(struct device *dev);
 
-static inline int _impl_counter_stop(struct device *dev)
+static inline int z_impl_counter_stop(struct device *dev)
 {
 	const struct counter_driver_api *api = dev->driver_api;
 
@@ -248,7 +248,7 @@
  */
 __syscall u32_t counter_read(struct device *dev);
 
-static inline u32_t _impl_counter_read(struct device *dev)
+static inline u32_t z_impl_counter_read(struct device *dev)
 {
 	const struct counter_driver_api *api = dev->driver_api;
 
@@ -356,7 +356,7 @@
  */
 __syscall int counter_get_pending_int(struct device *dev);
 
-static inline int _impl_counter_get_pending_int(struct device *dev)
+static inline int z_impl_counter_get_pending_int(struct device *dev)
 {
 	const struct counter_driver_api *api = dev->driver_api;
 
@@ -372,7 +372,7 @@
  */
 __syscall u32_t counter_get_top_value(struct device *dev);
 
-static inline u32_t _impl_counter_get_top_value(struct device *dev)
+static inline u32_t z_impl_counter_get_top_value(struct device *dev)
 {
 	const struct counter_driver_api *api = dev->driver_api;
 
@@ -389,7 +389,7 @@
  */
 __syscall u32_t counter_get_max_relative_alarm(struct device *dev);
 
-static inline u32_t _impl_counter_get_max_relative_alarm(struct device *dev)
+static inline u32_t z_impl_counter_get_max_relative_alarm(struct device *dev)
 {
 	const struct counter_driver_api *api = dev->driver_api;
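
counter_read() and counter_get_top_value() pair naturally for polling, but deltas must be taken modulo the wrap point. A hedged sketch, assuming an up-counter:

    static u32_t counter_elapsed(struct device *dev, u32_t since)
    {
    	u32_t now = counter_read(dev);
    	u32_t top = counter_get_top_value(dev);

    	/* counts run 0..top, then wrap; handles one wrap since 'since' */
    	return (now >= since) ? (now - since) : (top - since + now + 1U);
    }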
 
diff --git a/include/device.h b/include/device.h
index 4b2262e..f5ec7a3 100644
--- a/include/device.h
+++ b/include/device.h
@@ -247,7 +247,7 @@
 #endif
 };
 
-void _sys_device_do_config_level(s32_t level);
+void z_sys_device_do_config_level(s32_t level);
 
 /**
  * @brief Retrieve the device structure for a driver by name
diff --git a/include/dma.h b/include/dma.h
index 1ec8814..86e5673 100644
--- a/include/dma.h
+++ b/include/dma.h
@@ -239,7 +239,7 @@
  */
 __syscall int dma_start(struct device *dev, u32_t channel);
 
-static inline int _impl_dma_start(struct device *dev, u32_t channel)
+static inline int z_impl_dma_start(struct device *dev, u32_t channel)
 {
 	const struct dma_driver_api *api =
 		(const struct dma_driver_api *)dev->driver_api;
@@ -262,7 +262,7 @@
  */
 __syscall int dma_stop(struct device *dev, u32_t channel);
 
-static inline int _impl_dma_stop(struct device *dev, u32_t channel)
+static inline int z_impl_dma_stop(struct device *dev, u32_t channel)
 {
 	const struct dma_driver_api *api =
 		(const struct dma_driver_api *)dev->driver_api;
diff --git a/include/drivers/ioapic.h b/include/drivers/ioapic.h
index 25ad507..94ff0bb 100644
--- a/include/drivers/ioapic.h
+++ b/include/drivers/ioapic.h
@@ -35,10 +35,10 @@
 #define IOAPIC_EXTINT 0x00000700
 
 #ifndef _ASMLANGUAGE
-void _ioapic_irq_enable(unsigned int irq);
-void _ioapic_irq_disable(unsigned int irq);
-void _ioapic_int_vec_set(unsigned int irq, unsigned int vector);
-void _ioapic_irq_set(unsigned int irq, unsigned int vector, u32_t flags);
+void z_ioapic_irq_enable(unsigned int irq);
+void z_ioapic_irq_disable(unsigned int irq);
+void z_ioapic_int_vec_set(unsigned int irq, unsigned int vector);
+void z_ioapic_irq_set(unsigned int irq, unsigned int vector, u32_t flags);
 #endif /* _ASMLANGUAGE */
 
 #ifdef __cplusplus
diff --git a/include/drivers/loapic.h b/include/drivers/loapic.h
index b6965e2..949cd5c 100644
--- a/include/drivers/loapic.h
+++ b/include/drivers/loapic.h
@@ -46,12 +46,12 @@
 
 #ifndef _ASMLANGUAGE
 
-extern void _loapic_int_vec_set(unsigned int irq, unsigned int vector);
-extern void _loapic_irq_enable(unsigned int irq);
-extern void _loapic_irq_disable(unsigned int irq);
+extern void z_loapic_int_vec_set(unsigned int irq, unsigned int vector);
+extern void z_loapic_irq_enable(unsigned int irq);
+extern void z_loapic_irq_disable(unsigned int irq);
 
 #if CONFIG_EOI_FORWARDING_BUG
-extern void _lakemont_eoi(void);
+extern void z_lakemont_eoi(void);
 #endif
 
 #endif /* _ASMLANGUAGE */
diff --git a/include/drivers/sysapic.h b/include/drivers/sysapic.h
index 3c25e4f..afbeffa 100644
--- a/include/drivers/sysapic.h
+++ b/include/drivers/sysapic.h
@@ -28,13 +28,13 @@
 int __irq_controller_isr_vector_get(void);
 
 #ifdef CONFIG_JAILHOUSE_X2APIC
-void _jailhouse_eoi(void);
+void z_jailhouse_eoi(void);
 #endif
 
 static inline void __irq_controller_eoi(void)
 {
 #if CONFIG_EOI_FORWARDING_BUG
-	_lakemont_eoi();
+	z_lakemont_eoi();
 #else
 	*(volatile int *)(CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_EOI) = 0;
 #endif
@@ -44,12 +44,12 @@
 
 #if CONFIG_EOI_FORWARDING_BUG
 .macro __irq_controller_eoi_macro
-	call	_lakemont_eoi
+	call	z_lakemont_eoi
 .endm
 #else
 .macro __irq_controller_eoi_macro
 #ifdef CONFIG_JAILHOUSE_X2APIC
-	call	_jailhouse_eoi
+	call	z_jailhouse_eoi
 #else
 	xorl %eax, %eax			/* zeroes eax */
 	loapic_eoi_reg = (CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_EOI)
diff --git a/include/entropy.h b/include/entropy.h
index 313c4b0..b1090c1 100644
--- a/include/entropy.h
+++ b/include/entropy.h
@@ -65,7 +65,7 @@
 				  u8_t *buffer,
 				  u16_t length);
 
-static inline int _impl_entropy_get_entropy(struct device *dev,
+static inline int z_impl_entropy_get_entropy(struct device *dev,
 					    u8_t *buffer,
 					    u16_t length)
 {
diff --git a/include/flash.h b/include/flash.h
index fa255f9..1986097 100644
--- a/include/flash.h
+++ b/include/flash.h
@@ -94,7 +94,7 @@
 __syscall int flash_read(struct device *dev, off_t offset, void *data,
 			 size_t len);
 
-static inline int _impl_flash_read(struct device *dev, off_t offset, void *data,
+static inline int z_impl_flash_read(struct device *dev, off_t offset, void *data,
 			     size_t len)
 {
 	const struct flash_driver_api *api = dev->driver_api;
@@ -118,7 +118,7 @@
 __syscall int flash_write(struct device *dev, off_t offset, const void *data,
 			  size_t len);
 
-static inline int _impl_flash_write(struct device *dev, off_t offset,
+static inline int z_impl_flash_write(struct device *dev, off_t offset,
 				    const void *data, size_t len)
 {
 	const struct flash_driver_api *api = dev->driver_api;
@@ -149,7 +149,7 @@
  */
 __syscall int flash_erase(struct device *dev, off_t offset, size_t size);
 
-static inline int _impl_flash_erase(struct device *dev, off_t offset,
+static inline int z_impl_flash_erase(struct device *dev, off_t offset,
 				    size_t size)
 {
 	const struct flash_driver_api *api = dev->driver_api;
@@ -175,7 +175,7 @@
  */
 __syscall int flash_write_protection_set(struct device *dev, bool enable);
 
-static inline int _impl_flash_write_protection_set(struct device *dev,
+static inline int z_impl_flash_write_protection_set(struct device *dev,
 						   bool enable)
 {
 	const struct flash_driver_api *api = dev->driver_api;
@@ -263,7 +263,7 @@
  */
 __syscall size_t flash_get_write_block_size(struct device *dev);
 
-static inline size_t _impl_flash_get_write_block_size(struct device *dev)
+static inline size_t z_impl_flash_get_write_block_size(struct device *dev)
 {
 	const struct flash_driver_api *api = dev->driver_api;
 
diff --git a/include/gpio.h b/include/gpio.h
index f38105c..4507f2e 100644
--- a/include/gpio.h
+++ b/include/gpio.h
@@ -123,7 +123,7 @@
 __syscall int gpio_config(struct device *port, int access_op, u32_t pin,
 			  int flags);
 
-static inline int _impl_gpio_config(struct device *port, int access_op,
+static inline int z_impl_gpio_config(struct device *port, int access_op,
 				    u32_t pin, int flags)
 {
 	const struct gpio_driver_api *api =
@@ -135,7 +135,7 @@
 __syscall int gpio_write(struct device *port, int access_op, u32_t pin,
 			 u32_t value);
 
-static inline int _impl_gpio_write(struct device *port, int access_op,
+static inline int z_impl_gpio_write(struct device *port, int access_op,
 				   u32_t pin, u32_t value)
 {
 	const struct gpio_driver_api *api =
@@ -147,7 +147,7 @@
 __syscall int gpio_read(struct device *port, int access_op, u32_t pin,
 			u32_t *value);
 
-static inline int _impl_gpio_read(struct device *port, int access_op,
+static inline int z_impl_gpio_read(struct device *port, int access_op,
 				  u32_t pin, u32_t *value)
 {
 	const struct gpio_driver_api *api =
@@ -159,7 +159,7 @@
 __syscall int gpio_enable_callback(struct device *port, int access_op,
 				   u32_t pin);
 
-static inline int _impl_gpio_enable_callback(struct device *port,
+static inline int z_impl_gpio_enable_callback(struct device *port,
 					     int access_op, u32_t pin)
 {
 	const struct gpio_driver_api *api =
@@ -175,7 +175,7 @@
 __syscall int gpio_disable_callback(struct device *port, int access_op,
 				    u32_t pin);
 
-static inline int _impl_gpio_disable_callback(struct device *port,
+static inline int z_impl_gpio_disable_callback(struct device *port,
 					      int access_op, u32_t pin)
 {
 	const struct gpio_driver_api *api =
@@ -423,7 +423,7 @@
 /**
  * @internal
  */
-static inline int _impl_gpio_get_pending_int(struct device *dev)
+static inline int z_impl_gpio_get_pending_int(struct device *dev)
 {
 	const struct gpio_driver_api *api =
 		(const struct gpio_driver_api *)dev->driver_api;
diff --git a/include/hwinfo.h b/include/hwinfo.h
index a558cf4..7f9e94c 100644
--- a/include/hwinfo.h
+++ b/include/hwinfo.h
@@ -43,7 +43,7 @@
  */
 __syscall ssize_t hwinfo_get_device_id(u8_t *buffer, size_t length);
 
-ssize_t _impl_hwinfo_get_device_id(u8_t *buffer, size_t length);
+ssize_t z_impl_hwinfo_get_device_id(u8_t *buffer, size_t length);
 
 /**
  * @}
diff --git a/include/i2c.h b/include/i2c.h
index 820d00e..cfb6998 100644
--- a/include/i2c.h
+++ b/include/i2c.h
@@ -204,7 +204,7 @@
  */
 __syscall int i2c_configure(struct device *dev, u32_t dev_config);
 
-static inline int _impl_i2c_configure(struct device *dev, u32_t dev_config)
+static inline int z_impl_i2c_configure(struct device *dev, u32_t dev_config)
 {
 	const struct i2c_driver_api *api =
 		(const struct i2c_driver_api *)dev->driver_api;
@@ -242,7 +242,7 @@
 			   struct i2c_msg *msgs, u8_t num_msgs,
 			   u16_t addr);
 
-static inline int _impl_i2c_transfer(struct device *dev,
+static inline int z_impl_i2c_transfer(struct device *dev,
 				     struct i2c_msg *msgs, u8_t num_msgs,
 				     u16_t addr)
 {
@@ -278,7 +278,7 @@
 __syscall int i2c_slave_register(struct device *dev,
 				 struct i2c_slave_config *cfg);
 
-static inline int _impl_i2c_slave_register(struct device *dev,
+static inline int z_impl_i2c_slave_register(struct device *dev,
 					   struct i2c_slave_config *cfg)
 {
 	const struct i2c_driver_api *api =
@@ -309,7 +309,7 @@
 __syscall int i2c_slave_unregister(struct device *dev,
 				   struct i2c_slave_config *cfg);
 
-static inline int _impl_i2c_slave_unregister(struct device *dev,
+static inline int z_impl_i2c_slave_unregister(struct device *dev,
 					     struct i2c_slave_config *cfg)
 {
 	const struct i2c_driver_api *api =
@@ -336,7 +336,7 @@
  */
 __syscall int i2c_slave_driver_register(struct device *dev);
 
-static inline int _impl_i2c_slave_driver_register(struct device *dev)
+static inline int z_impl_i2c_slave_driver_register(struct device *dev)
 {
 	const struct i2c_slave_driver_api *api =
 		(const struct i2c_slave_driver_api *)dev->driver_api;
@@ -358,7 +358,7 @@
  */
 __syscall int i2c_slave_driver_unregister(struct device *dev);
 
-static inline int _impl_i2c_slave_driver_unregister(struct device *dev)
+static inline int z_impl_i2c_slave_driver_unregister(struct device *dev)
 {
 	const struct i2c_slave_driver_api *api =
 		(const struct i2c_slave_driver_api *)dev->driver_api;
diff --git a/include/i2s.h b/include/i2s.h
index ee027c7..a04c033 100644
--- a/include/i2s.h
+++ b/include/i2s.h
@@ -349,7 +349,7 @@
 __syscall int i2s_configure(struct device *dev, enum i2s_dir dir,
 			    struct i2s_config *cfg);
 
-static inline int _impl_i2s_configure(struct device *dev, enum i2s_dir dir,
+static inline int z_impl_i2s_configure(struct device *dev, enum i2s_dir dir,
 				      struct i2s_config *cfg)
 {
 	const struct i2s_driver_api *api = dev->driver_api;
@@ -508,7 +508,7 @@
 __syscall int i2s_trigger(struct device *dev, enum i2s_dir dir,
 			  enum i2s_trigger_cmd cmd);
 
-static inline int _impl_i2s_trigger(struct device *dev, enum i2s_dir dir,
+static inline int z_impl_i2s_trigger(struct device *dev, enum i2s_dir dir,
 				    enum i2s_trigger_cmd cmd)
 {
 	const struct i2s_driver_api *api = dev->driver_api;
diff --git a/include/init.h b/include/init.h
index ae4c7a8..07408f0 100644
--- a/include/init.h
+++ b/include/init.h
@@ -30,7 +30,7 @@
 /* A counter is used to avoid issues when two or more system devices
  * are declared in the same C file with the same init function.
  */
-#define _SYS_NAME(init_fn) _CONCAT(_CONCAT(sys_init_, init_fn), __COUNTER__)
+#define Z_SYS_NAME(init_fn) _CONCAT(_CONCAT(sys_init_, init_fn), __COUNTER__)
 
 /**
  * @def SYS_INIT
@@ -47,7 +47,7 @@
  * DEVICE_AND_API_INIT for details.
  */
 #define SYS_INIT(init_fn, level, prio) \
-	DEVICE_AND_API_INIT(_SYS_NAME(init_fn), "", init_fn, NULL, NULL, level,\
+	DEVICE_AND_API_INIT(Z_SYS_NAME(init_fn), "", init_fn, NULL, NULL, level,\
 	prio, NULL)
 
 /**
@@ -68,7 +68,7 @@
  * 	       DEVICE_INIT for details.
  */
 #define SYS_DEVICE_DEFINE(drv_name, init_fn, pm_control_fn, level, prio) \
-	DEVICE_DEFINE(_SYS_NAME(init_fn), drv_name, init_fn, pm_control_fn, \
+	DEVICE_DEFINE(Z_SYS_NAME(init_fn), drv_name, init_fn, pm_control_fn, \
 		      NULL, NULL, level, prio, NULL)
 
 #ifdef __cplusplus
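Z_SYS_NAME() still concatenates __COUNTER__, so two SYS_INIT() entries
in one translation unit always produce distinct device names, even when
they reuse the same init function. A usage sketch ('my_subsys_init' is
a placeholder):

   static int my_subsys_init(struct device *dev)
   {
           ARG_UNUSED(dev);
           /* one-time setup at the chosen init level */
           return 0;
   }

   SYS_INIT(my_subsys_init, APPLICATION,
            CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);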
diff --git a/include/ipm.h b/include/ipm.h
index 04d8544..56bbfda 100644
--- a/include/ipm.h
+++ b/include/ipm.h
@@ -131,7 +131,7 @@
 __syscall int ipm_send(struct device *ipmdev, int wait, u32_t id,
 		       const void *data, int size);
 
-static inline int _impl_ipm_send(struct device *ipmdev, int wait, u32_t id,
+static inline int z_impl_ipm_send(struct device *ipmdev, int wait, u32_t id,
 			   const void *data, int size)
 {
 	const struct ipm_driver_api *api = ipmdev->driver_api;
@@ -167,7 +167,7 @@
  */
 __syscall int ipm_max_data_size_get(struct device *ipmdev);
 
-static inline int _impl_ipm_max_data_size_get(struct device *ipmdev)
+static inline int z_impl_ipm_max_data_size_get(struct device *ipmdev)
 {
 	const struct ipm_driver_api *api = ipmdev->driver_api;
 
@@ -187,7 +187,7 @@
  */
 __syscall u32_t ipm_max_id_val_get(struct device *ipmdev);
 
-static inline u32_t _impl_ipm_max_id_val_get(struct device *ipmdev)
+static inline u32_t z_impl_ipm_max_id_val_get(struct device *ipmdev)
 {
 	const struct ipm_driver_api *api = ipmdev->driver_api;
 
@@ -205,7 +205,7 @@
  */
 __syscall int ipm_set_enabled(struct device *ipmdev, int enable);
 
-static inline int _impl_ipm_set_enabled(struct device *ipmdev, int enable)
+static inline int z_impl_ipm_set_enabled(struct device *ipmdev, int enable)
 {
 	const struct ipm_driver_api *api = ipmdev->driver_api;
 
diff --git a/include/irq.h b/include/irq.h
index dd3cd16..43e9a31 100644
--- a/include/irq.h
+++ b/include/irq.h
@@ -48,7 +48,7 @@
  * @return Interrupt vector assigned to this interrupt.
  */
 #define IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
-	_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p)
+	Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p)
 
 /**
  * Configure a dynamic interrupt.
@@ -63,7 +63,7 @@
  *
  * @return The vector assigned to this interrupt
  */
-extern int _arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
+extern int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
 			     void (*routine)(void *parameter), void *parameter,
 			     u32_t flags);
 
@@ -72,7 +72,7 @@
 		    void (*routine)(void *parameter), void *parameter,
 		    u32_t flags)
 {
-	return _arch_irq_connect_dynamic(irq, priority, routine, parameter, flags);
+	return z_arch_irq_connect_dynamic(irq, priority, routine, parameter, flags);
 }
 
 /**
@@ -116,7 +116,7 @@
  * @return Interrupt vector assigned to this interrupt.
  */
 #define IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
-	_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p)
+	Z_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p)
 
 /**
  * @brief Common tasks before executing the body of an ISR
@@ -125,7 +125,7 @@
  * minimal architecture-specific tasks before the ISR itself can run. It takes
  * no arguments and has no return value.
  */
-#define ISR_DIRECT_HEADER() _ARCH_ISR_DIRECT_HEADER()
+#define ISR_DIRECT_HEADER() Z_ARCH_ISR_DIRECT_HEADER()
 
 /**
  * @brief Common tasks before exiting the body of an ISR
@@ -134,16 +134,16 @@
  * minimal architecture-specific tasks like EOI. It has no return value.
  *
  * In a normal interrupt, a check is done at end of interrupt to invoke
- * _Swap() logic if the current thread is preemptible and there is another
+ * z_swap() logic if the current thread is preemptible and there is another
  * thread ready to run in the kernel's ready queue cache. This is now optional
  * and controlled by the check_reschedule argument. If unsure, set to nonzero.
  * On systems that do stack switching and nested interrupt tracking in software,
- * _Swap() should only be called if this was a non-nested interrupt.
+ * z_swap() should only be called if this was a non-nested interrupt.
  *
  * @param check_reschedule If nonzero, additionally invoke scheduling logic
  */
 #define ISR_DIRECT_FOOTER(check_reschedule) \
-	_ARCH_ISR_DIRECT_FOOTER(check_reschedule)
+	Z_ARCH_ISR_DIRECT_FOOTER(check_reschedule)
 
 /**
  * @brief Perform power management idle exit logic
@@ -153,7 +153,7 @@
  * exit power management idle state. It takes no parameters and returns no
  * arguments. It may be omitted, but be careful!
  */
-#define ISR_DIRECT_PM() _ARCH_ISR_DIRECT_PM()
+#define ISR_DIRECT_PM() Z_ARCH_ISR_DIRECT_PM()
 
 /**
  * @brief Helper macro to declare a direct interrupt service routine.
@@ -175,7 +175,7 @@
  *	bool done = do_stuff();
  *	ISR_DIRECT_PM(); <-- done after do_stuff() due to latency concerns
  *	if (!done) {
- *		return 0;  <-- Don't bother checking if we have to _Swap()
+ *		return 0;  <-- Don't bother checking if we have to z_swap()
  *	}
  *	k_sem_give(some_sem);
  *	return 1;
@@ -183,7 +183,7 @@
  *
  * @param name symbol name of the ISR
  */
-#define ISR_DIRECT_DECLARE(name) _ARCH_ISR_DIRECT_DECLARE(name)
+#define ISR_DIRECT_DECLARE(name) Z_ARCH_ISR_DIRECT_DECLARE(name)
 
 /**
  * @brief Lock interrupts.
@@ -217,10 +217,10 @@
  * @return Lock-out key.
  */
 #ifdef CONFIG_SMP
-unsigned int _smp_global_lock(void);
-#define irq_lock() _smp_global_lock()
+unsigned int z_smp_global_lock(void);
+#define irq_lock() z_smp_global_lock()
 #else
-#define irq_lock() _arch_irq_lock()
+#define irq_lock() z_arch_irq_lock()
 #endif
 
 /**
@@ -238,10 +238,10 @@
  * @return N/A
  */
 #ifdef CONFIG_SMP
-void _smp_global_unlock(unsigned int key);
-#define irq_unlock(key) _smp_global_unlock(key)
+void z_smp_global_unlock(unsigned int key);
+#define irq_unlock(key) z_smp_global_unlock(key)
 #else
-#define irq_unlock(key) _arch_irq_unlock(key)
+#define irq_unlock(key) z_arch_irq_unlock(key)
 #endif
 
 /**
@@ -253,7 +253,7 @@
  *
  * @return N/A
  */
-#define irq_enable(irq) _arch_irq_enable(irq)
+#define irq_enable(irq) z_arch_irq_enable(irq)
 
 /**
  * @brief Disable an IRQ.
@@ -264,7 +264,7 @@
  *
  * @return N/A
  */
-#define irq_disable(irq) _arch_irq_disable(irq)
+#define irq_disable(irq) z_arch_irq_disable(irq)
 
 /**
  * @brief Get IRQ enable state.
@@ -275,7 +275,7 @@
  *
  * @return interrupt enable state, true or false
  */
-#define irq_is_enabled(irq) _arch_irq_is_enabled(irq)
+#define irq_is_enabled(irq) z_arch_irq_is_enabled(irq)
 
 /**
  * @}
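None of the public IRQ macros change their names here; they only
forward to the renamed z_arch_*() implementations, so existing call
sites build unchanged. A sketch (MY_IRQ, MY_IRQ_PRIO and my_isr are
placeholders):

   void my_isr(void *arg)
   {
           ARG_UNUSED(arg);
           /* service the interrupt */
   }

   void configure(void)
   {
           unsigned int key;

           IRQ_CONNECT(MY_IRQ, MY_IRQ_PRIO, my_isr, NULL, 0);
           irq_enable(MY_IRQ);   /* -> z_arch_irq_enable(MY_IRQ) */

           key = irq_lock();     /* -> z_arch_irq_lock() when !CONFIG_SMP */
           /* critical section */
           irq_unlock(key);      /* -> z_arch_irq_unlock(key) */
   }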
diff --git a/include/kernel.h b/include/kernel.h
index f447a79..22e03e2 100644
--- a/include/kernel.h
+++ b/include/kernel.h
@@ -81,9 +81,9 @@
 	struct _priq_rb waitq;
 } _wait_q_t;
 
-extern bool _priq_rb_lessthan(struct rbnode *a, struct rbnode *b);
+extern bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);
 
-#define _WAIT_Q_INIT(wait_q) { { { .lessthan_fn = _priq_rb_lessthan } } }
+#define Z_WAIT_Q_INIT(wait_q) { { { .lessthan_fn = z_priq_rb_lessthan } } }
 
 #else
 
@@ -91,7 +91,7 @@
 	sys_dlist_t waitq;
 } _wait_q_t;
 
-#define _WAIT_Q_INIT(wait_q) { SYS_DLIST_STATIC_INIT(&(wait_q)->waitq) }
+#define Z_WAIT_Q_INIT(wait_q) { SYS_DLIST_STATIC_INIT(&(wait_q)->waitq) }
 
 #endif
 
@@ -157,7 +157,7 @@
 
 #ifdef CONFIG_USERSPACE
 /* Table generated by gperf; these objects are retrieved via
- * _k_object_find() */
+ * z_object_find() */
 struct _k_object {
 	char *name;
 	u8_t perms[CONFIG_MAX_THREAD_BYTES];
@@ -205,7 +205,7 @@
  *
  * @param obj Address of the kernel object
  */
-void _k_object_init(void *obj);
+void z_object_init(void *obj);
 #else
 
 #define K_THREAD_ACCESS_GRANT(thread, ...)
@@ -213,7 +213,7 @@
 /**
  * @internal
  */
-static inline void _k_object_init(void *obj)
+static inline void z_object_init(void *obj)
 {
 	ARG_UNUSED(obj);
 }
@@ -221,7 +221,7 @@
 /**
  * @internal
  */
-static inline void _impl_k_object_access_grant(void *object,
+static inline void z_impl_k_object_access_grant(void *object,
 					       struct k_thread *thread)
 {
 	ARG_UNUSED(object);
@@ -241,7 +241,7 @@
 /**
  * @internal
  */
-static inline void _impl_k_object_release(void *object)
+static inline void z_impl_k_object_release(void *object)
 {
 	ARG_UNUSED(object);
 }
@@ -326,7 +326,7 @@
  */
 void k_object_free(void *obj);
 #else
-static inline void *_impl_k_object_alloc(enum k_objects otype)
+static inline void *z_impl_k_object_alloc(enum k_objects otype)
 {
 	ARG_UNUSED(otype);
 
@@ -573,7 +573,7 @@
 	 * become part of the core OS
 	 */
 
-	/** _Swap() return value */
+	/** z_swap() return value */
 	int swap_retval;
 
 	/** Context handle returned via _arch_switch() */
@@ -1392,9 +1392,9 @@
 	.timeout = { \
 		.node = {},\
 		.dticks = 0, \
-		.fn = _timer_expiration_handler \
+		.fn = z_timer_expiration_handler \
 	}, \
-	.wait_q = _WAIT_Q_INIT(&obj.wait_q), \
+	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
 	.expiry_fn = expiry, \
 	.stop_fn = stop, \
 	.period = 0, \
@@ -1559,7 +1559,7 @@
  */
 __syscall u32_t k_timer_remaining_get(struct k_timer *timer);
 
-static inline u32_t _impl_k_timer_remaining_get(struct k_timer *timer)
+static inline u32_t z_impl_k_timer_remaining_get(struct k_timer *timer)
 {
 	const s32_t ticks = z_timeout_remaining(&timer->timeout);
 	return (ticks > 0) ? (u32_t)__ticks_to_ms(ticks) : 0U;
@@ -1584,7 +1584,7 @@
 /**
  * @internal
  */
-static inline void _impl_k_timer_user_data_set(struct k_timer *timer,
+static inline void z_impl_k_timer_user_data_set(struct k_timer *timer,
 					       void *user_data)
 {
 	timer->user_data = user_data;
@@ -1599,7 +1599,7 @@
  */
 __syscall void *k_timer_user_data_get(struct k_timer *timer);
 
-static inline void *_impl_k_timer_user_data_get(struct k_timer *timer)
+static inline void *z_impl_k_timer_user_data_get(struct k_timer *timer)
 {
 	return timer->user_data;
 }
@@ -1721,7 +1721,7 @@
  *
  * @return Current hardware clock up-counter (in cycles).
  */
-#define k_cycle_get_32()	_arch_k_cycle_get_32()
+#define k_cycle_get_32()	z_arch_k_cycle_get_32()
 
 /**
  * @}
@@ -1746,7 +1746,7 @@
 #define _K_QUEUE_INITIALIZER(obj) \
 	{ \
 	.data_q = SYS_SLIST_STATIC_INIT(&obj.data_q), \
-	.wait_q = _WAIT_Q_INIT(&obj.wait_q), \
+	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
 	_POLL_EVENT_OBJ_INIT(obj) \
 	_OBJECT_TRACING_INIT \
 	}
@@ -1989,7 +1989,7 @@
  */
 __syscall int k_queue_is_empty(struct k_queue *queue);
 
-static inline int _impl_k_queue_is_empty(struct k_queue *queue)
+static inline int z_impl_k_queue_is_empty(struct k_queue *queue)
 {
 	return (int)sys_sflist_is_empty(&queue->data_q);
 }
@@ -2005,7 +2005,7 @@
  */
 __syscall void *k_queue_peek_head(struct k_queue *queue);
 
-static inline void *_impl_k_queue_peek_head(struct k_queue *queue)
+static inline void *z_impl_k_queue_peek_head(struct k_queue *queue)
 {
 	return z_queue_node_peek(sys_sflist_peek_head(&queue->data_q), false);
 }
@@ -2021,7 +2021,7 @@
  */
 __syscall void *k_queue_peek_tail(struct k_queue *queue);
 
-static inline void *_impl_k_queue_peek_tail(struct k_queue *queue)
+static inline void *z_impl_k_queue_peek_tail(struct k_queue *queue)
 {
 	return z_queue_node_peek(sys_sflist_peek_tail(&queue->data_q), false);
 }
@@ -2384,7 +2384,7 @@
 
 #define _K_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
 	{ \
-	.wait_q = _WAIT_Q_INIT(&obj.wait_q),	\
+	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q),	\
 	.base = stack_buffer, \
 	.next = stack_buffer, \
 	.top = stack_buffer + stack_num_entries, \
@@ -2558,14 +2558,14 @@
  * INTERNAL_HIDDEN @endcond
  */
 
-#define _K_WORK_INITIALIZER(work_handler) \
+#define Z_WORK_INITIALIZER(work_handler) \
 	{ \
 	._reserved = NULL, \
 	.handler = work_handler, \
 	.flags = { 0 } \
 	}
 
-#define K_WORK_INITIALIZER DEPRECATED_MACRO _K_WORK_INITIALIZER
+#define K_WORK_INITIALIZER DEPRECATED_MACRO Z_WORK_INITIALIZER
 
 /**
  * @brief Initialize a statically-defined work item.
@@ -2580,7 +2580,7 @@
  * @req K-WORK-002
  */
 #define K_WORK_DEFINE(work, work_handler) \
-	struct k_work work = _K_WORK_INITIALIZER(work_handler)
+	struct k_work work = Z_WORK_INITIALIZER(work_handler)
 
 /**
  * @brief Initialize a work item.
@@ -2595,7 +2595,7 @@
  */
 static inline void k_work_init(struct k_work *work, k_work_handler_t handler)
 {
-	*work = (struct k_work)_K_WORK_INITIALIZER(handler);
+	*work = (struct k_work)Z_WORK_INITIALIZER(handler);
 }
 
 /**
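Only the internal initializer moved to the Z_ prefix; K_WORK_DEFINE()
and k_work_init() keep their public behavior. For example
('my_work_handler' is illustrative):

   static void my_work_handler(struct k_work *work)
   {
           /* deferred processing, runs in the system workqueue thread */
   }

   K_WORK_DEFINE(my_work, my_work_handler); /* uses Z_WORK_INITIALIZER() */

   void trigger_from_isr(void)
   {
           k_work_submit(&my_work);
   }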
@@ -2910,7 +2910,7 @@
  */
 #define _K_MUTEX_INITIALIZER(obj) \
 	{ \
-	.wait_q = _WAIT_Q_INIT(&obj.wait_q), \
+	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
 	.owner = NULL, \
 	.lock_count = 0, \
 	.owner_orig_prio = K_LOWEST_THREAD_PRIO, \
@@ -3009,7 +3009,7 @@
 
 #define _K_SEM_INITIALIZER(obj, initial_count, count_limit) \
 	{ \
-	.wait_q = _WAIT_Q_INIT(&obj.wait_q), \
+	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
 	.count = initial_count, \
 	.limit = count_limit, \
 	_POLL_EVENT_OBJ_INIT(obj) \
@@ -3097,7 +3097,7 @@
 /**
  * @internal
  */
-static inline void _impl_k_sem_reset(struct k_sem *sem)
+static inline void z_impl_k_sem_reset(struct k_sem *sem)
 {
 	sem->count = 0;
 }
@@ -3117,7 +3117,7 @@
 /**
  * @internal
  */
-static inline unsigned int _impl_k_sem_count_get(struct k_sem *sem)
+static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem)
 {
 	return sem->count;
 }
@@ -3173,7 +3173,7 @@
 
 #define _K_MSGQ_INITIALIZER(obj, q_buffer, q_msg_size, q_max_msgs) \
 	{ \
-	.wait_q = _WAIT_Q_INIT(&obj.wait_q), \
+	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
 	.max_msgs = q_max_msgs, \
 	.msg_size = q_msg_size, \
 	.buffer_start = q_buffer, \
@@ -3375,7 +3375,7 @@
 __syscall void  k_msgq_get_attrs(struct k_msgq *q, struct k_msgq_attrs *attrs);
 
 
-static inline u32_t _impl_k_msgq_num_free_get(struct k_msgq *q)
+static inline u32_t z_impl_k_msgq_num_free_get(struct k_msgq *q)
 {
 	return q->max_msgs - q->used_msgs;
 }
@@ -3392,7 +3392,7 @@
  */
 __syscall u32_t k_msgq_num_used_get(struct k_msgq *q);
 
-static inline u32_t _impl_k_msgq_num_used_get(struct k_msgq *q)
+static inline u32_t z_impl_k_msgq_num_used_get(struct k_msgq *q)
 {
 	return q->used_msgs;
 }
@@ -3467,8 +3467,8 @@
 
 #define _K_MBOX_INITIALIZER(obj) \
 	{ \
-	.tx_msg_queue = _WAIT_Q_INIT(&obj.tx_msg_queue), \
-	.rx_msg_queue = _WAIT_Q_INIT(&obj.rx_msg_queue), \
+	.tx_msg_queue = Z_WAIT_Q_INIT(&obj.tx_msg_queue), \
+	.rx_msg_queue = Z_WAIT_Q_INIT(&obj.rx_msg_queue), \
 	_OBJECT_TRACING_INIT \
 	}
 
@@ -3662,8 +3662,8 @@
 	.write_index = 0,                                           \
 	.lock = {},                                                 \
 	.wait_q = {                                                 \
-		.readers = _WAIT_Q_INIT(&obj.wait_q.readers),       \
-		.writers = _WAIT_Q_INIT(&obj.wait_q.writers)        \
+		.readers = Z_WAIT_Q_INIT(&obj.wait_q.readers),       \
+		.writers = Z_WAIT_Q_INIT(&obj.wait_q.writers)        \
 	},                                                          \
 	_OBJECT_TRACING_INIT                                        \
 	.flags = 0                                                  \
@@ -3828,7 +3828,7 @@
 #define _K_MEM_SLAB_INITIALIZER(obj, slab_buffer, slab_block_size, \
 			       slab_num_blocks) \
 	{ \
-	.wait_q = _WAIT_Q_INIT(&obj.wait_q), \
+	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
 	.num_blocks = slab_num_blocks, \
 	.block_size = slab_block_size, \
 	.buffer = slab_buffer, \
@@ -4009,13 +4009,13 @@
 #define K_MEM_POOL_DEFINE(name, minsz, maxsz, nmax, align)		\
 	char __aligned(align) _mpool_buf_##name[_ALIGN4(maxsz * nmax)	\
 				  + _MPOOL_BITS_SIZE(maxsz, minsz, nmax)]; \
-	struct sys_mem_pool_lvl _mpool_lvls_##name[_MPOOL_LVLS(maxsz, minsz)]; \
+	struct sys_mem_pool_lvl _mpool_lvls_##name[Z_MPOOL_LVLS(maxsz, minsz)]; \
 	struct k_mem_pool name __in_section(_k_mem_pool, static, name) = { \
 		.base = {						\
 			.buf = _mpool_buf_##name,			\
 			.max_sz = maxsz,				\
 			.n_max = nmax,					\
-			.n_levels = _MPOOL_LVLS(maxsz, minsz),		\
+			.n_levels = Z_MPOOL_LVLS(maxsz, minsz),		\
 			.levels = _mpool_lvls_##name,			\
 			.flags = SYS_MEM_POOL_KERNEL			\
 		} \
@@ -4166,7 +4166,7 @@
 	_POLL_NUM_TYPES
 };
 
-#define _POLL_TYPE_BIT(type) (1 << ((type) - 1))
+#define Z_POLL_TYPE_BIT(type) (1 << ((type) - 1))
 
 /* private - states bit positions */
 enum _poll_states_bits {
@@ -4188,7 +4188,7 @@
 	_POLL_NUM_STATES
 };
 
-#define _POLL_STATE_BIT(state) (1 << ((state) - 1))
+#define Z_POLL_STATE_BIT(state) (1 << ((state) - 1))
 
 #define _POLL_EVENT_NUM_UNUSED_BITS \
 	(32 - (0 \
@@ -4211,9 +4211,9 @@
 
 /* public - values for k_poll_event.type bitfield */
 #define K_POLL_TYPE_IGNORE 0
-#define K_POLL_TYPE_SIGNAL _POLL_TYPE_BIT(_POLL_TYPE_SIGNAL)
-#define K_POLL_TYPE_SEM_AVAILABLE _POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE)
-#define K_POLL_TYPE_DATA_AVAILABLE _POLL_TYPE_BIT(_POLL_TYPE_DATA_AVAILABLE)
+#define K_POLL_TYPE_SIGNAL Z_POLL_TYPE_BIT(_POLL_TYPE_SIGNAL)
+#define K_POLL_TYPE_SEM_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE)
+#define K_POLL_TYPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_DATA_AVAILABLE)
 #define K_POLL_TYPE_FIFO_DATA_AVAILABLE K_POLL_TYPE_DATA_AVAILABLE
 
 /* public - polling modes */
@@ -4226,11 +4226,11 @@
 
 /* public - values for k_poll_event.state bitfield */
 #define K_POLL_STATE_NOT_READY 0
-#define K_POLL_STATE_SIGNALED _POLL_STATE_BIT(_POLL_STATE_SIGNALED)
-#define K_POLL_STATE_SEM_AVAILABLE _POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
-#define K_POLL_STATE_DATA_AVAILABLE _POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
+#define K_POLL_STATE_SIGNALED Z_POLL_STATE_BIT(_POLL_STATE_SIGNALED)
+#define K_POLL_STATE_SEM_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
+#define K_POLL_STATE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
 #define K_POLL_STATE_FIFO_DATA_AVAILABLE K_POLL_STATE_DATA_AVAILABLE
-#define K_POLL_STATE_CANCELLED _POLL_STATE_BIT(_POLL_STATE_CANCELLED)
+#define K_POLL_STATE_CANCELLED Z_POLL_STATE_BIT(_POLL_STATE_CANCELLED)
 
 /* public - poll signal object */
 struct k_poll_signal {
@@ -4396,7 +4396,7 @@
  */
 __syscall void k_poll_signal_reset(struct k_poll_signal *signal);
 
-static inline void _impl_k_poll_signal_reset(struct k_poll_signal *signal)
+static inline void z_impl_k_poll_signal_reset(struct k_poll_signal *signal)
 {
 	signal->signaled = 0;
 }
@@ -4441,7 +4441,7 @@
 /**
  * @internal
  */
-extern void _handle_obj_poll_events(sys_dlist_t *events, u32_t state);
+extern void z_handle_obj_poll_events(sys_dlist_t *events, u32_t state);
 
 /** @} */
 
@@ -4486,23 +4486,23 @@
 /**
  * @internal
  */
-extern void _sys_power_save_idle_exit(s32_t ticks);
+extern void z_sys_power_save_idle_exit(s32_t ticks);
 
-#ifdef _ARCH_EXCEPT
+#ifdef Z_ARCH_EXCEPT
 /* This architecture has direct support for triggering a CPU exception */
-#define _k_except_reason(reason)	_ARCH_EXCEPT(reason)
+#define z_except_reason(reason)	Z_ARCH_EXCEPT(reason)
 #else
 
 /* NOTE: This is the implementation for arches that do not implement
- * _ARCH_EXCEPT() to generate a real CPU exception.
+ * Z_ARCH_EXCEPT() to generate a real CPU exception.
  *
  * We won't have a real exception frame to determine the PC value when
  * the oops occurred, so print file and line number before we jump into
  * the fatal error handler.
  */
-#define _k_except_reason(reason) do { \
+#define z_except_reason(reason) do { \
 		printk("@ %s:%d:\n", __FILE__,  __LINE__); \
-		_NanoFatalErrorHandler(reason, &_default_esf); \
+		z_NanoFatalErrorHandler(reason, &_default_esf); \
 		CODE_UNREACHABLE; \
 	} while (false)
 
@@ -4520,7 +4520,7 @@
  * will treat it as an unrecoverable system error, just like k_panic().
  * @req K-MISC-003
  */
-#define k_oops()	_k_except_reason(_NANO_ERR_KERNEL_OOPS)
+#define k_oops()	z_except_reason(_NANO_ERR_KERNEL_OOPS)
 
 /**
  * @brief Fatally terminate the system
@@ -4531,7 +4531,7 @@
  * will be called with reason code _NANO_ERR_KERNEL_PANIC.
  * @req K-MISC-004
  */
-#define k_panic()	_k_except_reason(_NANO_ERR_KERNEL_PANIC)
+#define k_panic()	z_except_reason(_NANO_ERR_KERNEL_PANIC)
 
 /*
  * private APIs that are utilized by one or more public APIs
@@ -4541,22 +4541,22 @@
 /**
  * @internal
  */
-extern void _init_static_threads(void);
+extern void z_init_static_threads(void);
 #else
 /**
  * @internal
  */
-#define _init_static_threads() do { } while (false)
+#define z_init_static_threads() do { } while (false)
 #endif
 
 /**
  * @internal
  */
-extern bool _is_thread_essential(void);
+extern bool z_is_thread_essential(void);
 /**
  * @internal
  */
-extern void _timer_expiration_handler(struct _timeout *t);
+extern void z_timer_expiration_handler(struct _timeout *t);
 
 /* arch/cpu.h may declare an architecture or platform-specific macro
  * for properly declaring stacks, compatible with MMU/MPU constraints if
@@ -4574,16 +4574,16 @@
  */
 #define K_THREAD_STACK_EXTERN(sym) extern k_thread_stack_t sym[]
 
-#ifdef _ARCH_THREAD_STACK_DEFINE
-#define K_THREAD_STACK_DEFINE(sym, size) _ARCH_THREAD_STACK_DEFINE(sym, size)
+#ifdef Z_ARCH_THREAD_STACK_DEFINE
+#define K_THREAD_STACK_DEFINE(sym, size) Z_ARCH_THREAD_STACK_DEFINE(sym, size)
 #define K_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
-		_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size)
-#define K_THREAD_STACK_LEN(size) _ARCH_THREAD_STACK_LEN(size)
-#define K_THREAD_STACK_MEMBER(sym, size) _ARCH_THREAD_STACK_MEMBER(sym, size)
-#define K_THREAD_STACK_SIZEOF(sym) _ARCH_THREAD_STACK_SIZEOF(sym)
+		Z_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size)
+#define K_THREAD_STACK_LEN(size) Z_ARCH_THREAD_STACK_LEN(size)
+#define K_THREAD_STACK_MEMBER(sym, size) Z_ARCH_THREAD_STACK_MEMBER(sym, size)
+#define K_THREAD_STACK_SIZEOF(sym) Z_ARCH_THREAD_STACK_SIZEOF(sym)
 static inline char *K_THREAD_STACK_BUFFER(k_thread_stack_t *sym)
 {
-	return _ARCH_THREAD_STACK_BUFFER(sym);
+	return Z_ARCH_THREAD_STACK_BUFFER(sym);
 }
 #else
 /**
@@ -4849,7 +4849,7 @@
  *        an irq_unlock() key.
  * @param arg Untyped argument to be passed to "fn"
  */
-extern void _arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
+extern void z_arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
 			    void (*fn)(int key, void *data), void *arg);
 
 #ifdef __cplusplus
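The K_THREAD_STACK_* wrappers likewise keep their public names and
delegate to the Z_ARCH_* variants on architectures that define them, so
thread creation code is untouched by the rename. Sketch (names and
sizes are illustrative):

   #define MY_STACK_SIZE 1024

   K_THREAD_STACK_DEFINE(my_stack, MY_STACK_SIZE);
   static struct k_thread my_thread;

   static void my_entry(void *p1, void *p2, void *p3)
   {
           /* thread body */
   }

   void spawn(void)
   {
           k_thread_create(&my_thread, my_stack,
                           K_THREAD_STACK_SIZEOF(my_stack),
                           my_entry, NULL, NULL, NULL,
                           K_PRIO_PREEMPT(1), 0, K_NO_WAIT);
   }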
diff --git a/include/led.h b/include/led.h
index 6c1d140..6edf6f9 100644
--- a/include/led.h
+++ b/include/led.h
@@ -74,7 +74,7 @@
 __syscall int led_blink(struct device *dev, u32_t led,
 			    u32_t delay_on, u32_t delay_off);
 
-static inline int _impl_led_blink(struct device *dev, u32_t led,
+static inline int z_impl_led_blink(struct device *dev, u32_t led,
 			    u32_t delay_on, u32_t delay_off)
 {
 	const struct led_driver_api *api = dev->driver_api;
@@ -96,7 +96,7 @@
 __syscall int led_set_brightness(struct device *dev, u32_t led,
 				     u8_t value);
 
-static inline int _impl_led_set_brightness(struct device *dev, u32_t led,
+static inline int z_impl_led_set_brightness(struct device *dev, u32_t led,
 				     u8_t value)
 {
 	const struct led_driver_api *api = dev->driver_api;
@@ -115,7 +115,7 @@
  */
 __syscall int led_on(struct device *dev, u32_t led);
 
-static inline int _impl_led_on(struct device *dev, u32_t led)
+static inline int z_impl_led_on(struct device *dev, u32_t led)
 {
 	const struct led_driver_api *api = dev->driver_api;
 
@@ -133,7 +133,7 @@
  */
 __syscall int led_off(struct device *dev, u32_t led);
 
-static inline int _impl_led_off(struct device *dev, u32_t led)
+static inline int z_impl_led_off(struct device *dev, u32_t led)
 {
 	const struct led_driver_api *api = dev->driver_api;
 
diff --git a/include/linker/kobject-text.ld b/include/linker/kobject-text.ld
index df8b3e3..a733eeb 100644
--- a/include/linker/kobject-text.ld
+++ b/include/linker/kobject-text.ld
@@ -11,11 +11,11 @@
 	_kobject_text_area_end = .;
 #ifndef LINKER_PASS2
 #ifdef CONFIG_DYNAMIC_OBJECTS
-	PROVIDE(_k_object_gperf_find = .);
-	PROVIDE(_k_object_gperf_wordlist_foreach = .);
+	PROVIDE(z_object_gperf_find = .);
+	PROVIDE(z_object_gperf_wordlist_foreach = .);
 #else
-	PROVIDE(_k_object_find = .);
-	PROVIDE(_k_object_wordlist_foreach = .);
+	PROVIDE(z_object_find = .);
+	PROVIDE(z_object_wordlist_foreach = .);
 #endif
 #endif
 	. += CONFIG_KOBJECT_TEXT_AREA - (_kobject_text_area_end - _kobject_text_area_start);
diff --git a/include/linker/linker-defs.h b/include/linker/linker-defs.h
index b732234..f3a01fa 100644
--- a/include/linker/linker-defs.h
+++ b/include/linker/linker-defs.h
@@ -161,11 +161,11 @@
 extern char __kernel_ram_end[];
 extern char __kernel_ram_size[];
 
-/* Used by _bss_zero or arch-specific implementation */
+/* Used by z_bss_zero or arch-specific implementation */
 extern char __bss_start[];
 extern char __bss_end[];
 
-/* Used by _data_copy() or arch-specific implementation */
+/* Used by z_data_copy() or arch-specific implementation */
 #ifdef CONFIG_XIP
 extern char __data_rom_start[];
 extern char __data_ram_start[];
diff --git a/include/linker/section_tags.h b/include/linker/section_tags.h
index 784272d..14ca8b8 100644
--- a/include/linker/section_tags.h
+++ b/include/linker/section_tags.h
@@ -14,19 +14,19 @@
 #if !defined(_ASMLANGUAGE)
 
 #define __noinit		__in_section_unique(NOINIT)
-#define __irq_vector_table	_GENERIC_SECTION(IRQ_VECTOR_TABLE)
-#define __sw_isr_table		_GENERIC_SECTION(SW_ISR_TABLE)
+#define __irq_vector_table	Z_GENERIC_SECTION(IRQ_VECTOR_TABLE)
+#define __sw_isr_table		Z_GENERIC_SECTION(SW_ISR_TABLE)
 
 #if defined(CONFIG_ARM)
 #define __kinetis_flash_config_section __in_section_unique(KINETIS_FLASH_CONFIG)
-#define __ti_ccfg_section _GENERIC_SECTION(TI_CCFG)
-#define __ccm_data_section _GENERIC_SECTION(_CCM_DATA_SECTION_NAME)
-#define __ccm_bss_section _GENERIC_SECTION(_CCM_BSS_SECTION_NAME)
-#define __ccm_noinit_section _GENERIC_SECTION(_CCM_NOINIT_SECTION_NAME)
-#define __imx_boot_conf_section	_GENERIC_SECTION(IMX_BOOT_CONF)
-#define __imx_boot_data_section	_GENERIC_SECTION(IMX_BOOT_DATA)
-#define __imx_boot_ivt_section	_GENERIC_SECTION(IMX_BOOT_IVT)
-#define __imx_boot_dcd_section	_GENERIC_SECTION(IMX_BOOT_DCD)
+#define __ti_ccfg_section Z_GENERIC_SECTION(TI_CCFG)
+#define __ccm_data_section Z_GENERIC_SECTION(_CCM_DATA_SECTION_NAME)
+#define __ccm_bss_section Z_GENERIC_SECTION(_CCM_BSS_SECTION_NAME)
+#define __ccm_noinit_section Z_GENERIC_SECTION(_CCM_NOINIT_SECTION_NAME)
+#define __imx_boot_conf_section	Z_GENERIC_SECTION(IMX_BOOT_CONF)
+#define __imx_boot_data_section	Z_GENERIC_SECTION(IMX_BOOT_DATA)
+#define __imx_boot_ivt_section	Z_GENERIC_SECTION(IMX_BOOT_IVT)
+#define __imx_boot_dcd_section	Z_GENERIC_SECTION(IMX_BOOT_DCD)
 #endif /* CONFIG_ARM */
 
 #if defined(CONFIG_NOCACHE_MEMORY)
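Z_GENERIC_SECTION() is the building block behind all of these aliases;
it stringifies the (unquoted) segment name into a section attribute. A
sketch with a hypothetical section that the linker script would have to
define:

   /* .my_noncached is an illustrative segment name, not a real one */
   static u8_t dma_buffer[256] Z_GENERIC_SECTION(.my_noncached);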
diff --git a/include/logging/log.h b/include/logging/log.h
index a3c0968..6a05af9 100644
--- a/include/logging/log.h
+++ b/include/logging/log.h
@@ -43,7 +43,7 @@
  * @param ... A string optionally containing printk valid conversion specifier,
  * followed by as many values as specifiers.
  */
-#define LOG_ERR(...)    _LOG(LOG_LEVEL_ERR, __VA_ARGS__)
+#define LOG_ERR(...)    Z_LOG(LOG_LEVEL_ERR, __VA_ARGS__)
 
 /**
  * @brief Writes a WARNING level message to the log.
@@ -54,7 +54,7 @@
  * @param ... A string optionally containing printk valid conversion specifier,
  * followed by as many values as specifiers.
  */
-#define LOG_WRN(...)   _LOG(LOG_LEVEL_WRN, __VA_ARGS__)
+#define LOG_WRN(...)   Z_LOG(LOG_LEVEL_WRN, __VA_ARGS__)
 
 /**
  * @brief Writes an INFO level message to the log.
@@ -64,7 +64,7 @@
  * @param ... A string optionally containing printk valid conversion specifier,
  * followed by as many values as specifiers.
  */
-#define LOG_INF(...)   _LOG(LOG_LEVEL_INF, __VA_ARGS__)
+#define LOG_INF(...)   Z_LOG(LOG_LEVEL_INF, __VA_ARGS__)
 
 /**
  * @brief Writes a DEBUG level message to the log.
@@ -74,7 +74,7 @@
  * @param ... A string optionally containing printk valid conversion specifier,
  * followed by as many values as specifiers.
  */
-#define LOG_DBG(...)    _LOG(LOG_LEVEL_DBG, __VA_ARGS__)
+#define LOG_DBG(...)    Z_LOG(LOG_LEVEL_DBG, __VA_ARGS__)
 
 /**
  * @brief Writes an ERROR level message associated with the instance to the log.
@@ -89,7 +89,7 @@
  * followed by as many values as specifiers.
  */
 #define LOG_INST_ERR(_log_inst, ...) \
-	_LOG_INSTANCE(LOG_LEVEL_ERR, _log_inst, __VA_ARGS__)
+	Z_LOG_INSTANCE(LOG_LEVEL_ERR, _log_inst, __VA_ARGS__)
 
 /**
  * @brief Writes a WARNING level message associated with the instance to the
@@ -105,7 +105,7 @@
  *                  specifier, followed by as many values as specifiers.
  */
 #define LOG_INST_WRN(_log_inst, ...) \
-	_LOG_INSTANCE(LOG_LEVEL_WRN, _log_inst, __VA_ARGS__)
+	Z_LOG_INSTANCE(LOG_LEVEL_WRN, _log_inst, __VA_ARGS__)
 
 /**
  * @brief Writes an INFO level message associated with the instance to the log.
@@ -120,7 +120,7 @@
  * followed by as many values as specifiers.
  */
 #define LOG_INST_INF(_log_inst, ...) \
-	_LOG_INSTANCE(LOG_LEVEL_INF, _log_inst, __VA_ARGS__)
+	Z_LOG_INSTANCE(LOG_LEVEL_INF, _log_inst, __VA_ARGS__)
 
 /**
  * @brief Writes a DEBUG level message associated with the instance to the log.
@@ -135,7 +135,7 @@
  * followed by as many values as specifiers.
  */
 #define LOG_INST_DBG(_log_inst, ...) \
-	_LOG_INSTANCE(LOG_LEVEL_DBG, _log_inst, __VA_ARGS__)
+	Z_LOG_INSTANCE(LOG_LEVEL_DBG, _log_inst, __VA_ARGS__)
 
 /**
  * @brief Writes an ERROR level hexdump message to the log.
@@ -148,7 +148,7 @@
  * @param _str    Persistent, raw string.
  */
 #define LOG_HEXDUMP_ERR(_data, _length, _str) \
-	_LOG_HEXDUMP(LOG_LEVEL_ERR, _data, _length, _str)
+	Z_LOG_HEXDUMP(LOG_LEVEL_ERR, _data, _length, _str)
 
 /**
  * @brief Writes a WARNING level message to the log.
@@ -161,7 +161,7 @@
  * @param _str    Persistent, raw string.
  */
 #define LOG_HEXDUMP_WRN(_data, _length, _str) \
-	_LOG_HEXDUMP(LOG_LEVEL_WRN, _data, _length, _str)
+	Z_LOG_HEXDUMP(LOG_LEVEL_WRN, _data, _length, _str)
 
 /**
  * @brief Writes an INFO level message to the log.
@@ -173,7 +173,7 @@
  * @param _str    Persistent, raw string.
  */
 #define LOG_HEXDUMP_INF(_data, _length, _str) \
-	_LOG_HEXDUMP(LOG_LEVEL_INF, _data, _length, _str)
+	Z_LOG_HEXDUMP(LOG_LEVEL_INF, _data, _length, _str)
 
 /**
  * @brief Writes a DEBUG level message to the log.
@@ -185,7 +185,7 @@
  * @param _str    Persistent, raw string.
  */
 #define LOG_HEXDUMP_DBG(_data, _length, _str) \
-	_LOG_HEXDUMP(LOG_LEVEL_DBG, _data, _length, _str)
+	Z_LOG_HEXDUMP(LOG_LEVEL_DBG, _data, _length, _str)
 
 /**
  * @brief Writes an ERROR hexdump message associated with the instance to the
@@ -202,7 +202,7 @@
  * @param _str        Persistent, raw string.
  */
 #define LOG_INST_HEXDUMP_ERR(_log_inst, _data, _length, _str) \
-	_LOG_HEXDUMP_INSTANCE(LOG_LEVEL_ERR, _log_inst, _data, _length, _str)
+	Z_LOG_HEXDUMP_INSTANCE(LOG_LEVEL_ERR, _log_inst, _data, _length, _str)
 
 /**
  * @brief Writes a WARNING level hexdump message associated with the instance to
@@ -217,7 +217,7 @@
  * @param _str        Persistent, raw string.
  */
 #define LOG_INST_HEXDUMP_WRN(_log_inst, _data, _length, _str) \
-	_LOG_HEXDUMP_INSTANCE(LOG_LEVEL_WRN, _log_inst, _data, _length, _str)
+	Z_LOG_HEXDUMP_INSTANCE(LOG_LEVEL_WRN, _log_inst, _data, _length, _str)
 
 /**
  * @brief Writes an INFO level hexdump message associated with the instance to
@@ -231,7 +231,7 @@
  * @param _str        Persistent, raw string.
  */
 #define LOG_INST_HEXDUMP_INF(_log_inst, _data, _length, _str) \
-	_LOG_HEXDUMP_INSTANCE(LOG_LEVEL_INF, _log_inst, _data, _length, _str)
+	Z_LOG_HEXDUMP_INSTANCE(LOG_LEVEL_INF, _log_inst, _data, _length, _str)
 
 /**
  * @brief Writes a DEBUG level hexdump message associated with the instance to
@@ -245,7 +245,7 @@
  * @param _str        Persistent, raw string.
  */
 #define LOG_INST_HEXDUMP_DBG(_log_inst, _data, _length, _str)	\
-	_LOG_HEXDUMP_INSTANCE(LOG_LEVEL_DBG, _log_inst, _data, _length, _str)
+	Z_LOG_HEXDUMP_INSTANCE(LOG_LEVEL_DBG, _log_inst, _data, _length, _str)
 
 /**
  * @brief Writes a formatted string to the log.
@@ -284,7 +284,7 @@
 #define _LOG_LEVEL_RESOLVE(...) LOG_LEVEL_NONE
 #else
 #define _LOG_LEVEL_RESOLVE(...) \
-	_LOG_EVAL(LOG_LEVEL, \
+	Z_LOG_EVAL(LOG_LEVEL, \
 		  (GET_ARG2(__VA_ARGS__, LOG_LEVEL)), \
 		  (GET_ARG2(__VA_ARGS__, CONFIG_LOG_DEFAULT_LEVEL)))
 #endif
@@ -309,7 +309,7 @@
 	__attribute__((used))
 
 #define _LOG_MODULE_DYNAMIC_DATA_COND_CREATE(_name)		\
-	_LOG_EVAL(						\
+	Z_LOG_EVAL(						\
 		IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING),	\
 		(_LOG_MODULE_DYNAMIC_DATA_CREATE(_name);),	\
 		()						\
@@ -353,7 +353,7 @@
 
 
 #define LOG_MODULE_REGISTER(...)					\
-	_LOG_EVAL(							\
+	Z_LOG_EVAL(							\
 		_LOG_LEVEL_RESOLVE(__VA_ARGS__),			\
 		(_LOG_MODULE_DATA_CREATE(GET_ARG1(__VA_ARGS__),		\
 				      _LOG_LEVEL_RESOLVE(__VA_ARGS__))),\
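The public LOG_* macros are untouched; only the internal _LOG*/Z_LOG*
layer is renamed, so typical call sites stay as-is ('my_module', 'err',
'buf' and 'len' are placeholders):

   #include <logging/log.h>
   LOG_MODULE_REGISTER(my_module, LOG_LEVEL_INF);

   void report(int err, const u8_t *buf, size_t len)
   {
           LOG_ERR("operation failed: %d", err); /* expands via Z_LOG() */
           LOG_HEXDUMP_DBG(buf, len, "payload"); /* via Z_LOG_HEXDUMP() */
   }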
diff --git a/include/logging/log_core.h b/include/logging/log_core.h
index c17c4a6..2cc1e95 100644
--- a/include/logging/log_core.h
+++ b/include/logging/log_core.h
@@ -35,10 +35,10 @@
  *
  * See the @ref IS_ENABLED macro for a detailed explanation of the trick.
  */
-#define _LOG_RESOLVED_LEVEL(_level, _default) \
-	_LOG_RESOLVED_LEVEL1(_level, _default)
+#define Z_LOG_RESOLVED_LEVEL(_level, _default) \
+	Z_LOG_RESOLVED_LEVEL1(_level, _default)
 
-#define _LOG_RESOLVED_LEVEL1(_level, _default) \
+#define Z_LOG_RESOLVED_LEVEL1(_level, _default) \
 	__COND_CODE(_LOG_XXXX##_level, (_level), (_default))
 
 #define _LOG_XXXX0 _LOG_YYYY,
@@ -61,10 +61,10 @@
  * @param _iffalse    Code that should be inserted when evaluated to false.
  *		      Note that the parameter must be provided in brackets.
  */
-#define _LOG_EVAL(_eval_level, _iftrue, _iffalse) \
-	_LOG_EVAL1(_eval_level, _iftrue, _iffalse)
+#define Z_LOG_EVAL(_eval_level, _iftrue, _iffalse) \
+	Z_LOG_EVAL1(_eval_level, _iftrue, _iffalse)
 
-#define _LOG_EVAL1(_eval_level, _iftrue, _iffalse) \
+#define Z_LOG_EVAL1(_eval_level, _iftrue, _iffalse) \
 	__COND_CODE(_LOG_ZZZZ##_eval_level, _iftrue, _iffalse)
 
 #define _LOG_ZZZZ1 _LOG_YYYY,
@@ -77,7 +77,7 @@
  * It is evaluated to LOG_LEVEL if defined. Otherwise CONFIG_LOG_DEFAULT_LEVEL
  * is used.
  */
-#define _LOG_LEVEL() _LOG_RESOLVED_LEVEL(LOG_LEVEL, CONFIG_LOG_DEFAULT_LEVEL)
+#define _LOG_LEVEL() Z_LOG_RESOLVED_LEVEL(LOG_LEVEL, CONFIG_LOG_DEFAULT_LEVEL)
 
 /**
  *  @def LOG_CONST_ID_GET
@@ -86,7 +86,7 @@
  *  @param _addr Address of the element.
  */
 #define LOG_CONST_ID_GET(_addr) \
-	_LOG_EVAL(\
+	Z_LOG_EVAL(\
 	  CONFIG_LOG,\
 	  (__log_level ? \
 	  log_const_source_id((const struct log_source_const_data *)_addr) : \
@@ -113,7 +113,7 @@
  *  @param _addr Address of the element.
  */
 #define LOG_DYNAMIC_ID_GET(_addr) \
-	_LOG_EVAL(\
+	Z_LOG_EVAL(\
 	  CONFIG_LOG,\
 	  (__log_level ? \
 	  log_dynamic_source_id((struct log_source_dynamic_data *)_addr) : 0),\
@@ -131,7 +131,7 @@
  *	  used.
  */
 
-#define _LOG_STR(...) "%s: " GET_ARG1(__VA_ARGS__), __func__\
+#define Z_LOG_STR(...) "%s: " GET_ARG1(__VA_ARGS__), __func__\
 		COND_CODE_0(NUM_VA_ARGS_LESS_1(__VA_ARGS__),\
 			    (),\
 			    (, GET_ARGS_LESS_1(__VA_ARGS__))\
@@ -141,7 +141,7 @@
 /******************************************************************************/
 /****************** Internal macros for log frontend **************************/
 /******************************************************************************/
-/**@brief Second stage for _LOG_NARGS_POSTFIX */
+/**@brief Second stage for Z_LOG_NARGS_POSTFIX */
 #define _LOG_NARGS_POSTFIX_IMPL(				\
 	_ignored,						\
 	_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10,		\
@@ -155,18 +155,18 @@
  *
  * @retval  Postfix: the number of arguments, or _LONG when there are more
  *          than 3 arguments.
  */
-#define _LOG_NARGS_POSTFIX(...) \
+#define Z_LOG_NARGS_POSTFIX(...) \
 	_LOG_NARGS_POSTFIX_IMPL(__VA_ARGS__, LONG, LONG, LONG, LONG, LONG, \
 			LONG, LONG, LONG, LONG, LONG, LONG, LONG, 3, 2, 1, 0, ~)
 
-#define _LOG_INTERNAL_X(N, ...)  UTIL_CAT(_LOG_INTERNAL_, N)(__VA_ARGS__)
+#define Z_LOG_INTERNAL_X(N, ...)  UTIL_CAT(_LOG_INTERNAL_, N)(__VA_ARGS__)
 
 #define __LOG_INTERNAL(_src_level, ...)					 \
 	do {								 \
 		if (IS_ENABLED(CONFIG_LOG_IMMEDIATE)) {		 \
 			log_string_sync(_src_level, __VA_ARGS__);	 \
 		} else {						 \
-			_LOG_INTERNAL_X(_LOG_NARGS_POSTFIX(__VA_ARGS__), \
+			Z_LOG_INTERNAL_X(Z_LOG_NARGS_POSTFIX(__VA_ARGS__), \
 						_src_level, __VA_ARGS__);\
 		}							 \
 	} while (false)
@@ -193,13 +193,12 @@
 		log_n(_str, args, ARRAY_SIZE(args), _src_level); \
 	} while (false)
 
-#define _LOG_LEVEL_CHECK(_level, _check_level, _default_level) \
-	(_level <= _LOG_RESOLVED_LEVEL(_check_level, _default_level))
+#define Z_LOG_LEVEL_CHECK(_level, _check_level, _default_level) \
+	(_level <= Z_LOG_RESOLVED_LEVEL(_check_level, _default_level))
 
-#define _LOG_CONST_LEVEL_CHECK(_level)					    \
+#define Z_LOG_CONST_LEVEL_CHECK(_level)					    \
 	(IS_ENABLED(CONFIG_LOG) &&					    \
-	(								    \
-	_LOG_LEVEL_CHECK(_level, CONFIG_LOG_OVERRIDE_LEVEL, LOG_LEVEL_NONE) \
+	(Z_LOG_LEVEL_CHECK(_level, CONFIG_LOG_OVERRIDE_LEVEL, LOG_LEVEL_NONE) \
 	||								    \
 	(!IS_ENABLED(CONFIG_LOG_OVERRIDE_LEVEL) &&			    \
 	(_level <= __log_level) &&					    \
@@ -212,7 +211,7 @@
 /******************************************************************************/
 #define __LOG(_level, _id, _filter, ...)				    \
 	do {								    \
-		if (_LOG_CONST_LEVEL_CHECK(_level) &&			    \
+		if (Z_LOG_CONST_LEVEL_CHECK(_level) &&			    \
 		    (_level <= LOG_RUNTIME_FILTER(_filter))) {		    \
 			struct log_msg_ids src_level = {		    \
 				.level = _level,			    \
@@ -222,7 +221,7 @@
 									    \
 			if ((1 << _level) & LOG_FUNCTION_PREFIX_MASK) {	    \
 				__LOG_INTERNAL(src_level,		    \
-						_LOG_STR(__VA_ARGS__));	    \
+						Z_LOG_STR(__VA_ARGS__));	    \
 			} else {					    \
 				__LOG_INTERNAL(src_level, __VA_ARGS__);	    \
 			}						    \
@@ -234,13 +233,13 @@
 		}							    \
 	} while (false)
 
-#define _LOG(_level, ...)			       \
+#define Z_LOG(_level, ...)			       \
 	__LOG(_level,				       \
 	      (u16_t)LOG_CURRENT_MODULE_ID(),	       \
 	      LOG_CURRENT_DYNAMIC_DATA_ADDR(),	       \
 	      __VA_ARGS__)
 
-#define _LOG_INSTANCE(_level, _inst, ...)		 \
+#define Z_LOG_INSTANCE(_level, _inst, ...)		 \
 	__LOG(_level,					 \
 	      IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING) ? \
 	      LOG_DYNAMIC_ID_GET(_inst) :		 \
@@ -254,7 +253,7 @@
 /******************************************************************************/
 #define __LOG_HEXDUMP(_level, _id, _filter, _data, _length, _str)	      \
 	do {								      \
-		if (_LOG_CONST_LEVEL_CHECK(_level) &&			      \
+		if (Z_LOG_CONST_LEVEL_CHECK(_level) &&			      \
 		    (_level <= LOG_RUNTIME_FILTER(_filter))) {		      \
 			struct log_msg_ids src_level = {		      \
 				.level = _level,			      \
@@ -271,13 +270,13 @@
 		}							      \
 	} while (false)
 
-#define _LOG_HEXDUMP(_level, _data, _length, _str)	       \
+#define Z_LOG_HEXDUMP(_level, _data, _length, _str)	       \
 	__LOG_HEXDUMP(_level,				       \
 		      LOG_CURRENT_MODULE_ID(),		       \
 		      LOG_CURRENT_DYNAMIC_DATA_ADDR(),	       \
 		      _data, _length, _str)
 
-#define _LOG_HEXDUMP_INSTANCE(_level, _inst, _data, _length, _str) \
+#define Z_LOG_HEXDUMP_INSTANCE(_level, _inst, _data, _length, _str) \
 	__LOG_HEXDUMP(_level,					   \
 		      IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING) ?   \
 		      LOG_DYNAMIC_ID_GET(_inst) :		   \
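Z_LOG_EVAL() acts as a preprocessor-time if: when _eval_level expands
to the literal 1, the first bracketed fragment is pasted, otherwise the
second. An illustrative expansion, not taken from this patch:

   /* With CONFIG_LOG defined to 1 by Kconfig: */
   Z_LOG_EVAL(CONFIG_LOG, (int when_enabled;), (int when_disabled;))
   /* pastes:  int when_enabled;
    * With CONFIG_LOG undefined or 0, the second fragment is pasted
    * instead, which is how logging bookkeeping is compiled out.
    */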
diff --git a/include/logging/log_instance.h b/include/logging/log_instance.h
index c513b9d..e44565b 100644
--- a/include/logging/log_instance.h
+++ b/include/logging/log_instance.h
@@ -43,7 +43,7 @@
  */
 #define LOG_ITEM_CONST_DATA(_name) UTIL_CAT(log_const_, _name)
 
-#define _LOG_CONST_ITEM_REGISTER(_name, _str_name, _level)		     \
+#define Z_LOG_CONST_ITEM_REGISTER(_name, _str_name, _level)		     \
 	const struct log_source_const_data LOG_ITEM_CONST_DATA(_name)	     \
 	__attribute__ ((section("." STRINGIFY(LOG_ITEM_CONST_DATA(_name))))) \
 	__attribute__((used)) = {					     \
@@ -78,7 +78,7 @@
 	struct log_source_dynamic_data *_name
 
 #define LOG_INSTANCE_REGISTER(_module_name, _inst_name, _level)		   \
-	_LOG_CONST_ITEM_REGISTER(					   \
+	Z_LOG_CONST_ITEM_REGISTER(					   \
 		LOG_INSTANCE_FULL_NAME(_module_name, _inst_name),	   \
 		STRINGIFY(_module_name._inst_name),			   \
 		_level);						   \
@@ -99,7 +99,7 @@
 	const struct log_source_const_data *_name
 
 #define LOG_INSTANCE_REGISTER(_module_name, _inst_name, _level)	  \
-	_LOG_CONST_ITEM_REGISTER(				  \
+	Z_LOG_CONST_ITEM_REGISTER(				  \
 		LOG_INSTANCE_FULL_NAME(_module_name, _inst_name), \
 		STRINGIFY(_module_name._inst_name),		  \
 		_level)
diff --git a/include/logging/log_msg.h b/include/logging/log_msg.h
index 98743c6..73daec6 100644
--- a/include/logging/log_msg.h
+++ b/include/logging/log_msg.h
@@ -332,7 +332,7 @@
  *
  *  @return Allocated chunk or NULL.
  */
-static inline struct log_msg *_log_msg_std_alloc(void)
+static inline struct log_msg *z_log_msg_std_alloc(void)
 {
 	struct  log_msg *msg = (struct  log_msg *)log_msg_chunk_alloc();
 
@@ -356,7 +356,7 @@
  */
 static inline struct log_msg *log_msg_create_0(const char *str)
 {
-	struct log_msg *msg = _log_msg_std_alloc();
+	struct log_msg *msg = z_log_msg_std_alloc();
 
 	if (msg != NULL) {
 		msg->str = str;
@@ -381,7 +381,7 @@
 static inline struct log_msg *log_msg_create_1(const char *str,
 					       u32_t arg1)
 {
-	struct  log_msg *msg = _log_msg_std_alloc();
+	struct  log_msg *msg = z_log_msg_std_alloc();
 
 	if (msg != NULL) {
 		msg->str = str;
@@ -410,7 +410,7 @@
 					       u32_t arg1,
 					       u32_t arg2)
 {
-	struct  log_msg *msg = _log_msg_std_alloc();
+	struct  log_msg *msg = z_log_msg_std_alloc();
 
 	if (msg != NULL) {
 		msg->str = str;
@@ -442,7 +442,7 @@
 					       u32_t arg2,
 					       u32_t arg3)
 {
-	struct  log_msg *msg = _log_msg_std_alloc();
+	struct  log_msg *msg = z_log_msg_std_alloc();
 
 	if (msg != NULL) {
 		msg->str = str;
diff --git a/include/misc/__assert.h b/include/misc/__assert.h
index 2c543cb..2b76182 100644
--- a/include/misc/__assert.h
+++ b/include/misc/__assert.h
@@ -85,7 +85,7 @@
 
 #define __ASSERT_LOC(test)                               \
 	printk("ASSERTION FAIL [%s] @ %s:%d\n",    \
-	       _STRINGIFY(test),                         \
+	       Z_STRINGIFY(test),                         \
 	       __FILE__,                                 \
 	       __LINE__)                                 \
 
diff --git a/include/misc/libc-hooks.h b/include/misc/libc-hooks.h
index 544c094..e990283 100644
--- a/include/misc/libc-hooks.h
+++ b/include/misc/libc-hooks.h
@@ -23,16 +23,16 @@
  */
 #define _MLIBC_RESTRICT
 
-__syscall int _zephyr_read_stdin(char *buf, int nbytes);
+__syscall int z_zephyr_read_stdin(char *buf, int nbytes);
 
-__syscall int _zephyr_write_stdout(const void *buf, int nbytes);
+__syscall int z_zephyr_write_stdout(const void *buf, int nbytes);
 
 #else
 /* Minimal libc */
 
 __syscall int _zephyr_fputc(int c, FILE *stream);
 
-__syscall size_t _zephyr_fwrite(const void *_MLIBC_RESTRICT ptr, size_t size,
+__syscall size_t z_zephyr_fwrite(const void *_MLIBC_RESTRICT ptr, size_t size,
 				size_t nitems, FILE *_MLIBC_RESTRICT stream);
 #endif /* CONFIG_NEWLIB_LIBC */
 
diff --git a/include/misc/mempool.h b/include/misc/mempool.h
index a4d380a..f076f48 100644
--- a/include/misc/mempool.h
+++ b/include/misc/mempool.h
@@ -47,17 +47,17 @@
  * @param section Destination binary section for pool data
  */
 #define SYS_MEM_POOL_DEFINE(name, kmutex, minsz, maxsz, nmax, align, section) \
-	char __aligned(align) _GENERIC_SECTION(section)			\
+	char __aligned(align) Z_GENERIC_SECTION(section)			\
 		_mpool_buf_##name[_ALIGN4(maxsz * nmax)			\
 				  + _MPOOL_BITS_SIZE(maxsz, minsz, nmax)]; \
-	struct sys_mem_pool_lvl _GENERIC_SECTION(section)		\
-		_mpool_lvls_##name[_MPOOL_LVLS(maxsz, minsz)];		\
-	_GENERIC_SECTION(section) struct sys_mem_pool name = {		\
+	struct sys_mem_pool_lvl Z_GENERIC_SECTION(section)		\
+		_mpool_lvls_##name[Z_MPOOL_LVLS(maxsz, minsz)];		\
+	Z_GENERIC_SECTION(section) struct sys_mem_pool name = {		\
 		.base = {						\
 			.buf = _mpool_buf_##name,			\
 			.max_sz = maxsz,				\
 			.n_max = nmax,					\
-			.n_levels = _MPOOL_LVLS(maxsz, minsz),		\
+			.n_levels = Z_MPOOL_LVLS(maxsz, minsz),		\
 			.levels = _mpool_lvls_##name,			\
 			.flags = SYS_MEM_POOL_USER			\
 		},							\
@@ -74,7 +74,7 @@
  */
 static inline void sys_mem_pool_init(struct sys_mem_pool *p)
 {
-	_sys_mem_pool_base_init(&p->base);
+	z_sys_mem_pool_base_init(&p->base);
 }
 
 /**
diff --git a/include/misc/mempool_base.h b/include/misc/mempool_base.h
index 39d9594..68efef6 100644
--- a/include/misc/mempool_base.h
+++ b/include/misc/mempool_base.h
@@ -38,75 +38,75 @@
 
 #define _ALIGN4(n) ((((n)+3)/4)*4)
 
-#define _MPOOL_HAVE_LVL(maxsz, minsz, l) (((maxsz) >> (2*(l))) \
+#define Z_MPOOL_HAVE_LVL(maxsz, minsz, l) (((maxsz) >> (2*(l))) \
 					  >= (minsz) ? 1 : 0)
 
 #define __MPOOL_LVLS(maxsz, minsz)		\
-	(_MPOOL_HAVE_LVL((maxsz), (minsz), 0) +	\
-	_MPOOL_HAVE_LVL((maxsz), (minsz), 1) +	\
-	_MPOOL_HAVE_LVL((maxsz), (minsz), 2) +	\
-	_MPOOL_HAVE_LVL((maxsz), (minsz), 3) +	\
-	_MPOOL_HAVE_LVL((maxsz), (minsz), 4) +	\
-	_MPOOL_HAVE_LVL((maxsz), (minsz), 5) +	\
-	_MPOOL_HAVE_LVL((maxsz), (minsz), 6) +	\
-	_MPOOL_HAVE_LVL((maxsz), (minsz), 7) +	\
-	_MPOOL_HAVE_LVL((maxsz), (minsz), 8) +	\
-	_MPOOL_HAVE_LVL((maxsz), (minsz), 9) +	\
-	_MPOOL_HAVE_LVL((maxsz), (minsz), 10) +	\
-	_MPOOL_HAVE_LVL((maxsz), (minsz), 11) +	\
-	_MPOOL_HAVE_LVL((maxsz), (minsz), 12) +	\
-	_MPOOL_HAVE_LVL((maxsz), (minsz), 13) +	\
-	_MPOOL_HAVE_LVL((maxsz), (minsz), 14) +	\
-	_MPOOL_HAVE_LVL((maxsz), (minsz), 15))
+	(Z_MPOOL_HAVE_LVL((maxsz), (minsz), 0) +	\
+	Z_MPOOL_HAVE_LVL((maxsz), (minsz), 1) +	\
+	Z_MPOOL_HAVE_LVL((maxsz), (minsz), 2) +	\
+	Z_MPOOL_HAVE_LVL((maxsz), (minsz), 3) +	\
+	Z_MPOOL_HAVE_LVL((maxsz), (minsz), 4) +	\
+	Z_MPOOL_HAVE_LVL((maxsz), (minsz), 5) +	\
+	Z_MPOOL_HAVE_LVL((maxsz), (minsz), 6) +	\
+	Z_MPOOL_HAVE_LVL((maxsz), (minsz), 7) +	\
+	Z_MPOOL_HAVE_LVL((maxsz), (minsz), 8) +	\
+	Z_MPOOL_HAVE_LVL((maxsz), (minsz), 9) +	\
+	Z_MPOOL_HAVE_LVL((maxsz), (minsz), 10) +	\
+	Z_MPOOL_HAVE_LVL((maxsz), (minsz), 11) +	\
+	Z_MPOOL_HAVE_LVL((maxsz), (minsz), 12) +	\
+	Z_MPOOL_HAVE_LVL((maxsz), (minsz), 13) +	\
+	Z_MPOOL_HAVE_LVL((maxsz), (minsz), 14) +	\
+	Z_MPOOL_HAVE_LVL((maxsz), (minsz), 15))
 
 #define _MPOOL_MINBLK sizeof(sys_dnode_t)
 
-#define _MPOOL_LVLS(maxsz, minsz)		\
+#define Z_MPOOL_LVLS(maxsz, minsz)		\
 	__MPOOL_LVLS((maxsz), (minsz) >= _MPOOL_MINBLK ? (minsz) : \
 		     _MPOOL_MINBLK)
 
 /* Rounds the needed bits up to integer multiples of u32_t */
-#define _MPOOL_LBIT_WORDS_UNCLAMPED(n_max, l) \
+#define Z_MPOOL_LBIT_WORDS_UNCLAMPED(n_max, l) \
 	((((n_max) << (2*(l))) + 31) / 32)
 
 /* One word gets stored free unioned with the pointer, otherwise the
  * calculated unclamped value
  */
-#define _MPOOL_LBIT_WORDS(n_max, l)			\
-	(_MPOOL_LBIT_WORDS_UNCLAMPED(n_max, l) < 2 ? 0	\
-	 : _MPOOL_LBIT_WORDS_UNCLAMPED(n_max, l))
+#define Z_MPOOL_LBIT_WORDS(n_max, l)			\
+	(Z_MPOOL_LBIT_WORDS_UNCLAMPED(n_max, l) < 2 ? 0	\
+	 : Z_MPOOL_LBIT_WORDS_UNCLAMPED(n_max, l))
 
 /* How many bytes for the bitfields of a single level? */
-#define _MPOOL_LBIT_BYTES(maxsz, minsz, l, n_max)	\
-	(_MPOOL_LVLS((maxsz), (minsz)) >= (l) ?		\
-	 4 * _MPOOL_LBIT_WORDS((n_max), l) : 0)
+#define Z_MPOOL_LBIT_BYTES(maxsz, minsz, l, n_max)	\
+	(Z_MPOOL_LVLS((maxsz), (minsz)) >= (l) ?		\
+	 4 * Z_MPOOL_LBIT_WORDS((n_max), l) : 0)
 
 /* Size of the bitmap array that follows the buffer in allocated memory */
 #define _MPOOL_BITS_SIZE(maxsz, minsz, n_max) \
-	(_MPOOL_LBIT_BYTES(maxsz, minsz, 0, n_max) +	\
-	_MPOOL_LBIT_BYTES(maxsz, minsz, 1, n_max) +	\
-	_MPOOL_LBIT_BYTES(maxsz, minsz, 2, n_max) +	\
-	_MPOOL_LBIT_BYTES(maxsz, minsz, 3, n_max) +	\
-	_MPOOL_LBIT_BYTES(maxsz, minsz, 4, n_max) +	\
-	_MPOOL_LBIT_BYTES(maxsz, minsz, 5, n_max) +	\
-	_MPOOL_LBIT_BYTES(maxsz, minsz, 6, n_max) +	\
-	_MPOOL_LBIT_BYTES(maxsz, minsz, 7, n_max) +	\
-	_MPOOL_LBIT_BYTES(maxsz, minsz, 8, n_max) +	\
-	_MPOOL_LBIT_BYTES(maxsz, minsz, 9, n_max) +	\
-	_MPOOL_LBIT_BYTES(maxsz, minsz, 10, n_max) +	\
-	_MPOOL_LBIT_BYTES(maxsz, minsz, 11, n_max) +	\
-	_MPOOL_LBIT_BYTES(maxsz, minsz, 12, n_max) +	\
-	_MPOOL_LBIT_BYTES(maxsz, minsz, 13, n_max) +	\
-	_MPOOL_LBIT_BYTES(maxsz, minsz, 14, n_max) +	\
-	_MPOOL_LBIT_BYTES(maxsz, minsz, 15, n_max))
+	(Z_MPOOL_LBIT_BYTES(maxsz, minsz, 0, n_max) +	\
+	Z_MPOOL_LBIT_BYTES(maxsz, minsz, 1, n_max) +	\
+	Z_MPOOL_LBIT_BYTES(maxsz, minsz, 2, n_max) +	\
+	Z_MPOOL_LBIT_BYTES(maxsz, minsz, 3, n_max) +	\
+	Z_MPOOL_LBIT_BYTES(maxsz, minsz, 4, n_max) +	\
+	Z_MPOOL_LBIT_BYTES(maxsz, minsz, 5, n_max) +	\
+	Z_MPOOL_LBIT_BYTES(maxsz, minsz, 6, n_max) +	\
+	Z_MPOOL_LBIT_BYTES(maxsz, minsz, 7, n_max) +	\
+	Z_MPOOL_LBIT_BYTES(maxsz, minsz, 8, n_max) +	\
+	Z_MPOOL_LBIT_BYTES(maxsz, minsz, 9, n_max) +	\
+	Z_MPOOL_LBIT_BYTES(maxsz, minsz, 10, n_max) +	\
+	Z_MPOOL_LBIT_BYTES(maxsz, minsz, 11, n_max) +	\
+	Z_MPOOL_LBIT_BYTES(maxsz, minsz, 12, n_max) +	\
+	Z_MPOOL_LBIT_BYTES(maxsz, minsz, 13, n_max) +	\
+	Z_MPOOL_LBIT_BYTES(maxsz, minsz, 14, n_max) +	\
+	Z_MPOOL_LBIT_BYTES(maxsz, minsz, 15, n_max))
 
 
-void _sys_mem_pool_base_init(struct sys_mem_pool_base *p);
+void z_sys_mem_pool_base_init(struct sys_mem_pool_base *p);
 
-int _sys_mem_pool_block_alloc(struct sys_mem_pool_base *p, size_t size,
+int z_sys_mem_pool_block_alloc(struct sys_mem_pool_base *p, size_t size,
 			      u32_t *level_p, u32_t *block_p, void **data_p);
 
-void _sys_mem_pool_block_free(struct sys_mem_pool_base *p, u32_t level,
+void z_sys_mem_pool_block_free(struct sys_mem_pool_base *p, u32_t level,
 			      u32_t block);
 
 #endif /* ZEPHYR_INCLUDE_MISC_MEMPOOL_BASE_H_ */
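
A worked instance of the level arithmetic above, with illustrative sizes: for maxsz = 1024 and minsz = 16, Z_MPOOL_HAVE_LVL() is 1 for levels 0..3 (block sizes 1024, 256, 64 and 16) and 0 from level 4 onward (1024 >> 8 = 4, below minsz), so Z_MPOOL_LVLS(1024, 16) evaluates to 4. Assuming Zephyr's BUILD_ASSERT() is in scope, that can be checked at compile time:

    #include <toolchain.h>
    #include <misc/mempool_base.h>

    /* each level quarters the block size until it would drop below
     * the minimum block size
     */
    BUILD_ASSERT(Z_MPOOL_LVLS(1024, 16) == 4);
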
diff --git a/include/misc/rb.h b/include/misc/rb.h
index 207f15b..467e834 100644
--- a/include/misc/rb.h
+++ b/include/misc/rb.h
@@ -85,12 +85,12 @@
 
 typedef void (*rb_visit_t)(struct rbnode *node, void *cookie);
 
-struct rbnode *_rb_child(struct rbnode *node, int side);
-int _rb_is_black(struct rbnode *node);
+struct rbnode *z_rb_child(struct rbnode *node, int side);
+int z_rb_is_black(struct rbnode *node);
 #ifndef CONFIG_MISRA_SANE
-void _rb_walk(struct rbnode *node, rb_visit_t visit_fn, void *cookie);
+void z_rb_walk(struct rbnode *node, rb_visit_t visit_fn, void *cookie);
 #endif
-struct rbnode *_rb_get_minmax(struct rbtree *tree, int side);
+struct rbnode *z_rb_get_minmax(struct rbtree *tree, int side);
 
 /**
  * @brief Insert node into tree
@@ -107,7 +107,7 @@
  */
 static inline struct rbnode *rb_get_min(struct rbtree *tree)
 {
-	return _rb_get_minmax(tree, 0);
+	return z_rb_get_minmax(tree, 0);
 }
 
 /**
@@ -115,7 +115,7 @@
  */
 static inline struct rbnode *rb_get_max(struct rbtree *tree)
 {
-	return _rb_get_minmax(tree, 1);
+	return z_rb_get_minmax(tree, 1);
 }
 
 /**
@@ -141,7 +141,7 @@
 static inline void rb_walk(struct rbtree *tree, rb_visit_t visit_fn,
 			   void *cookie)
 {
-	_rb_walk(tree->root, visit_fn, cookie);
+	z_rb_walk(tree->root, visit_fn, cookie);
 }
 #endif
 
@@ -165,7 +165,7 @@
 }
 #endif
 
-struct rbnode *_rb_foreach_next(struct rbtree *tree, struct _rb_foreach *f);
+struct rbnode *z_rb_foreach_next(struct rbtree *tree, struct _rb_foreach *f);
 
 /**
  * @brief Walk a tree in-order without recursing
@@ -190,7 +190,7 @@
  */
 #define RB_FOR_EACH(tree, node) \
 	for (struct _rb_foreach __f = _RB_FOREACH_INIT(tree, node);	\
-	     (node = _rb_foreach_next(tree, &__f));			\
+	     (node = z_rb_foreach_next(tree, &__f));			\
 	     /**/)
 
 /**
@@ -205,7 +205,7 @@
  */
 #define RB_FOR_EACH_CONTAINER(tree, node, field)		           \
 	for (struct _rb_foreach __f = _RB_FOREACH_INIT(tree, node);	   \
-			({struct rbnode *n = _rb_foreach_next(tree, &__f); \
+			({struct rbnode *n = z_rb_foreach_next(tree, &__f); \
 			 node = n ? CONTAINER_OF(n, __typeof__(*(node)),   \
 					 field) : NULL; }) != NULL;        \
 			 /**/)
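
A usage sketch of the container iterator above (the item type and the printk output are illustrative):

    #include <misc/rb.h>
    #include <misc/printk.h>

    struct my_item {
        struct rbnode node;
        int value;
    };

    void dump_tree(struct rbtree *tree)
    {
        struct my_item *item;

        /* in-order, non-recursive walk; each iteration goes through
         * the renamed z_rb_foreach_next()
         */
        RB_FOR_EACH_CONTAINER(tree, item, node) {
            printk("value: %d\n", item->value);
        }
    }
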
diff --git a/include/misc/util.h b/include/misc/util.h
index ae1a71e..73f50bd 100644
--- a/include/misc/util.h
+++ b/include/misc/util.h
@@ -169,14 +169,14 @@
  * value to be e.g. a literal "1" at expansion time in the next macro,
  * not "(1)", etc...  Standard recursive expansion does not work.
  */
-#define IS_ENABLED(config_macro) _IS_ENABLED1(config_macro)
+#define IS_ENABLED(config_macro) Z_IS_ENABLED1(config_macro)
 
 /* Now stick on a "_XXXX" prefix, it will now be "_XXXX1" if config_macro
  * is "1", or just "_XXXX" if it's undefined.
- *   ENABLED:   _IS_ENABLED2(_XXXX1)
- *   DISABLED   _IS_ENABLED2(_XXXX)
+ *   ENABLED:   Z_IS_ENABLED2(_XXXX1)
+ *   DISABLED   Z_IS_ENABLED2(_XXXX)
  */
-#define _IS_ENABLED1(config_macro) _IS_ENABLED2(_XXXX##config_macro)
+#define Z_IS_ENABLED1(config_macro) Z_IS_ENABLED2(_XXXX##config_macro)
 
 /* Here's the core trick, we map "_XXXX1" to "_YYYY," (i.e. a string
  * with a trailing comma), so it has the effect of making this a
@@ -190,15 +190,15 @@
 /* Then we append an extra argument to fool the gcc preprocessor into
  * accepting it as a varargs macro.
  *                         arg1   arg2  arg3
- *   ENABLED:   _IS_ENABLED3(_YYYY,    1,    0)
- *   DISABLED   _IS_ENABLED3(_XXXX 1,  0)
+ *   ENABLED:   Z_IS_ENABLED3(_YYYY,    true,    false)
+ *   DISABLED   Z_IS_ENABLED3(_XXXX true,  false)
  */
-#define _IS_ENABLED2(one_or_two_args) _IS_ENABLED3(one_or_two_args true, false)
+#define Z_IS_ENABLED2(one_or_two_args) Z_IS_ENABLED3(one_or_two_args true, false)
 
 /* And our second argument is thus now cooked to be true in the case
  * where the value is defined to 1, and false if not:
  */
-#define _IS_ENABLED3(ignore_this, val, ...) val
+#define Z_IS_ENABLED3(ignore_this, val, ...) val
 
 /**
  * @brief Insert code depending on result of flag evaluation.
@@ -234,9 +234,9 @@
  *
  */
 #define COND_CODE_1(_flag, _if_1_code, _else_code) \
-	_COND_CODE_1(_flag, _if_1_code, _else_code)
+	Z_COND_CODE_1(_flag, _if_1_code, _else_code)
 
-#define _COND_CODE_1(_flag, _if_1_code, _else_code) \
+#define Z_COND_CODE_1(_flag, _if_1_code, _else_code) \
 	__COND_CODE(_XXXX##_flag, _if_1_code, _else_code)
 
 /**
@@ -251,9 +251,9 @@
  *
  */
 #define COND_CODE_0(_flag, _if_0_code, _else_code) \
-	_COND_CODE_0(_flag, _if_0_code, _else_code)
+	Z_COND_CODE_0(_flag, _if_0_code, _else_code)
 
-#define _COND_CODE_0(_flag, _if_0_code, _else_code) \
+#define Z_COND_CODE_0(_flag, _if_0_code, _else_code) \
 	__COND_CODE(_ZZZZ##_flag, _if_0_code, _else_code)
 
 #define _ZZZZ0 _YYYY,
@@ -514,27 +514,27 @@
 /*
  * The following provides variadic preprocessor macro support to
  * help eliminate multiple, repetitive function/macro calls.  This
- * allows up to 10 "arguments" in addition to _call .
+ * allows up to 10 "arguments" in addition to z_call .
  * Note - derived from work on:
  * https://codecraft.co/2014/11/25/variadic-macros-tricks/
  */
 
-#define _GET_ARG(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, N, ...) N
+#define Z_GET_ARG(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, N, ...) N
 
-#define _for_0(_call, ...)
-#define _for_1(_call, x) _call(x)
-#define _for_2(_call, x, ...) _call(x) _for_1(_call, ##__VA_ARGS__)
-#define _for_3(_call, x, ...) _call(x) _for_2(_call, ##__VA_ARGS__)
-#define _for_4(_call, x, ...) _call(x) _for_3(_call, ##__VA_ARGS__)
-#define _for_5(_call, x, ...) _call(x) _for_4(_call, ##__VA_ARGS__)
-#define _for_6(_call, x, ...) _call(x) _for_5(_call, ##__VA_ARGS__)
-#define _for_7(_call, x, ...) _call(x) _for_6(_call, ##__VA_ARGS__)
-#define _for_8(_call, x, ...) _call(x) _for_7(_call, ##__VA_ARGS__)
-#define _for_9(_call, x, ...) _call(x) _for_8(_call, ##__VA_ARGS__)
-#define _for_10(_call, x, ...) _call(x) _for_9(_call, ##__VA_ARGS__)
+#define _for_0(z_call, ...)
+#define _for_1(z_call, x) z_call(x)
+#define _for_2(z_call, x, ...) z_call(x) _for_1(z_call, ##__VA_ARGS__)
+#define _for_3(z_call, x, ...) z_call(x) _for_2(z_call, ##__VA_ARGS__)
+#define _for_4(z_call, x, ...) z_call(x) _for_3(z_call, ##__VA_ARGS__)
+#define _for_5(z_call, x, ...) z_call(x) _for_4(z_call, ##__VA_ARGS__)
+#define _for_6(z_call, x, ...) z_call(x) _for_5(z_call, ##__VA_ARGS__)
+#define _for_7(z_call, x, ...) z_call(x) _for_6(z_call, ##__VA_ARGS__)
+#define _for_8(z_call, x, ...) z_call(x) _for_7(z_call, ##__VA_ARGS__)
+#define _for_9(z_call, x, ...) z_call(x) _for_8(z_call, ##__VA_ARGS__)
+#define _for_10(z_call, x, ...) z_call(x) _for_9(z_call, ##__VA_ARGS__)
 
 #define FOR_EACH(x, ...) \
-	_GET_ARG(__VA_ARGS__, \
+	Z_GET_ARG(__VA_ARGS__, \
 	_for_10, _for_9, _for_8, _for_7, _for_6, _for_5, \
 	_for_4, _for_3, _for_2, _for_1, _for_0)(x, ##__VA_ARGS__)
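
Two usage sketches for the renamed helpers (CONFIG_MY_FEATURE and DECLARE_FLAG are made up for illustration):

    #include <misc/util.h>

    int feature_enabled(void)
    {
        /* expands to 1 when the Kconfig symbol is defined to 1, and
         * to 0 otherwise -- including when it is entirely undefined,
         * which a bare #if cannot evaluate
         */
        return IS_ENABLED(CONFIG_MY_FEATURE);
    }

    /* FOR_EACH() applies the given macro to each argument in turn,
     * here declaring flag_a, flag_b and flag_c:
     */
    #define DECLARE_FLAG(name) static int name;
    FOR_EACH(DECLARE_FLAG, flag_a, flag_b, flag_c)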
 
diff --git a/include/net/gptp.h b/include/net/gptp.h
index 2b1b938..d6297ce 100644
--- a/include/net/gptp.h
+++ b/include/net/gptp.h
@@ -68,7 +68,7 @@
 #define GPTP_POW2(exp) pow(2, exp)
 #else
 
-static inline double _gptp_pow2(int exp)
+static inline double z_gptp_pow2(int exp)
 {
 	double res;
 
@@ -85,7 +85,7 @@
 	return res;
 }
 
-#define GPTP_POW2(exp) _gptp_pow2(exp)
+#define GPTP_POW2(exp) z_gptp_pow2(exp)
 #endif
 
 /* Pre-calculated constants */
diff --git a/include/posix/pthread.h b/include/posix/pthread.h
index 267d05b..ee21450 100644
--- a/include/posix/pthread.h
+++ b/include/posix/pthread.h
@@ -71,7 +71,7 @@
  */
 #define PTHREAD_COND_DEFINE(name)					\
 	struct pthread_cond name = {					\
-		.wait_q = _WAIT_Q_INIT(&name.wait_q),			\
+		.wait_q = Z_WAIT_Q_INIT(&name.wait_q),			\
 	}
 
 /**
@@ -83,7 +83,7 @@
 				    const pthread_condattr_t *att)
 {
 	ARG_UNUSED(att);
-	_waitq_init(&cv->wait_q);
+	z_waitq_init(&cv->wait_q);
 	return 0;
 }
 
@@ -164,7 +164,7 @@
 		__in_section(_k_mutex, static, name) = \
 	{ \
 		.lock_count = 0, \
-		.wait_q = _WAIT_Q_INIT(&name.wait_q),	\
+		.wait_q = Z_WAIT_Q_INIT(&name.wait_q),	\
 		.owner = NULL, \
 	}
 
@@ -330,7 +330,7 @@
  */
 #define PTHREAD_BARRIER_DEFINE(name, count)			\
 	struct pthread_barrier name = {				\
-		.wait_q = _WAIT_Q_INIT(&name.wait_q),		\
+		.wait_q = Z_WAIT_Q_INIT(&name.wait_q),		\
 		.max = count,					\
 	}
 
@@ -356,7 +356,7 @@
 
 	b->max = count;
 	b->count = 0;
-	_waitq_init(&b->wait_q);
+	z_waitq_init(&b->wait_q);
 
 	return 0;
 }
diff --git a/include/pwm.h b/include/pwm.h
index e595e2f..030682b 100644
--- a/include/pwm.h
+++ b/include/pwm.h
@@ -67,7 +67,7 @@
 __syscall int pwm_pin_set_cycles(struct device *dev, u32_t pwm,
 				 u32_t period, u32_t pulse);
 
-static inline int _impl_pwm_pin_set_cycles(struct device *dev, u32_t pwm,
+static inline int z_impl_pwm_pin_set_cycles(struct device *dev, u32_t pwm,
 					   u32_t period, u32_t pulse)
 {
 	struct pwm_driver_api *api;
@@ -91,7 +91,7 @@
 __syscall int pwm_get_cycles_per_sec(struct device *dev, u32_t pwm,
 				     u64_t *cycles);
 
-static inline int _impl_pwm_get_cycles_per_sec(struct device *dev, u32_t pwm,
+static inline int z_impl_pwm_get_cycles_per_sec(struct device *dev, u32_t pwm,
 					       u64_t *cycles)
 {
 	struct pwm_driver_api *api;
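
The _impl_ -> z_impl_ renames in this and the following headers all follow the same system call convention: a __syscall API foo() keeps its real implementation in z_impl_foo(), and the generated wrapper picks the direct call or the trap at runtime. A rough sketch of what gen_syscall_header.py emits (the K_SYSCALL_* identifier is an assumption for illustration):

    static inline int pwm_pin_set_cycles(struct device *dev, u32_t pwm,
                                         u32_t period, u32_t pulse)
    {
    #ifdef CONFIG_USERSPACE
        if (z_arch_is_user_context()) {
            /* trap into the kernel; the handler revalidates args */
            return (int)z_arch_syscall_invoke4((u32_t)dev, pwm, period,
                                               pulse,
                                               K_SYSCALL_PWM_PIN_SET_CYCLES);
        }
    #endif
        /* already privileged: call the implementation directly */
        return z_impl_pwm_pin_set_cycles(dev, pwm, period, pulse);
    }
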
diff --git a/include/rtc.h b/include/rtc.h
index e3f95ad..11aac3a 100644
--- a/include/rtc.h
+++ b/include/rtc.h
@@ -60,21 +60,21 @@
 
 __deprecated __syscall u32_t rtc_read(struct device *dev);
 
-static inline u32_t _impl_rtc_read(struct device *dev)
+static inline u32_t z_impl_rtc_read(struct device *dev)
 {
 	return counter_read(dev);
 }
 
 __deprecated __syscall void rtc_enable(struct device *dev);
 
-static inline void _impl_rtc_enable(struct device *dev)
+static inline void z_impl_rtc_enable(struct device *dev)
 {
 	counter_start(dev);
 }
 
 __deprecated __syscall void rtc_disable(struct device *dev);
 
-static inline void _impl_rtc_disable(struct device *dev)
+static inline void z_impl_rtc_disable(struct device *dev)
 {
 	counter_stop(dev);
 }
@@ -111,7 +111,7 @@
 __deprecated __syscall int rtc_set_alarm(struct device *dev,
 					 const u32_t alarm_val);
 
-static inline int _impl_rtc_set_alarm(struct device *dev,
+static inline int z_impl_rtc_set_alarm(struct device *dev,
 				      const u32_t alarm_val)
 {
 	return counter_set_top_value(dev, alarm_val, rtc_counter_top_callback,
@@ -133,7 +133,7 @@
  */
 __deprecated __syscall int rtc_get_pending_int(struct device *dev);
 
-static inline int _impl_rtc_get_pending_int(struct device *dev)
+static inline int z_impl_rtc_get_pending_int(struct device *dev)
 {
 	return counter_get_pending_int(dev);
 }
diff --git a/include/sched_priq.h b/include/sched_priq.h
index 0850b37..c3d7ab7 100644
--- a/include/sched_priq.h
+++ b/include/sched_priq.h
@@ -31,18 +31,18 @@
 
 struct k_thread;
 
-struct k_thread *_priq_dumb_best(sys_dlist_t *pq);
-void _priq_dumb_remove(sys_dlist_t *pq, struct k_thread *thread);
-void _priq_dumb_add(sys_dlist_t *pq, struct k_thread *thread);
+struct k_thread *z_priq_dumb_best(sys_dlist_t *pq);
+void z_priq_dumb_remove(sys_dlist_t *pq, struct k_thread *thread);
+void z_priq_dumb_add(sys_dlist_t *pq, struct k_thread *thread);
 
 struct _priq_rb {
 	struct rbtree tree;
 	int next_order_key;
 };
 
-void _priq_rb_add(struct _priq_rb *pq, struct k_thread *thread);
-void _priq_rb_remove(struct _priq_rb *pq, struct k_thread *thread);
-struct k_thread *_priq_rb_best(struct _priq_rb *pq);
+void z_priq_rb_add(struct _priq_rb *pq, struct k_thread *thread);
+void z_priq_rb_remove(struct _priq_rb *pq, struct k_thread *thread);
+struct k_thread *z_priq_rb_best(struct _priq_rb *pq);
 
 /* Traditional/textbook "multi-queue" structure.  Separate lists for a
  * small number (max 32 here) of fixed priorities.  This corresponds
@@ -56,8 +56,8 @@
 	unsigned int bitmask; /* bit 1<<i set if queues[i] is non-empty */
 };
 
-void _priq_mq_add(struct _priq_mq *pq, struct k_thread *thread);
-void _priq_mq_remove(struct _priq_mq *pq, struct k_thread *thread);
-struct k_thread *_priq_mq_best(struct _priq_mq *pq);
+void z_priq_mq_add(struct _priq_mq *pq, struct k_thread *thread);
+void z_priq_mq_remove(struct _priq_mq *pq, struct k_thread *thread);
+struct k_thread *z_priq_mq_best(struct _priq_mq *pq);
 
 #endif /* ZEPHYR_INCLUDE_SCHED_PRIQ_H_ */
diff --git a/include/sensor.h b/include/sensor.h
index 9d8acf1..d65a3df 100644
--- a/include/sensor.h
+++ b/include/sensor.h
@@ -281,7 +281,7 @@
 			      enum sensor_attribute attr,
 			      const struct sensor_value *val);
 
-static inline int _impl_sensor_attr_set(struct device *dev,
+static inline int z_impl_sensor_attr_set(struct device *dev,
 					enum sensor_channel chan,
 					enum sensor_attribute attr,
 					const struct sensor_value *val)
@@ -343,7 +343,7 @@
  */
 __syscall int sensor_sample_fetch(struct device *dev);
 
-static inline int _impl_sensor_sample_fetch(struct device *dev)
+static inline int z_impl_sensor_sample_fetch(struct device *dev)
 {
 	const struct sensor_driver_api *api = dev->driver_api;
 
@@ -372,7 +372,7 @@
 __syscall int sensor_sample_fetch_chan(struct device *dev,
 				       enum sensor_channel type);
 
-static inline int _impl_sensor_sample_fetch_chan(struct device *dev,
+static inline int z_impl_sensor_sample_fetch_chan(struct device *dev,
 						 enum sensor_channel type)
 {
 	const struct sensor_driver_api *api = dev->driver_api;
@@ -405,7 +405,7 @@
 				 enum sensor_channel chan,
 				 struct sensor_value *val);
 
-static inline int _impl_sensor_channel_get(struct device *dev,
+static inline int z_impl_sensor_channel_get(struct device *dev,
 					   enum sensor_channel chan,
 					   struct sensor_value *val)
 {
diff --git a/include/spi.h b/include/spi.h
index 73e5755..d2e78a8 100644
--- a/include/spi.h
+++ b/include/spi.h
@@ -252,7 +252,7 @@
 			     const struct spi_buf_set *tx_bufs,
 			     const struct spi_buf_set *rx_bufs);
 
-static inline int _impl_spi_transceive(struct device *dev,
+static inline int z_impl_spi_transceive(struct device *dev,
 				       const struct spi_config *config,
 				       const struct spi_buf_set *tx_bufs,
 				       const struct spi_buf_set *rx_bufs)
@@ -403,7 +403,7 @@
 __syscall int spi_release(struct device *dev,
 			  const struct spi_config *config);
 
-static inline int _impl_spi_release(struct device *dev,
+static inline int z_impl_spi_release(struct device *dev,
 				    const struct spi_config *config)
 {
 	const struct spi_driver_api *api =
diff --git a/include/spinlock.h b/include/spinlock.h
index dccbfea..d732310 100644
--- a/include/spinlock.h
+++ b/include/spinlock.h
@@ -13,12 +13,12 @@
  * proper "arch" layer.
  */
 #ifdef ZTEST_UNITTEST
-static inline int _arch_irq_lock(void)
+static inline int z_arch_irq_lock(void)
 {
 	return 0;
 }
 
-static inline void _arch_irq_unlock(int key)
+static inline void z_arch_irq_unlock(int key)
 {
 	ARG_UNUSED(key);
 }
@@ -70,7 +70,7 @@
 	 * implementation.  The "irq_lock()" API in SMP context is
 	 * actually a wrapper for a global spinlock!
 	 */
-	k.key = _arch_irq_lock();
+	k.key = z_arch_irq_lock();
 
 #ifdef SPIN_VALIDATE
 	__ASSERT(z_spin_lock_valid(l), "Recursive spinlock");
@@ -102,7 +102,7 @@
 	 */
 	atomic_clear(&l->locked);
 #endif
-	_arch_irq_unlock(key.key);
+	z_arch_irq_unlock(key.key);
 }
 
 /* Internal function: releases the lock, but leaves local interrupts
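
A minimal usage sketch of the spinlock API these arch hooks back (the lock and counter are illustrative):

    #include <spinlock.h>

    static struct k_spinlock my_lock;
    static int my_counter;

    void counter_increment(void)
    {
        /* z_arch_irq_lock() masks interrupts on this CPU; on SMP the
         * atomic 'locked' flag additionally excludes other CPUs
         */
        k_spinlock_key_t key = k_spin_lock(&my_lock);

        my_counter++;
        k_spin_unlock(&my_lock, key);
    }
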
diff --git a/include/sw_isr_table.h b/include/sw_isr_table.h
index 5d810dc..9fea36e 100644
--- a/include/sw_isr_table.h
+++ b/include/sw_isr_table.h
@@ -64,8 +64,8 @@
  * section. This gets consumed by gen_isr_tables.py which creates the vector
  * and/or SW ISR tables.
  */
-#define _ISR_DECLARE(irq, flags, func, param) \
-	static struct _isr_list _GENERIC_SECTION(.intList) __used \
+#define Z_ISR_DECLARE(irq, flags, func, param) \
+	static struct _isr_list Z_GENERIC_SECTION(.intList) __used \
 		_MK_ISR_NAME(func, __COUNTER__) = \
 			{irq, flags, &func, (void *)param}
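
IRQ_CONNECT() boils down to one such entry per connected interrupt; a hypothetical direct use (the IRQ number and handler are made up):

    static void my_isr(void *param)
    {
        ARG_UNUSED(param);
        /* handle the interrupt */
    }

    /* emits a struct _isr_list entry into .intList so that
     * gen_isr_tables.py wires IRQ 25 to my_isr, no flags, no arg
     */
    Z_ISR_DECLARE(25, 0, my_isr, NULL);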
 
diff --git a/include/sys_clock.h b/include/sys_clock.h
index 9062731..287d1c7 100644
--- a/include/sys_clock.h
+++ b/include/sys_clock.h
@@ -28,7 +28,7 @@
 
 #ifdef CONFIG_TICKLESS_KERNEL
 extern int _sys_clock_always_on;
-extern void _enable_sys_clock(void);
+extern void z_enable_sys_clock(void);
 #endif
 
 static inline int sys_clock_hw_cycles_per_sec(void)
@@ -102,7 +102,7 @@
 #endif
 #endif
 
-static ALWAYS_INLINE s32_t _ms_to_ticks(s32_t ms)
+static ALWAYS_INLINE s32_t z_ms_to_ticks(s32_t ms)
 {
 #ifdef CONFIG_SYS_CLOCK_EXISTS
 
diff --git a/include/syscall.h b/include/syscall.h
index 5e42bf0..7a5ac90 100644
--- a/include/syscall.h
+++ b/include/syscall.h
@@ -120,7 +120,7 @@
  *
  * @return true if the CPU is currently running with user permissions
  */
-static inline bool _arch_is_user_context(void);
+static inline bool z_arch_is_user_context(void);
 
 /**
  * Indicate whether the CPU is currently in user mode
@@ -129,7 +129,7 @@
  */
 static inline bool _is_user_context(void)
 {
-	return _arch_is_user_context();
+	return z_arch_is_user_context();
 }
 
 /*
@@ -166,28 +166,28 @@
  * Interfaces for invoking system calls
  */
 
-static inline u32_t _arch_syscall_invoke0(u32_t call_id);
+static inline u32_t z_arch_syscall_invoke0(u32_t call_id);
 
-static inline u32_t _arch_syscall_invoke1(u32_t arg1, u32_t call_id);
+static inline u32_t z_arch_syscall_invoke1(u32_t arg1, u32_t call_id);
 
-static inline u32_t _arch_syscall_invoke2(u32_t arg1, u32_t arg2,
+static inline u32_t z_arch_syscall_invoke2(u32_t arg1, u32_t arg2,
 					  u32_t call_id);
 
-static inline u32_t _arch_syscall_invoke3(u32_t arg1, u32_t arg2, u32_t arg3,
+static inline u32_t z_arch_syscall_invoke3(u32_t arg1, u32_t arg2, u32_t arg3,
 					  u32_t call_id);
 
-static inline u32_t _arch_syscall_invoke4(u32_t arg1, u32_t arg2, u32_t arg3,
+static inline u32_t z_arch_syscall_invoke4(u32_t arg1, u32_t arg2, u32_t arg3,
 					  u32_t arg4, u32_t call_id);
 
-static inline u32_t _arch_syscall_invoke5(u32_t arg1, u32_t arg2, u32_t arg3,
+static inline u32_t z_arch_syscall_invoke5(u32_t arg1, u32_t arg2, u32_t arg3,
 					  u32_t arg4, u32_t arg5,
 					  u32_t call_id);
 
-static inline u32_t _arch_syscall_invoke6(u32_t arg1, u32_t arg2, u32_t arg3,
+static inline u32_t z_arch_syscall_invoke6(u32_t arg1, u32_t arg2, u32_t arg3,
 					  u32_t arg4, u32_t arg5, u32_t arg6,
 					  u32_t call_id);
 
-static inline u32_t _syscall_invoke7(u32_t arg1, u32_t arg2, u32_t arg3,
+static inline u32_t z_syscall_invoke7(u32_t arg1, u32_t arg2, u32_t arg3,
 				    u32_t arg4, u32_t arg5, u32_t arg6,
 				    u32_t arg7, u32_t call_id) {
 	struct _syscall_7_args args = {
@@ -195,11 +195,11 @@
 		.arg7 = arg7,
 	};
 
-	return _arch_syscall_invoke6(arg1, arg2, arg3, arg4, arg5, (u32_t)&args,
+	return z_arch_syscall_invoke6(arg1, arg2, arg3, arg4, arg5, (u32_t)&args,
 				     call_id);
 }
 
-static inline u32_t _syscall_invoke8(u32_t arg1, u32_t arg2, u32_t arg3,
+static inline u32_t z_syscall_invoke8(u32_t arg1, u32_t arg2, u32_t arg3,
 				    u32_t arg4, u32_t arg5, u32_t arg6,
 				    u32_t arg7, u32_t arg8, u32_t call_id)
 {
@@ -209,11 +209,11 @@
 		.arg8 = arg8,
 	};
 
-	return _arch_syscall_invoke6(arg1, arg2, arg3, arg4, arg5, (u32_t)&args,
+	return z_arch_syscall_invoke6(arg1, arg2, arg3, arg4, arg5, (u32_t)&args,
 				     call_id);
 }
 
-static inline u32_t _syscall_invoke9(u32_t arg1, u32_t arg2, u32_t arg3,
+static inline u32_t z_syscall_invoke9(u32_t arg1, u32_t arg2, u32_t arg3,
 				    u32_t arg4, u32_t arg5, u32_t arg6,
 				    u32_t arg7, u32_t arg8, u32_t arg9,
 				    u32_t call_id)
@@ -225,11 +225,11 @@
 		.arg9 = arg9,
 	};
 
-	return _arch_syscall_invoke6(arg1, arg2, arg3, arg4, arg5, (u32_t)&args,
+	return z_arch_syscall_invoke6(arg1, arg2, arg3, arg4, arg5, (u32_t)&args,
 				     call_id);
 }
 
-static inline u32_t _syscall_invoke10(u32_t arg1, u32_t arg2, u32_t arg3,
+static inline u32_t z_syscall_invoke10(u32_t arg1, u32_t arg2, u32_t arg3,
 				     u32_t arg4, u32_t arg5, u32_t arg6,
 				     u32_t arg7, u32_t arg8, u32_t arg9,
 				     u32_t arg10, u32_t call_id)
@@ -242,32 +242,32 @@
 		.arg10 = arg10
 	};
 
-	return _arch_syscall_invoke6(arg1, arg2, arg3, arg4, arg5, (u32_t)&args,
+	return z_arch_syscall_invoke6(arg1, arg2, arg3, arg4, arg5, (u32_t)&args,
 				     call_id);
 }
 
-static inline u64_t _syscall_ret64_invoke0(u32_t call_id)
+static inline u64_t z_syscall_ret64_invoke0(u32_t call_id)
 {
 	u64_t ret;
 
-	(void)_arch_syscall_invoke1((u32_t)&ret, call_id);
+	(void)z_arch_syscall_invoke1((u32_t)&ret, call_id);
 	return ret;
 }
 
-static inline u64_t _syscall_ret64_invoke1(u32_t arg1, u32_t call_id)
+static inline u64_t z_syscall_ret64_invoke1(u32_t arg1, u32_t call_id)
 {
 	u64_t ret;
 
-	(void)_arch_syscall_invoke2(arg1, (u32_t)&ret, call_id);
+	(void)z_arch_syscall_invoke2(arg1, (u32_t)&ret, call_id);
 	return ret;
 }
 
-static inline u64_t _syscall_ret64_invoke2(u32_t arg1, u32_t arg2,
+static inline u64_t z_syscall_ret64_invoke2(u32_t arg1, u32_t arg2,
 					   u32_t call_id)
 {
 	u64_t ret;
 
-	(void)_arch_syscall_invoke3(arg1, arg2, (u32_t)&ret, call_id);
+	(void)z_arch_syscall_invoke3(arg1, arg2, (u32_t)&ret, call_id);
 	return ret;
 }
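
The marshalling scheme above in one line: everything past the fifth argument travels through memory, since architectures only implement up to z_arch_syscall_invoke6(). A sketch of what a seven-argument invocation reduces to (the .arg6 initializer is inferred from the elided struct body):

    static inline u32_t example_seven_args(u32_t a1, u32_t a2, u32_t a3,
                                           u32_t a4, u32_t a5, u32_t a6,
                                           u32_t a7, u32_t call_id)
    {
        /* equivalent to:
         *   struct _syscall_7_args args = { .arg6 = a6, .arg7 = a7 };
         *   z_arch_syscall_invoke6(a1, a2, a3, a4, a5, (u32_t)&args,
         *                          call_id);
         */
        return z_syscall_invoke7(a1, a2, a3, a4, a5, a6, a7, call_id);
    }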
 
diff --git a/include/toolchain/common.h b/include/toolchain/common.h
index d51cd0d..bcaed5b 100644
--- a/include/toolchain/common.h
+++ b/include/toolchain/common.h
@@ -92,8 +92,8 @@
   #define ALWAYS_INLINE inline __attribute__((always_inline))
 #endif
 
-#define _STRINGIFY(x) #x
-#define STRINGIFY(s) _STRINGIFY(s)
+#define Z_STRINGIFY(x) #x
+#define STRINGIFY(s) Z_STRINGIFY(s)
 
 /* concatenate the values of the arguments into one */
 #define _DO_CONCAT(x, y) x ## y
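
The two levels matter because '#' suppresses macro expansion of its argument; a quick example with a stand-in macro:

    #define PORT 42

    const char *a = Z_STRINGIFY(PORT);  /* "PORT": argument not expanded */
    const char *b = STRINGIFY(PORT);    /* "42": expanded, then stringified */
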
diff --git a/include/toolchain/gcc.h b/include/toolchain/gcc.h
index 243f6eb..0c45559 100644
--- a/include/toolchain/gcc.h
+++ b/include/toolchain/gcc.h
@@ -105,12 +105,12 @@
  * stringification
  */
 #define __GENERIC_SECTION(segment) __attribute__((section(STRINGIFY(segment))))
-#define _GENERIC_SECTION(segment) __GENERIC_SECTION(segment)
+#define Z_GENERIC_SECTION(segment) __GENERIC_SECTION(segment)
 
 #define ___in_section(a, b, c) \
-	__attribute__((section("." _STRINGIFY(a)			\
-				"." _STRINGIFY(b)			\
-				"." _STRINGIFY(c))))
+	__attribute__((section("." Z_STRINGIFY(a)			\
+				"." Z_STRINGIFY(b)			\
+				"." Z_STRINGIFY(c))))
 #define __in_section(a, b, c) ___in_section(a, b, c)
 
 #define __in_section_unique(seg) ___in_section(seg, __FILE__, __COUNTER__)
diff --git a/include/uart.h b/include/uart.h
index eb34d83..889fde7 100644
--- a/include/uart.h
+++ b/include/uart.h
@@ -577,7 +577,7 @@
  */
 __syscall int uart_err_check(struct device *dev);
 
-static inline int _impl_uart_err_check(struct device *dev)
+static inline int z_impl_uart_err_check(struct device *dev)
 {
 	const struct uart_driver_api *api =
 		(const struct uart_driver_api *)dev->driver_api;
@@ -603,7 +603,7 @@
  */
 __syscall int uart_poll_in(struct device *dev, unsigned char *p_char);
 
-static inline int _impl_uart_poll_in(struct device *dev, unsigned char *p_char)
+static inline int z_impl_uart_poll_in(struct device *dev, unsigned char *p_char)
 {
 	const struct uart_driver_api *api =
 		(const struct uart_driver_api *)dev->driver_api;
@@ -627,7 +627,7 @@
 __syscall void uart_poll_out(struct device *dev,
 				      unsigned char out_char);
 
-static inline void _impl_uart_poll_out(struct device *dev,
+static inline void z_impl_uart_poll_out(struct device *dev,
 						unsigned char out_char)
 {
 	const struct uart_driver_api *api =
@@ -651,7 +651,7 @@
  */
 __syscall int uart_configure(struct device *dev, const struct uart_config *cfg);
 
-static inline int _impl_uart_configure(struct device *dev,
+static inline int z_impl_uart_configure(struct device *dev,
 				       const struct uart_config *cfg)
 {
 	const struct uart_driver_api *api =
@@ -678,7 +678,7 @@
  */
 __syscall int uart_config_get(struct device *dev, struct uart_config *cfg);
 
-static inline int _impl_uart_config_get(struct device *dev,
+static inline int z_impl_uart_config_get(struct device *dev,
 				     struct uart_config *cfg)
 {
 	const struct uart_driver_api *api =
@@ -765,7 +765,7 @@
  */
 __syscall void uart_irq_tx_enable(struct device *dev);
 
-static inline void _impl_uart_irq_tx_enable(struct device *dev)
+static inline void z_impl_uart_irq_tx_enable(struct device *dev)
 {
 	const struct uart_driver_api *api =
 		(const struct uart_driver_api *)dev->driver_api;
@@ -783,7 +783,7 @@
  */
 __syscall void uart_irq_tx_disable(struct device *dev);
 
-static inline void _impl_uart_irq_tx_disable(struct device *dev)
+static inline void z_impl_uart_irq_tx_disable(struct device *dev)
 {
 	const struct uart_driver_api *api =
 		(const struct uart_driver_api *)dev->driver_api;
@@ -829,7 +829,7 @@
  */
 __syscall void uart_irq_rx_enable(struct device *dev);
 
-static inline void _impl_uart_irq_rx_enable(struct device *dev)
+static inline void z_impl_uart_irq_rx_enable(struct device *dev)
 {
 	const struct uart_driver_api *api =
 		(const struct uart_driver_api *)dev->driver_api;
@@ -848,7 +848,7 @@
  */
 __syscall void uart_irq_rx_disable(struct device *dev);
 
-static inline void _impl_uart_irq_rx_disable(struct device *dev)
+static inline void z_impl_uart_irq_rx_disable(struct device *dev)
 {
 	const struct uart_driver_api *api =
 		(const struct uart_driver_api *)dev->driver_api;
@@ -927,7 +927,7 @@
  */
 __syscall void uart_irq_err_enable(struct device *dev);
 
-static inline void _impl_uart_irq_err_enable(struct device *dev)
+static inline void z_impl_uart_irq_err_enable(struct device *dev)
 {
 	const struct uart_driver_api *api =
 		(const struct uart_driver_api *)dev->driver_api;
@@ -947,7 +947,7 @@
  */
 __syscall void uart_irq_err_disable(struct device *dev);
 
-static inline void _impl_uart_irq_err_disable(struct device *dev)
+static inline void z_impl_uart_irq_err_disable(struct device *dev)
 {
 	const struct uart_driver_api *api =
 		(const struct uart_driver_api *)dev->driver_api;
@@ -967,7 +967,7 @@
  */
 __syscall int uart_irq_is_pending(struct device *dev);
 
-static inline int _impl_uart_irq_is_pending(struct device *dev)
+static inline int z_impl_uart_irq_is_pending(struct device *dev)
 {
 	const struct uart_driver_api *api =
 		(const struct uart_driver_api *)dev->driver_api;
@@ -1004,7 +1004,7 @@
  */
 __syscall int uart_irq_update(struct device *dev);
 
-static inline int _impl_uart_irq_update(struct device *dev)
+static inline int z_impl_uart_irq_update(struct device *dev)
 {
 	const struct uart_driver_api *api =
 		(const struct uart_driver_api *)dev->driver_api;
@@ -1077,7 +1077,7 @@
 __syscall int uart_line_ctrl_set(struct device *dev,
 				 u32_t ctrl, u32_t val);
 
-static inline int _impl_uart_line_ctrl_set(struct device *dev,
+static inline int z_impl_uart_line_ctrl_set(struct device *dev,
 					   u32_t ctrl, u32_t val)
 {
 	const struct uart_driver_api *api =
@@ -1102,7 +1102,7 @@
  */
 __syscall int uart_line_ctrl_get(struct device *dev, u32_t ctrl, u32_t *val);
 
-static inline int _impl_uart_line_ctrl_get(struct device *dev,
+static inline int z_impl_uart_line_ctrl_get(struct device *dev,
 					   u32_t ctrl, u32_t *val)
 {
 	const struct uart_driver_api *api =
@@ -1134,7 +1134,7 @@
  */
 __syscall int uart_drv_cmd(struct device *dev, u32_t cmd, u32_t p);
 
-static inline int _impl_uart_drv_cmd(struct device *dev, u32_t cmd, u32_t p)
+static inline int z_impl_uart_drv_cmd(struct device *dev, u32_t cmd, u32_t p)
 {
 	const struct uart_driver_api *api =
 		(const struct uart_driver_api *)dev->driver_api;
diff --git a/kernel/compiler_stack_protect.c b/kernel/compiler_stack_protect.c
index c2869b5..1e5f98a 100644
--- a/kernel/compiler_stack_protect.c
+++ b/kernel/compiler_stack_protect.c
@@ -36,7 +36,7 @@
 {
 	/* Stack canary error is a software fatal condition; treat it as such.
 	 */
-	_k_except_reason(_NANO_ERR_STACK_CHK_FAIL);
+	z_except_reason(_NANO_ERR_STACK_CHK_FAIL);
 	CODE_UNREACHABLE;
 }
 
@@ -44,7 +44,7 @@
 
 /*
  * Symbol referenced by GCC compiler generated code for canary value.
- * The canary value gets initialized in _Cstart().
+ * The canary value gets initialized in z_cstart().
  */
 #ifdef CONFIG_USERSPACE
 K_APP_DMEM(z_libc_partition) uintptr_t __stack_chk_guard;
diff --git a/kernel/device.c b/kernel/device.c
index 05a712d..47ca592 100644
--- a/kernel/device.c
+++ b/kernel/device.c
@@ -36,7 +36,7 @@
  *
  * @param level init level to run.
  */
-void _sys_device_do_config_level(s32_t level)
+void z_sys_device_do_config_level(s32_t level)
 {
 	struct device *info;
 	static struct device *config_levels[] = {
@@ -60,12 +60,12 @@
 			 */
 			info->driver_api = NULL;
 		} else {
-			_k_object_init(info);
+			z_object_init(info);
 		}
 	}
 }
 
-struct device *_impl_device_get_binding(const char *name)
+struct device *z_impl_device_get_binding(const char *name)
 {
 	struct device *info;
 
@@ -104,7 +104,7 @@
 		return 0;
 	}
 
-	return (u32_t)_impl_device_get_binding(name_copy);
+	return (u32_t)z_impl_device_get_binding(name_copy);
 }
 #endif /* CONFIG_USERSPACE */
 
diff --git a/kernel/errno.c b/kernel/errno.c
index 070ec0a..3919600 100644
--- a/kernel/errno.c
+++ b/kernel/errno.c
@@ -24,7 +24,7 @@
 
 #ifdef CONFIG_ERRNO
 #ifdef CONFIG_USERSPACE
-int *_impl_z_errno(void)
+int *z_impl_z_errno(void)
 {
 	/* Initialized to the lowest address in the stack so the thread can
 	 * directly read/write it
@@ -34,7 +34,7 @@
 
 Z_SYSCALL_HANDLER0_SIMPLE(z_errno);
 #else
-int *_impl_z_errno(void)
+int *z_impl_z_errno(void)
 {
 	return &_current->errno_var;
 }
diff --git a/kernel/idle.c b/kernel/idle.c
index 0ba6b12..667965f 100644
--- a/kernel/idle.c
+++ b/kernel/idle.c
@@ -61,7 +61,7 @@
 
 static void sys_power_save_idle(void)
 {
-	s32_t ticks = _get_next_timeout_expiry();
+	s32_t ticks = z_get_next_timeout_expiry();
 
 	/* The documented behavior of CONFIG_TICKLESS_IDLE_THRESH is
 	 * that the system should not enter a tickless idle for
@@ -102,7 +102,7 @@
 }
 #endif
 
-void _sys_power_save_idle_exit(s32_t ticks)
+void z_sys_power_save_idle_exit(s32_t ticks)
 {
 #if defined(CONFIG_SYS_POWER_LOW_POWER_STATES)
 	/* Some CPU low power states require notification at the ISR
diff --git a/kernel/include/kernel_internal.h b/kernel/include/kernel_internal.h
index 76be796..40c40c2 100644
--- a/kernel/include/kernel_internal.h
+++ b/kernel/include/kernel_internal.h
@@ -25,27 +25,27 @@
 
 /* Early boot functions */
 
-void _bss_zero(void);
+void z_bss_zero(void);
 #ifdef CONFIG_XIP
-void _data_copy(void);
+void z_data_copy(void);
 #else
-static inline void _data_copy(void)
+static inline void z_data_copy(void)
 {
 	/* Do nothing */
 }
 #endif
-FUNC_NORETURN void _Cstart(void);
+FUNC_NORETURN void z_cstart(void);
 
-extern FUNC_NORETURN void _thread_entry(k_thread_entry_t entry,
+extern FUNC_NORETURN void z_thread_entry(k_thread_entry_t entry,
 			  void *p1, void *p2, void *p3);
 
-/* Implemented by architectures. Only called from _setup_new_thread. */
-extern void _new_thread(struct k_thread *thread, k_thread_stack_t *pStack,
+/* Implemented by architectures. Only called from z_setup_new_thread. */
+extern void z_new_thread(struct k_thread *thread, k_thread_stack_t *pStack,
 			size_t stackSize, k_thread_entry_t entry,
 			void *p1, void *p2, void *p3,
 			int prio, unsigned int options);
 
-extern void _setup_new_thread(struct k_thread *new_thread,
+extern void z_setup_new_thread(struct k_thread *new_thread,
 			      k_thread_stack_t *stack, size_t stack_size,
 			      k_thread_entry_t entry,
 			      void *p1, void *p2, void *p3,
@@ -65,7 +65,7 @@
  *
  * @return Max number of free regions, or -1 if there is no limit
  */
-extern int _arch_mem_domain_max_partitions_get(void);
+extern int z_arch_mem_domain_max_partitions_get(void);
 
 /**
  * @brief Configure the memory domain of the thread.
@@ -77,7 +77,7 @@
  *
  * @param thread Thread which needs to be configured.
  */
-extern void _arch_mem_domain_configure(struct k_thread *thread);
+extern void z_arch_mem_domain_configure(struct k_thread *thread);
 
 /**
  * @brief Remove a partition from the memory domain
@@ -90,8 +90,8 @@
  * @param domain The memory domain structure
  * @param partition_id The partition that needs to be deleted
  */
-extern void _arch_mem_domain_partition_remove(struct k_mem_domain *domain,
-					      u32_t partition_id);
+extern void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain,
+					       u32_t partition_id);
 
 /**
  * @brief Add a partition to the memory domain
@@ -105,7 +105,7 @@
  * @param partition_id The partition that needs to be added
  */
-extern void _arch_mem_domain_partition_add(struct k_mem_domain *domain,
-					   u32_t partition_id);
+extern void z_arch_mem_domain_partition_add(struct k_mem_domain *domain,
+					    u32_t partition_id);
 
 /**
  * @brief Remove the memory domain
@@ -116,7 +116,7 @@
  *
  * @param domain The memory domain structure which needs to be deleted.
  */
-extern void _arch_mem_domain_destroy(struct k_mem_domain *domain);
+extern void z_arch_mem_domain_destroy(struct k_mem_domain *domain);
 
 /**
  * @brief Check memory region permissions
@@ -132,7 +132,7 @@
  *
  * @return nonzero if the permissions don't match.
  */
-extern int _arch_buffer_validate(void *addr, size_t size, int write);
+extern int z_arch_buffer_validate(void *addr, size_t size, int write);
 
 /**
  * Perform a one-way transition from supervisor to user mode.
@@ -143,7 +143,7 @@
  * - Set up any kernel stack region for the CPU to use during privilege
  *   elevation
  * - Put the CPU in whatever its equivalent of user mode is
- * - Transfer execution to _new_thread() passing along all the supplied
+ * - Transfer execution to z_thread_entry() passing along all the supplied
  *   arguments, in user mode.
  *
  * @param user_entry Entry point to start executing as a user thread
@@ -152,7 +152,7 @@
  * @param p3 3rd parameter to user thread
  */
 extern FUNC_NORETURN
-void _arch_user_mode_enter(k_thread_entry_t user_entry, void *p1, void *p2,
+void z_arch_user_mode_enter(k_thread_entry_t user_entry, void *p1, void *p2,
 			   void *p3);
 
 
@@ -170,7 +170,7 @@
  *            to _k_syscall_handler_t functions and its contents are completely
  *            architecture specific.
  */
-extern FUNC_NORETURN void _arch_syscall_oops(void *ssf);
+extern FUNC_NORETURN void z_arch_syscall_oops(void *ssf);
 
 /**
  * @brief Safely take the length of a potentially bad string
@@ -190,7 +190,7 @@
  * @brief Zero out BSS sections for application shared memory
  *
  * This isn't handled by any platform bss zeroing, and is called from
- * _Cstart() if userspace is enabled.
+ * z_cstart() if userspace is enabled.
  */
 extern void z_app_shmem_bss_zero(void);
 #endif /* CONFIG_USERSPACE */
@@ -210,15 +210,15 @@
 
 /* set and clear essential thread flag */
 
-extern void _thread_essential_set(void);
-extern void _thread_essential_clear(void);
+extern void z_thread_essential_set(void);
+extern void z_thread_essential_clear(void);
 
 /* clean up when a thread is aborted */
 
 #if defined(CONFIG_THREAD_MONITOR)
-extern void _thread_monitor_exit(struct k_thread *thread);
+extern void z_thread_monitor_exit(struct k_thread *thread);
 #else
-#define _thread_monitor_exit(thread) \
+#define z_thread_monitor_exit(thread) \
 	do {/* nothing */    \
 	} while (false)
 #endif /* CONFIG_THREAD_MONITOR */
diff --git a/kernel/include/kernel_structs.h b/kernel/include/kernel_structs.h
index 46e79fb..9ed6937 100644
--- a/kernel/include/kernel_structs.h
+++ b/kernel/include/kernel_structs.h
@@ -174,8 +174,8 @@
 extern struct z_kernel _kernel;
 
 #ifdef CONFIG_SMP
-#define _current_cpu (_arch_curr_cpu())
-#define _current (_arch_curr_cpu()->current)
+#define _current_cpu (z_arch_curr_cpu())
+#define _current (z_arch_curr_cpu()->current)
 #else
 #define _current_cpu (&_kernel.cpus[0])
 #define _current _kernel.current
@@ -187,25 +187,25 @@
 
 #if CONFIG_USE_SWITCH
 /* This is an arch function traditionally, but when the switch-based
- * _Swap() is in use it's a simple inline provided by the kernel.
+ * z_swap() is in use it's a simple inline provided by the kernel.
  */
 static ALWAYS_INLINE void
-_set_thread_return_value(struct k_thread *thread, unsigned int value)
+z_set_thread_return_value(struct k_thread *thread, unsigned int value)
 {
 	thread->swap_retval = value;
 }
 #endif
 
 static ALWAYS_INLINE void
-_set_thread_return_value_with_data(struct k_thread *thread,
+z_set_thread_return_value_with_data(struct k_thread *thread,
 				   unsigned int value,
 				   void *data)
 {
-	_set_thread_return_value(thread, value);
+	z_set_thread_return_value(thread, value);
 	thread->base.swap_data = data;
 }
 
-extern void _init_thread_base(struct _thread_base *thread_base,
+extern void z_init_thread_base(struct _thread_base *thread_base,
 			      int priority, u32_t initial_state,
 			      unsigned int options);
 
@@ -229,7 +229,7 @@
 	*((u32_t *)pStack) = STACK_SENTINEL;
 #endif /* CONFIG_STACK_SENTINEL */
 	/* Initialize various struct k_thread members */
-	_init_thread_base(&thread->base, prio, _THREAD_PRESTART, options);
+	z_init_thread_base(&thread->base, prio, _THREAD_PRESTART, options);
 
 	/* static threads overwrite it afterwards with real value */
 	thread->init_data = NULL;
diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index ec19e99..662fb1e 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -13,79 +13,79 @@
 #include <stdbool.h>
 
 #ifdef CONFIG_MULTITHREADING
-#define _VALID_PRIO(prio, entry_point) \
-	(((prio) == K_IDLE_PRIO && _is_idle_thread(entry_point)) || \
-		 (_is_prio_higher_or_equal((prio), \
+#define Z_VALID_PRIO(prio, entry_point) \
+	(((prio) == K_IDLE_PRIO && z_is_idle_thread(entry_point)) || \
+		 (z_is_prio_higher_or_equal((prio), \
 			K_LOWEST_APPLICATION_THREAD_PRIO) && \
-		  _is_prio_lower_or_equal((prio), \
+		  z_is_prio_lower_or_equal((prio), \
 			K_HIGHEST_APPLICATION_THREAD_PRIO)))
 
-#define _ASSERT_VALID_PRIO(prio, entry_point) do { \
-	__ASSERT(_VALID_PRIO((prio), (entry_point)), \
+#define Z_ASSERT_VALID_PRIO(prio, entry_point) do { \
+	__ASSERT(Z_VALID_PRIO((prio), (entry_point)), \
 		 "invalid priority (%d); allowed range: %d to %d", \
 		 (prio), \
 		 K_LOWEST_APPLICATION_THREAD_PRIO, \
 		 K_HIGHEST_APPLICATION_THREAD_PRIO); \
 	} while (false)
 #else
-#define _VALID_PRIO(prio, entry_point) ((prio) == -1)
-#define _ASSERT_VALID_PRIO(prio, entry_point) __ASSERT((prio) == -1, "")
+#define Z_VALID_PRIO(prio, entry_point) ((prio) == -1)
+#define Z_ASSERT_VALID_PRIO(prio, entry_point) __ASSERT((prio) == -1, "")
 #endif
 
-void _sched_init(void);
-void _add_thread_to_ready_q(struct k_thread *thread);
-void _move_thread_to_end_of_prio_q(struct k_thread *thread);
-void _remove_thread_from_ready_q(struct k_thread *thread);
-int _is_thread_time_slicing(struct k_thread *thread);
-void _unpend_thread_no_timeout(struct k_thread *thread);
-int _pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
+void z_sched_init(void);
+void z_add_thread_to_ready_q(struct k_thread *thread);
+void z_move_thread_to_end_of_prio_q(struct k_thread *thread);
+void z_remove_thread_from_ready_q(struct k_thread *thread);
+int z_is_thread_time_slicing(struct k_thread *thread);
+void z_unpend_thread_no_timeout(struct k_thread *thread);
+int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
 	       _wait_q_t *wait_q, s32_t timeout);
-int _pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, s32_t timeout);
-void _pend_thread(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout);
-void _reschedule(struct k_spinlock *lock, k_spinlock_key_t key);
-void _reschedule_irqlock(u32_t key);
-struct k_thread *_unpend_first_thread(_wait_q_t *wait_q);
-void _unpend_thread(struct k_thread *thread);
-int _unpend_all(_wait_q_t *wait_q);
-void _thread_priority_set(struct k_thread *thread, int prio);
-void *_get_next_switch_handle(void *interrupted);
-struct k_thread *_find_first_thread_to_unpend(_wait_q_t *wait_q,
+int z_pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, s32_t timeout);
+void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout);
+void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key);
+void z_reschedule_irqlock(u32_t key);
+struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q);
+void z_unpend_thread(struct k_thread *thread);
+int z_unpend_all(_wait_q_t *wait_q);
+void z_thread_priority_set(struct k_thread *thread, int prio);
+void *z_get_next_switch_handle(void *interrupted);
+struct k_thread *z_find_first_thread_to_unpend(_wait_q_t *wait_q,
 					      struct k_thread *from);
 void idle(void *a, void *b, void *c);
 void z_time_slice(int ticks);
 
-static inline void _pend_curr_unlocked(_wait_q_t *wait_q, s32_t timeout)
+static inline void z_pend_curr_unlocked(_wait_q_t *wait_q, s32_t timeout)
 {
-	(void) _pend_curr_irqlock(_arch_irq_lock(), wait_q, timeout);
+	(void) z_pend_curr_irqlock(z_arch_irq_lock(), wait_q, timeout);
 }
 
-static inline void _reschedule_unlocked(void)
+static inline void z_reschedule_unlocked(void)
 {
-	(void) _reschedule_irqlock(_arch_irq_lock());
+	(void) z_reschedule_irqlock(z_arch_irq_lock());
 }
 
 /* find which one is the next thread to run */
 /* must be called with interrupts locked */
 #ifdef CONFIG_SMP
-extern struct k_thread *_get_next_ready_thread(void);
+extern struct k_thread *z_get_next_ready_thread(void);
 #else
-static ALWAYS_INLINE struct k_thread *_get_next_ready_thread(void)
+static ALWAYS_INLINE struct k_thread *z_get_next_ready_thread(void)
 {
 	return _kernel.ready_q.cache;
 }
 #endif
 
-static inline bool _is_idle_thread(void *entry_point)
+static inline bool z_is_idle_thread(void *entry_point)
 {
 	return entry_point == idle;
 }
 
-static inline bool _is_thread_pending(struct k_thread *thread)
+static inline bool z_is_thread_pending(struct k_thread *thread)
 {
 	return !!(thread->base.thread_state & _THREAD_PENDING);
 }
 
-static inline int _is_thread_prevented_from_running(struct k_thread *thread)
+static inline int z_is_thread_prevented_from_running(struct k_thread *thread)
 {
 	u8_t state = thread->base.thread_state;
 
@@ -94,143 +94,143 @@
 
 }
 
-static inline bool _is_thread_timeout_active(struct k_thread *thread)
+static inline bool z_is_thread_timeout_active(struct k_thread *thread)
 {
-	return !_is_inactive_timeout(&thread->base.timeout);
+	return !z_is_inactive_timeout(&thread->base.timeout);
 }
 
-static inline bool _is_thread_ready(struct k_thread *thread)
+static inline bool z_is_thread_ready(struct k_thread *thread)
 {
-	return !((_is_thread_prevented_from_running(thread)) != 0 ||
-		 _is_thread_timeout_active(thread));
+	return !((z_is_thread_prevented_from_running(thread)) != 0 ||
+		 z_is_thread_timeout_active(thread));
 }
 
-static inline bool _has_thread_started(struct k_thread *thread)
+static inline bool z_has_thread_started(struct k_thread *thread)
 {
 	return (thread->base.thread_state & _THREAD_PRESTART) == 0;
 }
 
-static inline bool _is_thread_state_set(struct k_thread *thread, u32_t state)
+static inline bool z_is_thread_state_set(struct k_thread *thread, u32_t state)
 {
 	return !!(thread->base.thread_state & state);
 }
 
-static inline bool _is_thread_queued(struct k_thread *thread)
+static inline bool z_is_thread_queued(struct k_thread *thread)
 {
-	return _is_thread_state_set(thread, _THREAD_QUEUED);
+	return z_is_thread_state_set(thread, _THREAD_QUEUED);
 }
 
-static inline void _mark_thread_as_suspended(struct k_thread *thread)
+static inline void z_mark_thread_as_suspended(struct k_thread *thread)
 {
 	thread->base.thread_state |= _THREAD_SUSPENDED;
 }
 
-static inline void _mark_thread_as_not_suspended(struct k_thread *thread)
+static inline void z_mark_thread_as_not_suspended(struct k_thread *thread)
 {
 	thread->base.thread_state &= ~_THREAD_SUSPENDED;
 }
 
-static inline void _mark_thread_as_started(struct k_thread *thread)
+static inline void z_mark_thread_as_started(struct k_thread *thread)
 {
 	thread->base.thread_state &= ~_THREAD_PRESTART;
 }
 
-static inline void _mark_thread_as_pending(struct k_thread *thread)
+static inline void z_mark_thread_as_pending(struct k_thread *thread)
 {
 	thread->base.thread_state |= _THREAD_PENDING;
 }
 
-static inline void _mark_thread_as_not_pending(struct k_thread *thread)
+static inline void z_mark_thread_as_not_pending(struct k_thread *thread)
 {
 	thread->base.thread_state &= ~_THREAD_PENDING;
 }
 
-static inline void _set_thread_states(struct k_thread *thread, u32_t states)
+static inline void z_set_thread_states(struct k_thread *thread, u32_t states)
 {
 	thread->base.thread_state |= states;
 }
 
-static inline void _reset_thread_states(struct k_thread *thread,
+static inline void z_reset_thread_states(struct k_thread *thread,
 					u32_t states)
 {
 	thread->base.thread_state &= ~states;
 }
 
-static inline void _mark_thread_as_queued(struct k_thread *thread)
+static inline void z_mark_thread_as_queued(struct k_thread *thread)
 {
-	_set_thread_states(thread, _THREAD_QUEUED);
+	z_set_thread_states(thread, _THREAD_QUEUED);
 }
 
-static inline void _mark_thread_as_not_queued(struct k_thread *thread)
+static inline void z_mark_thread_as_not_queued(struct k_thread *thread)
 {
-	_reset_thread_states(thread, _THREAD_QUEUED);
+	z_reset_thread_states(thread, _THREAD_QUEUED);
 }
 
-static inline bool _is_under_prio_ceiling(int prio)
+static inline bool z_is_under_prio_ceiling(int prio)
 {
 	return prio >= CONFIG_PRIORITY_CEILING;
 }
 
-static inline int _get_new_prio_with_ceiling(int prio)
+static inline int z_get_new_prio_with_ceiling(int prio)
 {
-	return _is_under_prio_ceiling(prio) ? prio : CONFIG_PRIORITY_CEILING;
+	return z_is_under_prio_ceiling(prio) ? prio : CONFIG_PRIORITY_CEILING;
 }
 
-static inline bool _is_prio1_higher_than_or_equal_to_prio2(int prio1, int prio2)
+static inline bool z_is_prio1_higher_than_or_equal_to_prio2(int prio1, int prio2)
 {
 	return prio1 <= prio2;
 }
 
-static inline bool _is_prio_higher_or_equal(int prio1, int prio2)
+static inline bool z_is_prio_higher_or_equal(int prio1, int prio2)
 {
-	return _is_prio1_higher_than_or_equal_to_prio2(prio1, prio2);
+	return z_is_prio1_higher_than_or_equal_to_prio2(prio1, prio2);
 }
 
-static inline bool _is_prio1_lower_than_or_equal_to_prio2(int prio1, int prio2)
+static inline bool z_is_prio1_lower_than_or_equal_to_prio2(int prio1, int prio2)
 {
 	return prio1 >= prio2;
 }
 
-static inline bool _is_prio1_higher_than_prio2(int prio1, int prio2)
+static inline bool z_is_prio1_higher_than_prio2(int prio1, int prio2)
 {
 	return prio1 < prio2;
 }
 
-static inline bool _is_prio_higher(int prio, int test_prio)
+static inline bool z_is_prio_higher(int prio, int test_prio)
 {
-	return _is_prio1_higher_than_prio2(prio, test_prio);
+	return z_is_prio1_higher_than_prio2(prio, test_prio);
 }
 
-static inline bool _is_prio_lower_or_equal(int prio1, int prio2)
+static inline bool z_is_prio_lower_or_equal(int prio1, int prio2)
 {
-	return _is_prio1_lower_than_or_equal_to_prio2(prio1, prio2);
+	return z_is_prio1_lower_than_or_equal_to_prio2(prio1, prio2);
 }
 
-bool _is_t1_higher_prio_than_t2(struct k_thread *t1, struct k_thread *t2);
+bool z_is_t1_higher_prio_than_t2(struct k_thread *t1, struct k_thread *t2);
 
 static inline bool _is_valid_prio(int prio, void *entry_point)
 {
-	if (prio == K_IDLE_PRIO && _is_idle_thread(entry_point)) {
+	if (prio == K_IDLE_PRIO && z_is_idle_thread(entry_point)) {
 		return true;
 	}
 
-	if (!_is_prio_higher_or_equal(prio,
-				      K_LOWEST_APPLICATION_THREAD_PRIO)) {
+	if (!z_is_prio_higher_or_equal(prio,
+				       K_LOWEST_APPLICATION_THREAD_PRIO)) {
 		return false;
 	}
 
-	if (!_is_prio_lower_or_equal(prio,
-				     K_HIGHEST_APPLICATION_THREAD_PRIO)) {
+	if (!z_is_prio_lower_or_equal(prio,
+				      K_HIGHEST_APPLICATION_THREAD_PRIO)) {
 		return false;
 	}
 
 	return true;
 }
 
-static ALWAYS_INLINE void _ready_thread(struct k_thread *thread)
+static ALWAYS_INLINE void z_ready_thread(struct k_thread *thread)
 {
-	if (_is_thread_ready(thread)) {
-		_add_thread_to_ready_q(thread);
+	if (z_is_thread_ready(thread)) {
+		z_add_thread_to_ready_q(thread);
 	}
 
 	sys_trace_thread_ready(thread);
@@ -238,17 +238,17 @@
 
 static inline void _ready_one_thread(_wait_q_t *wq)
 {
-	struct k_thread *th = _unpend_first_thread(wq);
+	struct k_thread *th = z_unpend_first_thread(wq);
 
 	if (th != NULL) {
-		_ready_thread(th);
+		z_ready_thread(th);
 	}
 }
 
-static inline void _sched_lock(void)
+static inline void z_sched_lock(void)
 {
 #ifdef CONFIG_PREEMPT_ENABLED
-	__ASSERT(!_is_in_isr(), "");
+	__ASSERT(!z_is_in_isr(), "");
 	__ASSERT(_current->base.sched_locked != 1, "");
 
 	--_current->base.sched_locked;
@@ -260,10 +260,10 @@
 #endif
 }
 
-static ALWAYS_INLINE void _sched_unlock_no_reschedule(void)
+static ALWAYS_INLINE void z_sched_unlock_no_reschedule(void)
 {
 #ifdef CONFIG_PREEMPT_ENABLED
-	__ASSERT(!_is_in_isr(), "");
+	__ASSERT(!z_is_in_isr(), "");
 	__ASSERT(_current->base.sched_locked != 0, "");
 
 	compiler_barrier();
@@ -272,7 +272,7 @@
 #endif
 }
 
-static ALWAYS_INLINE bool _is_thread_timeout_expired(struct k_thread *thread)
+static ALWAYS_INLINE bool z_is_thread_timeout_expired(struct k_thread *thread)
 {
 #ifdef CONFIG_SYS_CLOCK_EXISTS
 	return thread->base.timeout.dticks == _EXPIRED;
@@ -281,12 +281,12 @@
 #endif
 }
 
-static inline struct k_thread *_unpend1_no_timeout(_wait_q_t *wait_q)
+static inline struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q)
 {
-	struct k_thread *thread = _find_first_thread_to_unpend(wait_q, NULL);
+	struct k_thread *thread = z_find_first_thread_to_unpend(wait_q, NULL);
 
 	if (thread != NULL) {
-		_unpend_thread_no_timeout(thread);
+		z_unpend_thread_no_timeout(thread);
 	}
 
 	return thread;
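
Note the comparison direction throughout these helpers: in Zephyr a numerically lower priority value means a higher scheduling priority. For instance:

    static inline void prio_conventions(void)
    {
        /* cooperative -1 outranks preemptive 5 */
        __ASSERT(z_is_prio_higher(-1, 5), "");

        /* equal priority is not strictly higher */
        __ASSERT(!z_is_prio_higher(7, 7), "");
    }
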
diff --git a/kernel/include/kswap.h b/kernel/include/kswap.h
index 4d7b8a5..36e457e7 100644
--- a/kernel/include/kswap.h
+++ b/kernel/include/kswap.h
@@ -11,20 +11,20 @@
 #include <kernel_arch_func.h>
 
 #ifdef CONFIG_STACK_SENTINEL
-extern void _check_stack_sentinel(void);
+extern void z_check_stack_sentinel(void);
 #else
-#define _check_stack_sentinel() /**/
+#define z_check_stack_sentinel() /**/
 #endif
 
 /* In SMP, the irq_lock() is a spinlock which is implicitly released
  * and reacquired on context switch to preserve the existing
  * semantics.  This means that whenever we are about to return to a
- * thread (via either _Swap() or interrupt/exception return!) we need
+ * thread (via either z_swap() or interrupt/exception return!) we need
  * to restore the lock state to whatever the thread's counter
  * expects.
  */
-void _smp_reacquire_global_lock(struct k_thread *thread);
-void _smp_release_global_lock(struct k_thread *thread);
+void z_smp_reacquire_global_lock(struct k_thread *thread);
+void z_smp_release_global_lock(struct k_thread *thread);
 
 /* context switching and scheduling-related routines */
 #ifdef CONFIG_USE_SWITCH
@@ -51,7 +51,7 @@
 
 	old_thread = _current;
 
-	_check_stack_sentinel();
+	z_check_stack_sentinel();
 
 #ifdef CONFIG_TRACING
 	sys_trace_thread_switched_out();
@@ -61,7 +61,7 @@
 		k_spin_release(lock);
 	}
 
-	new_thread = _get_next_ready_thread();
+	new_thread = z_get_next_ready_thread();
 
 	if (new_thread != old_thread) {
 		old_thread->swap_retval = -EAGAIN;
@@ -69,10 +69,10 @@
 #ifdef CONFIG_SMP
 		_current_cpu->swap_ok = 0;
 
-		new_thread->base.cpu = _arch_curr_cpu()->id;
+		new_thread->base.cpu = z_arch_curr_cpu()->id;
 
 		if (!is_spinlock) {
-			_smp_release_global_lock(new_thread);
+			z_smp_release_global_lock(new_thread);
 		}
 #endif
 		_current = new_thread;
@@ -85,7 +85,7 @@
 #endif
 
 	if (is_spinlock) {
-		_arch_irq_unlock(key);
+		z_arch_irq_unlock(key);
 	} else {
 		irq_unlock(key);
 	}
@@ -93,32 +93,32 @@
 	return _current->swap_retval;
 }
 
-static inline int _Swap_irqlock(unsigned int key)
+static inline int z_swap_irqlock(unsigned int key)
 {
 	return do_swap(key, NULL, 0);
 }
 
-static inline int _Swap(struct k_spinlock *lock, k_spinlock_key_t key)
+static inline int z_swap(struct k_spinlock *lock, k_spinlock_key_t key)
 {
 	return do_swap(key.key, lock, 1);
 }
 
-static inline void _Swap_unlocked(void)
+static inline void z_swap_unlocked(void)
 {
 	struct k_spinlock lock = {};
 	k_spinlock_key_t key = k_spin_lock(&lock);
 
-	(void) _Swap(&lock, key);
+	(void) z_swap(&lock, key);
 }
 
 #else /* !CONFIG_USE_SWITCH */
 
 extern int __swap(unsigned int key);
 
-static inline int _Swap_irqlock(unsigned int key)
+static inline int z_swap_irqlock(unsigned int key)
 {
 	int ret;
-	_check_stack_sentinel();
+	z_check_stack_sentinel();
 
 #ifndef CONFIG_ARM
 #ifdef CONFIG_TRACING
@@ -139,15 +139,15 @@
  * can't be in SMP.  The k_spin_release() call is just for validation
  * handling.
  */
-static ALWAYS_INLINE int _Swap(struct k_spinlock *lock, k_spinlock_key_t key)
+static ALWAYS_INLINE int z_swap(struct k_spinlock *lock, k_spinlock_key_t key)
 {
 	k_spin_release(lock);
-	return _Swap_irqlock(key.key);
+	return z_swap_irqlock(key.key);
 }
 
-static inline void _Swap_unlocked(void)
+static inline void z_swap_unlocked(void)
 {
-	(void) _Swap_irqlock(_arch_irq_lock());
+	(void) z_swap_irqlock(z_arch_irq_lock());
 }
 
 #endif
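
A sketch of the canonical blocking pattern kernel objects build on these primitives (the wait queue and lock are illustrative):

    static struct k_spinlock lock;
    static _wait_q_t waitq;

    int wait_for_event(s32_t timeout)
    {
        k_spinlock_key_t key = k_spin_lock(&lock);

        /* pends _current on the wait queue, releases the lock and
         * context-switches via z_swap(); returns whatever the waker
         * set through z_set_thread_return_value()
         */
        return z_pend_curr(&lock, key, &waitq, timeout);
    }
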
diff --git a/kernel/include/syscall_handler.h b/kernel/include/syscall_handler.h
index 3479834..de43514 100644
--- a/kernel/include/syscall_handler.h
+++ b/kernel/include/syscall_handler.h
@@ -45,18 +45,18 @@
  *         -EPERM If the caller does not have permissions
  *         -EINVAL Object is not initialized
  */
-int _k_object_validate(struct _k_object *ko, enum k_objects otype,
+int z_object_validate(struct _k_object *ko, enum k_objects otype,
 		       enum _obj_init_check init);
 
 /**
- * Dump out error information on failed _k_object_validate() call
+ * Dump out error information on failed z_object_validate() call
  *
- * @param retval Return value from _k_object_validate()
+ * @param retval Return value from z_object_validate()
  * @param obj Kernel object we were trying to verify
  * @param ko If retval=-EPERM, struct _k_object * that was looked up, or NULL
  * @param otype Expected type of the kernel object
  */
-extern void _dump_object_error(int retval, void *obj, struct _k_object *ko,
+extern void z_dump_object_error(int retval, void *obj, struct _k_object *ko,
 			enum k_objects otype);
 
 /**
@@ -69,7 +69,7 @@
  * @return Kernel object's metadata, or NULL if the parameter wasn't the
  * memory address of a kernel object
  */
-extern struct _k_object *_k_object_find(void *obj);
+extern struct _k_object *z_object_find(void *obj);
 
 typedef void (*_wordlist_cb_func_t)(struct _k_object *ko, void *context);
 
@@ -79,7 +79,7 @@
  * @param func function to run on each struct _k_object
  * @param context Context pointer to pass to each invocation
  */
-extern void _k_object_wordlist_foreach(_wordlist_cb_func_t func, void *context);
+extern void z_object_wordlist_foreach(_wordlist_cb_func_t func, void *context);
 
 /**
  * Copy all kernel object permissions from the parent to the child
@@ -87,7 +87,7 @@
  * @param parent Parent thread, to get permissions from
  * @param child Child thread, to copy permissions to
  */
-extern void _thread_perms_inherit(struct k_thread *parent,
+extern void z_thread_perms_inherit(struct k_thread *parent,
 				  struct k_thread *child);
 
 /**
@@ -96,7 +96,7 @@
  * @param ko Kernel object metadata to update
  * @param thread The thread to grant permission
  */
-extern void _thread_perms_set(struct _k_object *ko, struct k_thread *thread);
+extern void z_thread_perms_set(struct _k_object *ko, struct k_thread *thread);
 
 /**
  * Revoke a thread's permission to a kernel object
@@ -104,17 +104,17 @@
  * @param ko Kernel object metadata to update
  * @param thread The thread whose permission is being revoked
  */
-extern void _thread_perms_clear(struct _k_object *ko, struct k_thread *thread);
+extern void z_thread_perms_clear(struct _k_object *ko, struct k_thread *thread);
 
 /*
  * Revoke access to all objects for the provided thread
  *
- * NOTE: Unlike _thread_perms_clear(), this function will not clear
+ * NOTE: Unlike z_thread_perms_clear(), this function will not clear
  * permissions on public objects.
  *
  * @param thread Thread whose object access is being revoked
  */
-extern void _thread_perms_all_clear(struct k_thread *thread);
+extern void z_thread_perms_all_clear(struct k_thread *thread);
 
 /**
  * Clear initialization state of a kernel object
@@ -124,7 +124,7 @@
  *
  * @param obj Address of the kernel object
  */
-void _k_object_uninit(void *obj);
+void z_object_uninit(void *obj);
 
 /**
  * Initialize and reset permissions to only access by the caller
@@ -143,7 +143,7 @@
  *
  * @param object Address of the kernel object
  */
-void _k_object_recycle(void *obj);
+void z_object_recycle(void *obj);
 
 /**
  * @brief Obtain the size of a C string passed from user mode
@@ -258,7 +258,7 @@
 #define Z_OOPS(expr) \
 	do { \
 		if (expr) { \
-			_arch_syscall_oops(ssf); \
+			z_arch_syscall_oops(ssf); \
 		} \
 	} while (false)
 
@@ -296,7 +296,7 @@
 #define Z_SYSCALL_VERIFY(expr) Z_SYSCALL_VERIFY_MSG(expr, #expr)
 
 #define Z_SYSCALL_MEMORY(ptr, size, write) \
-	Z_SYSCALL_VERIFY_MSG(_arch_buffer_validate((void *)ptr, size, write) \
+	Z_SYSCALL_VERIFY_MSG(z_arch_buffer_validate((void *)ptr, size, write) \
 			     == 0, \
 			     "Memory region %p (size %u) %s access denied", \
 			     (void *)(ptr), (u32_t)(size), \
@@ -379,18 +379,18 @@
 #define Z_SYSCALL_MEMORY_ARRAY_WRITE(ptr, nmemb, size) \
 	Z_SYSCALL_MEMORY_ARRAY(ptr, nmemb, size, 1)
 
-static inline int _obj_validation_check(struct _k_object *ko,
+static inline int z_obj_validation_check(struct _k_object *ko,
 					void *obj,
 					enum k_objects otype,
 					enum _obj_init_check init)
 {
 	int ret;
 
-	ret = _k_object_validate(ko, otype, init);
+	ret = z_object_validate(ko, otype, init);
 
 #ifdef CONFIG_PRINTK
 	if (ret != 0) {
-		_dump_object_error(ret, obj, ko, otype);
+		z_dump_object_error(ret, obj, ko, otype);
 	}
 #else
 	ARG_UNUSED(obj);
@@ -400,8 +400,7 @@
 }
 
 #define Z_SYSCALL_IS_OBJ(ptr, type, init) \
-	Z_SYSCALL_VERIFY_MSG( \
-	    _obj_validation_check(_k_object_find((void *)ptr), (void *)ptr, \
+	Z_SYSCALL_VERIFY_MSG(z_obj_validation_check(z_object_find((void *)ptr), (void *)ptr, \
 				   type, init) == 0, "access denied")
 
 /**
@@ -454,7 +453,7 @@
 /**
  * @brief Runtime check kernel object pointer for non-init functions
  *
- * Calls _k_object_validate and triggers a kernel oops if the check fails.
+ * Calls z_object_validate and triggers a kernel oops if the check fails.
  * For use in system call handlers which are not init functions; a fatal
  * error will occur if the object is not initialized.
  *
@@ -575,18 +574,18 @@
 				 u32_t arg6_, \
 				 void *ssf)
 
-#define _SYSCALL_CONCAT(arg1, arg2) __SYSCALL_CONCAT(arg1, arg2)
+#define Z_SYSCALL_CONCAT(arg1, arg2) __SYSCALL_CONCAT(arg1, arg2)
 #define __SYSCALL_CONCAT(arg1, arg2) ___SYSCALL_CONCAT(arg1, arg2)
 #define ___SYSCALL_CONCAT(arg1, arg2) arg1##arg2
 
-#define _SYSCALL_NARG(...) __SYSCALL_NARG(__VA_ARGS__, __SYSCALL_RSEQ_N())
+#define Z_SYSCALL_NARG(...) __SYSCALL_NARG(__VA_ARGS__, __SYSCALL_RSEQ_N())
 #define __SYSCALL_NARG(...) __SYSCALL_ARG_N(__VA_ARGS__)
 #define __SYSCALL_ARG_N(_1, _2, _3, _4, _5, _6, _7, N, ...) N
 #define __SYSCALL_RSEQ_N() 6, 5, 4, 3, 2, 1, 0
 
 #define Z_SYSCALL_HANDLER(...) \
-	_SYSCALL_CONCAT(__SYSCALL_HANDLER, \
-			_SYSCALL_NARG(__VA_ARGS__))(__VA_ARGS__)
+	Z_SYSCALL_CONCAT(__SYSCALL_HANDLER, \
+			Z_SYSCALL_NARG(__VA_ARGS__))(__VA_ARGS__)
 
 /*
  * Helper macros for a very common case: calls which just take one argument
@@ -597,24 +596,24 @@
 #define Z_SYSCALL_HANDLER1_SIMPLE(name_, obj_enum_, obj_type_) \
 	__SYSCALL_HANDLER1(name_, arg1) { \
 		Z_OOPS(Z_SYSCALL_OBJ(arg1, obj_enum_)); \
-		return (u32_t)_impl_ ## name_((obj_type_)arg1); \
+		return (u32_t)z_impl_ ## name_((obj_type_)arg1); \
 	}
 
 #define Z_SYSCALL_HANDLER1_SIMPLE_VOID(name_, obj_enum_, obj_type_) \
 	__SYSCALL_HANDLER1(name_, arg1) { \
 		Z_OOPS(Z_SYSCALL_OBJ(arg1, obj_enum_)); \
-		_impl_ ## name_((obj_type_)arg1); \
+		z_impl_ ## name_((obj_type_)arg1); \
 		return 0; \
 	}
 
 #define Z_SYSCALL_HANDLER0_SIMPLE(name_) \
 	__SYSCALL_HANDLER0(name_) { \
-		return (u32_t)_impl_ ## name_(); \
+		return (u32_t)z_impl_ ## name_(); \
 	}
 
 #define Z_SYSCALL_HANDLER0_SIMPLE_VOID(name_) \
 	__SYSCALL_HANDLER0(name_) { \
-		_impl_ ## name_(); \
+		z_impl_ ## name_(); \
 		return 0; \
 	}
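
The Z_SYSCALL_CONCAT()/Z_SYSCALL_NARG() pair above dispatches on argument
count; because the syscall name is counted together with the handler
arguments, N comes out as the number of handler arguments. A sketch of two
expansions, using handlers that appear later in this patch:

/*
 * Z_SYSCALL_HANDLER(k_mutex_lock, mutex, timeout)
 *   -> Z_SYSCALL_NARG(k_mutex_lock, mutex, timeout)  == 2
 *   -> Z_SYSCALL_CONCAT(__SYSCALL_HANDLER, 2)(k_mutex_lock, mutex, timeout)
 *   -> __SYSCALL_HANDLER2(k_mutex_lock, mutex, timeout)
 *
 * Z_SYSCALL_HANDLER1_SIMPLE(k_queue_is_empty, K_OBJ_QUEUE, struct k_queue *)
 *   -> __SYSCALL_HANDLER1(k_queue_is_empty, arg1) {
 *          Z_OOPS(Z_SYSCALL_OBJ(arg1, K_OBJ_QUEUE));
 *          return (u32_t)z_impl_k_queue_is_empty((struct k_queue *)arg1);
 *      }
 */
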
 
diff --git a/kernel/include/timeout_q.h b/kernel/include/timeout_q.h
index 2b3c98b..63b191c 100644
--- a/kernel/include/timeout_q.h
+++ b/kernel/include/timeout_q.h
@@ -22,38 +22,38 @@
 
 #ifdef CONFIG_SYS_CLOCK_EXISTS
 
-static inline void _init_timeout(struct _timeout *t, _timeout_func_t fn)
+static inline void z_init_timeout(struct _timeout *t, _timeout_func_t fn)
 {
 	sys_dnode_init(&t->node);
 }
 
-void _add_timeout(struct _timeout *to, _timeout_func_t fn, s32_t ticks);
+void z_add_timeout(struct _timeout *to, _timeout_func_t fn, s32_t ticks);
 
-int _abort_timeout(struct _timeout *to);
+int z_abort_timeout(struct _timeout *to);
 
-static inline bool _is_inactive_timeout(struct _timeout *t)
+static inline bool z_is_inactive_timeout(struct _timeout *t)
 {
 	return !sys_dnode_is_linked(&t->node);
 }
 
-static inline void _init_thread_timeout(struct _thread_base *thread_base)
+static inline void z_init_thread_timeout(struct _thread_base *thread_base)
 {
-	_init_timeout(&thread_base->timeout, NULL);
+	z_init_timeout(&thread_base->timeout, NULL);
 }
 
 extern void z_thread_timeout(struct _timeout *to);
 
-static inline void _add_thread_timeout(struct k_thread *th, s32_t ticks)
+static inline void z_add_thread_timeout(struct k_thread *th, s32_t ticks)
 {
-	_add_timeout(&th->base.timeout, z_thread_timeout, ticks);
+	z_add_timeout(&th->base.timeout, z_thread_timeout, ticks);
 }
 
-static inline int _abort_thread_timeout(struct k_thread *thread)
+static inline int z_abort_thread_timeout(struct k_thread *thread)
 {
-	return _abort_timeout(&thread->base.timeout);
+	return z_abort_timeout(&thread->base.timeout);
 }
 
-s32_t _get_next_timeout_expiry(void);
+s32_t z_get_next_timeout_expiry(void);
 
 void z_set_timeout_expiry(s32_t ticks, bool idle);
 
@@ -62,11 +62,11 @@
 #else
 
 /* Stubs when !CONFIG_SYS_CLOCK_EXISTS */
-#define _init_thread_timeout(t) do {} while (false)
-#define _add_thread_timeout(th, to) do {} while (false && (void *)to && (void *)th)
-#define _abort_thread_timeout(t) (0)
-#define _is_inactive_timeout(t) 0
-#define _get_next_timeout_expiry() (K_FOREVER)
+#define z_init_thread_timeout(t) do {} while (false)
+#define z_add_thread_timeout(th, to) do {} while (false && (void *)to && (void *)th)
+#define z_abort_thread_timeout(t) (0)
+#define z_is_inactive_timeout(t) 0
+#define z_get_next_timeout_expiry() (K_FOREVER)
 #define z_set_timeout_expiry(t, i) do {} while (false)
 
 #endif
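
A short sketch of the renamed timeout API when CONFIG_SYS_CLOCK_EXISTS is
set; the my_expiry callback and the tick count are illustrative only:

static struct _timeout my_to;

static void my_expiry(struct _timeout *t)
{
	/* invoked by the timer subsystem when the timeout fires */
}

static void my_arm(void)
{
	z_init_timeout(&my_to, my_expiry);     /* node starts unlinked */
	z_add_timeout(&my_to, my_expiry, 100); /* fire in 100 ticks */

	if (!z_is_inactive_timeout(&my_to)) {
		(void)z_abort_timeout(&my_to); /* still queued: cancel it */
	}
}
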
diff --git a/kernel/include/wait_q.h b/kernel/include/wait_q.h
index 302909c..b96c290 100644
--- a/kernel/include/wait_q.h
+++ b/kernel/include/wait_q.h
@@ -25,16 +25,16 @@
 #define _WAIT_Q_FOR_EACH(wq, thread_ptr) \
 	RB_FOR_EACH_CONTAINER(&(wq)->waitq.tree, thread_ptr, base.qnode_rb)
 
-static inline void _waitq_init(_wait_q_t *w)
+static inline void z_waitq_init(_wait_q_t *w)
 {
 	w->waitq = (struct _priq_rb) {
 		.tree = {
-			.lessthan_fn = _priq_rb_lessthan
+			.lessthan_fn = z_priq_rb_lessthan
 		}
 	};
 }
 
-static inline struct k_thread *_waitq_head(_wait_q_t *w)
+static inline struct k_thread *z_waitq_head(_wait_q_t *w)
 {
 	return (void *)rb_get_min(&w->waitq.tree);
 }
@@ -45,12 +45,12 @@
 	SYS_DLIST_FOR_EACH_CONTAINER(&((wq)->waitq), thread_ptr, \
 				     base.qnode_dlist)
 
-static inline void _waitq_init(_wait_q_t *w)
+static inline void z_waitq_init(_wait_q_t *w)
 {
 	sys_dlist_init(&w->waitq);
 }
 
-static inline struct k_thread *_waitq_head(_wait_q_t *w)
+static inline struct k_thread *z_waitq_head(_wait_q_t *w)
 {
 	return (void *)sys_dlist_peek_head(&w->waitq);
 }
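
Both wait-queue backends (the CONFIG_WAITQ_SCALABLE rbtree and the
CONFIG_WAITQ_DUMB dlist) expose the same renamed pair; a minimal sketch:

static _wait_q_t wq;

static void wq_demo(void)
{
	z_waitq_init(&wq); /* rbtree or dlist, depending on Kconfig */

	struct k_thread *first = z_waitq_head(&wq);

	if (first == NULL) {
		/* no threads are pended on this object */
	}
}
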
diff --git a/kernel/init.c b/kernel/init.c
index 19d0b74..53d57f7 100644
--- a/kernel/init.c
+++ b/kernel/init.c
@@ -140,7 +140,7 @@
  *
  * @return N/A
  */
-void _bss_zero(void)
+void z_bss_zero(void)
 {
 	(void)memset(&__bss_start, 0,
 		     ((u32_t) &__bss_end - (u32_t) &__bss_start));
@@ -173,7 +173,7 @@
  *
  * @return N/A
  */
-void _data_copy(void)
+void z_data_copy(void)
 {
 	(void)memcpy(&__data_ram_start, &__data_rom_start,
 		 ((u32_t) &__data_ram_end - (u32_t) &__data_ram_start));
@@ -196,7 +196,7 @@
 	 * __stack_chk_guard is some uninitialized value living in the
 	 * app shared memory sections. Preserve it, and don't make any
 	 * function calls to perform the memory copy. The true canary
-	 * value gets set later in _Cstart().
+	 * value gets set later in z_cstart().
 	 */
 	uintptr_t guard_copy = __stack_chk_guard;
 	u8_t *src = (u8_t *)&_app_smem_rom_start;
@@ -238,7 +238,7 @@
 	static const unsigned int boot_delay;
 #endif
 
-	_sys_device_do_config_level(_SYS_INIT_LEVEL_POST_KERNEL);
+	z_sys_device_do_config_level(_SYS_INIT_LEVEL_POST_KERNEL);
 #if CONFIG_STACK_POINTER_RANDOM
 	z_stack_adjust_initialized = 1;
 #endif
@@ -250,7 +250,7 @@
 	PRINT_BOOT_BANNER();
 
 	/* Final init level before app starts */
-	_sys_device_do_config_level(_SYS_INIT_LEVEL_APPLICATION);
+	z_sys_device_do_config_level(_SYS_INIT_LEVEL_APPLICATION);
 
 #ifdef CONFIG_CPLUSPLUS
 	/* Process the .ctors and .init_array sections */
@@ -260,7 +260,7 @@
 	__do_init_array_aux();
 #endif
 
-	_init_static_threads();
+	z_init_static_threads();
 
 #ifdef CONFIG_SMP
 	smp_init();
@@ -297,10 +297,10 @@
 	thr->base.is_idle = 1;
 #endif
 
-	_setup_new_thread(thr, stack,
+	z_setup_new_thread(thr, stack,
 			  IDLE_STACK_SIZE, idle, NULL, NULL, NULL,
 			  K_LOWEST_THREAD_PRIO, K_ESSENTIAL, IDLE_THREAD_NAME);
-	_mark_thread_as_started(thr);
+	z_mark_thread_as_started(thr);
 }
 #endif
 
@@ -349,7 +349,7 @@
 #endif
 
 	/* _kernel.ready_q is all zeroes */
-	_sched_init();
+	z_sched_init();
 
 #ifndef CONFIG_SMP
 	/*
@@ -364,14 +364,14 @@
 	_kernel.ready_q.cache = _main_thread;
 #endif
 
-	_setup_new_thread(_main_thread, _main_stack,
+	z_setup_new_thread(_main_thread, _main_stack,
 			  MAIN_STACK_SIZE, bg_thread_main,
 			  NULL, NULL, NULL,
 			  CONFIG_MAIN_THREAD_PRIORITY, K_ESSENTIAL, "main");
 	sys_trace_thread_create(_main_thread);
 
-	_mark_thread_as_started(_main_thread);
-	_ready_thread(_main_thread);
+	z_mark_thread_as_started(_main_thread);
+	z_ready_thread(_main_thread);
 
 #ifdef CONFIG_MULTITHREADING
 	init_idle_thread(_idle_thread, _idle_stack);
@@ -410,7 +410,7 @@
 static void switch_to_main_thread(void)
 {
 #ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
-	_arch_switch_to_main_thread(_main_thread, _main_stack, MAIN_STACK_SIZE,
+	z_arch_switch_to_main_thread(_main_thread, _main_stack, MAIN_STACK_SIZE,
 				    bg_thread_main);
 #else
 	/*
@@ -418,7 +418,7 @@
 	 * current fake thread is not on a wait queue or ready queue, so it
 	 * will never be rescheduled in.
 	 */
-	_Swap_unlocked();
+	z_swap_unlocked();
 #endif
 }
 #endif /* CONFIG_MULTITHREADING */
@@ -473,7 +473,7 @@
  *
  * @return Does not return
  */
-FUNC_NORETURN void _Cstart(void)
+FUNC_NORETURN void z_cstart(void)
 {
 	/* gcov hook needed to get the coverage report.*/
 	gcov_static_init();
@@ -501,8 +501,8 @@
 #endif
 
 	/* perform basic hardware initialization */
-	_sys_device_do_config_level(_SYS_INIT_LEVEL_PRE_KERNEL_1);
-	_sys_device_do_config_level(_SYS_INIT_LEVEL_PRE_KERNEL_2);
+	z_sys_device_do_config_level(_SYS_INIT_LEVEL_PRE_KERNEL_1);
+	z_sys_device_do_config_level(_SYS_INIT_LEVEL_PRE_KERNEL_2);
 
 #ifdef CONFIG_STACK_CANARIES
 	__stack_chk_guard = z_early_boot_rand32_get();
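
z_cstart() runs the PRE_KERNEL_1 and PRE_KERNEL_2 device levels before the
kernel is up, and bg_thread_main() later runs POST_KERNEL and APPLICATION. A
sketch using the public SYS_INIT() registration macro, which feeds these
levels (the function name is illustrative):

#include <init.h>

static int my_early_setup(struct device *dev)
{
	ARG_UNUSED(dev);
	/* PRE_KERNEL_1: no threads or scheduling available yet */
	return 0;
}

/* consumed by z_sys_device_do_config_level(_SYS_INIT_LEVEL_PRE_KERNEL_1) */
SYS_INIT(my_early_setup, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
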
diff --git a/kernel/int_latency_bench.c b/kernel/int_latency_bench.c
index 1b737f9..97435b5 100644
--- a/kernel/int_latency_bench.c
+++ b/kernel/int_latency_bench.c
@@ -50,7 +50,7 @@
  * @return N/A
  *
  */
-void _int_latency_start(void)
+void z_int_latency_start(void)
 {
 	/* when interrupts are not already locked, take time stamp */
 	if (!int_locked_timestamp && int_latency_bench_ready) {
@@ -69,7 +69,7 @@
  * @return N/A
  *
  */
-void _int_latency_stop(void)
+void z_int_latency_stop(void)
 {
 	u32_t delta;
 	u32_t delayOverhead;
@@ -143,16 +143,16 @@
 		 * takes
 		 */
 		initial_start_delay = k_cycle_get_32();
-		_int_latency_start();
+		z_int_latency_start();
 		initial_start_delay =
 			k_cycle_get_32() - initial_start_delay - timeToReadTime;
 
 		nesting_delay = k_cycle_get_32();
-		_int_latency_start();
+		z_int_latency_start();
 		nesting_delay = k_cycle_get_32() - nesting_delay - timeToReadTime;
 
 		stop_delay = k_cycle_get_32();
-		_int_latency_stop();
+		z_int_latency_stop();
 		stop_delay = k_cycle_get_32() - stop_delay - timeToReadTime;
 
 		/* re-initialize globals to default values */
diff --git a/kernel/mailbox.c b/kernel/mailbox.c
index b5fe80b..f04a0cb 100644
--- a/kernel/mailbox.c
+++ b/kernel/mailbox.c
@@ -79,7 +79,7 @@
 	int i;
 
 	for (i = 0; i < CONFIG_NUM_MBOX_ASYNC_MSGS; i++) {
-		_init_thread_base(&async_msg[i].thread, 0, _THREAD_DUMMY, 0);
+		z_init_thread_base(&async_msg[i].thread, 0, _THREAD_DUMMY, 0);
 		k_stack_push(&async_msg_free, (u32_t)&async_msg[i]);
 	}
 #endif /* CONFIG_NUM_MBOX_ASYNC_MSGS > 0 */
@@ -103,8 +103,8 @@
 
 void k_mbox_init(struct k_mbox *mbox_ptr)
 {
-	_waitq_init(&mbox_ptr->tx_msg_queue);
-	_waitq_init(&mbox_ptr->rx_msg_queue);
+	z_waitq_init(&mbox_ptr->tx_msg_queue);
+	z_waitq_init(&mbox_ptr->rx_msg_queue);
 	mbox_ptr->lock = (struct k_spinlock) {};
 	SYS_TRACING_OBJ_INIT(k_mbox, mbox_ptr);
 }
@@ -216,10 +216,10 @@
 #endif
 
 	/* synchronous send: wake up sending thread */
-	_set_thread_return_value(sending_thread, 0);
-	_mark_thread_as_not_pending(sending_thread);
-	_ready_thread(sending_thread);
-	_reschedule_unlocked();
+	z_set_thread_return_value(sending_thread, 0);
+	z_mark_thread_as_not_pending(sending_thread);
+	z_ready_thread(sending_thread);
+	z_reschedule_unlocked();
 }
 
 /**
@@ -259,11 +259,11 @@
 
 		if (mbox_message_match(tx_msg, rx_msg) == 0) {
 			/* take receiver out of rx queue */
-			_unpend_thread(receiving_thread);
+			z_unpend_thread(receiving_thread);
 
 			/* ready receiver for execution */
-			_set_thread_return_value(receiving_thread, 0);
-			_ready_thread(receiving_thread);
+			z_set_thread_return_value(receiving_thread, 0);
+			z_ready_thread(receiving_thread);
 
 #if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
 			/*
@@ -275,7 +275,7 @@
 			 */
 			if ((sending_thread->base.thread_state & _THREAD_DUMMY)
 			    != 0) {
-				_reschedule(&mbox->lock, key);
+				z_reschedule(&mbox->lock, key);
 				return 0;
 			}
 #endif
@@ -284,7 +284,7 @@
 			 * synchronous send: pend current thread (unqueued)
 			 * until the receiver consumes the message
 			 */
-			return _pend_curr(&mbox->lock, key, NULL, K_FOREVER);
+			return z_pend_curr(&mbox->lock, key, NULL, K_FOREVER);
 
 		}
 	}
@@ -298,14 +298,14 @@
 #if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
 	/* asynchronous send: dummy thread waits on tx queue for receiver */
 	if ((sending_thread->base.thread_state & _THREAD_DUMMY) != 0) {
-		_pend_thread(sending_thread, &mbox->tx_msg_queue, K_FOREVER);
+		z_pend_thread(sending_thread, &mbox->tx_msg_queue, K_FOREVER);
 		k_spin_unlock(&mbox->lock, key);
 		return 0;
 	}
 #endif
 
 	/* synchronous send: sender waits on tx queue for receiver or timeout */
-	return _pend_curr(&mbox->lock, key, &mbox->tx_msg_queue, timeout);
+	return z_pend_curr(&mbox->lock, key, &mbox->tx_msg_queue, timeout);
 }
 
 int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, s32_t timeout)
@@ -438,7 +438,7 @@
 
 		if (mbox_message_match(tx_msg, rx_msg) == 0) {
 			/* take sender out of mailbox's tx queue */
-			_unpend_thread(sending_thread);
+			z_unpend_thread(sending_thread);
 
 			k_spin_unlock(&mbox->lock, key);
 
@@ -457,7 +457,7 @@
 
 	/* wait until a matching sender appears or a timeout occurs */
 	_current->base.swap_data = rx_msg;
-	result = _pend_curr(&mbox->lock, key, &mbox->rx_msg_queue, timeout);
+	result = z_pend_curr(&mbox->lock, key, &mbox->rx_msg_queue, timeout);
 
 	/* consume message data immediately, if needed */
 	if (result == 0) {
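
The matching logic above pairs exactly one sender and one receiver per
message; from the public API the synchronous round trip looks roughly like
this sketch (buffer sizes and field values are illustrative):

static struct k_mbox mbox; /* k_mbox_init(&mbox) at startup */

static void mbox_sender(void)
{
	char payload[] = "ping";
	struct k_mbox_msg tx = {
		.size = sizeof(payload),
		.tx_data = payload,
		.tx_target_thread = K_ANY,
	};

	/* pends on tx_msg_queue until a receiver consumes the message */
	k_mbox_put(&mbox, &tx, K_FOREVER);
}

static void mbox_receiver(void)
{
	char buf[16];
	struct k_mbox_msg rx = {
		.size = sizeof(buf),
		.rx_source_thread = K_ANY,
	};

	k_mbox_get(&mbox, &rx, buf, K_FOREVER);
}
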
diff --git a/kernel/mem_domain.c b/kernel/mem_domain.c
index ce5e6e8..220cf37 100644
--- a/kernel/mem_domain.c
+++ b/kernel/mem_domain.c
@@ -133,7 +133,7 @@
 	 * only if it is the current thread.
 	 */
 	if (_current->mem_domain_info.mem_domain == domain) {
-		_arch_mem_domain_destroy(domain);
+		z_arch_mem_domain_destroy(domain);
 	}
 
 	SYS_DLIST_FOR_EACH_NODE_SAFE(&domain->mem_domain_q, node, next_node) {
@@ -217,7 +217,7 @@
 	 * only if it is the current thread.
 	 */
 	if (_current->mem_domain_info.mem_domain == domain) {
-		_arch_mem_domain_partition_remove(domain, p_idx);
+		z_arch_mem_domain_partition_remove(domain, p_idx);
 	}
 
 	/* A zero-sized partition denotes it's a free partition */
@@ -244,7 +244,7 @@
 	thread->mem_domain_info.mem_domain = domain;
 
 	if (_current == thread) {
-		_arch_mem_domain_configure(thread);
+		z_arch_mem_domain_configure(thread);
 	}
 
 	k_spin_unlock(&lock, key);
@@ -259,7 +259,7 @@
 
 	key = k_spin_lock(&lock);
 	if (_current == thread) {
-		_arch_mem_domain_destroy(thread->mem_domain_info.mem_domain);
+		z_arch_mem_domain_destroy(thread->mem_domain_info.mem_domain);
 	}
 
 	sys_dlist_remove(&thread->mem_domain_info.mem_domain_q_node);
@@ -272,7 +272,7 @@
 {
 	ARG_UNUSED(arg);
 
-	max_partitions = _arch_mem_domain_max_partitions_get();
+	max_partitions = z_arch_mem_domain_max_partitions_get();
 	/*
 	 * max_partitions must be less than or equal to
 	 * CONFIG_MAX_DOMAIN_PARTITIONS, or would encounter array index
diff --git a/kernel/mem_slab.c b/kernel/mem_slab.c
index a8ce385..b4ecb78 100644
--- a/kernel/mem_slab.c
+++ b/kernel/mem_slab.c
@@ -64,7 +64,7 @@
 	     slab++) {
 		create_free_list(slab);
 		SYS_TRACING_OBJ_INIT(k_mem_slab, slab);
-		_k_object_init(slab);
+		z_object_init(slab);
 	}
 	return 0;
 }
@@ -84,10 +84,10 @@
 	slab->buffer = buffer;
 	slab->num_used = 0;
 	create_free_list(slab);
-	_waitq_init(&slab->wait_q);
+	z_waitq_init(&slab->wait_q);
 	SYS_TRACING_OBJ_INIT(k_mem_slab, slab);
 
-	_k_object_init(slab);
+	z_object_init(slab);
 }
 
 int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, s32_t timeout)
@@ -111,7 +111,7 @@
 		result = -ENOMEM;
 	} else {
 		/* wait for a free block or timeout */
-		result = _pend_curr(&lock, key, &slab->wait_q, timeout);
+		result = z_pend_curr(&lock, key, &slab->wait_q, timeout);
 		if (result == 0) {
 			*mem = _current->base.swap_data;
 		}
@@ -126,12 +126,12 @@
 void k_mem_slab_free(struct k_mem_slab *slab, void **mem)
 {
 	k_spinlock_key_t key = k_spin_lock(&lock);
-	struct k_thread *pending_thread = _unpend_first_thread(&slab->wait_q);
+	struct k_thread *pending_thread = z_unpend_first_thread(&slab->wait_q);
 
 	if (pending_thread != NULL) {
-		_set_thread_return_value_with_data(pending_thread, 0, *mem);
-		_ready_thread(pending_thread);
-		_reschedule(&lock, key);
+		z_set_thread_return_value_with_data(pending_thread, 0, *mem);
+		z_ready_thread(pending_thread);
+		z_reschedule(&lock, key);
 	} else {
 		**(char ***)mem = slab->free_list;
 		slab->free_list = *(char **)mem;
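
When a block is freed while allocators are pended, it is handed straight to
the first waiter through swap_data instead of returning to the free list. A
usage sketch (block size and count are illustrative):

K_MEM_SLAB_DEFINE(my_slab, 64, 8, 4); /* 8 blocks of 64 bytes, 4-byte aligned */

static void slab_demo(void)
{
	void *block;

	if (k_mem_slab_alloc(&my_slab, &block, 100) == 0) {
		/* ... use block ... */
		k_mem_slab_free(&my_slab, &block);
	} /* nonzero return: timed out pending on wait_q */
}
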
diff --git a/kernel/mempool.c b/kernel/mempool.c
index e042087..013947e 100644
--- a/kernel/mempool.c
+++ b/kernel/mempool.c
@@ -30,8 +30,8 @@
 
 static void k_mem_pool_init(struct k_mem_pool *p)
 {
-	_waitq_init(&p->wait_q);
-	_sys_mem_pool_base_init(&p->base);
+	z_waitq_init(&p->wait_q);
+	z_sys_mem_pool_base_init(&p->base);
 }
 
 int init_static_pools(struct device *unused)
@@ -54,10 +54,10 @@
 	int ret;
 	s64_t end = 0;
 
-	__ASSERT(!(_is_in_isr() && timeout != K_NO_WAIT), "");
+	__ASSERT(!(z_is_in_isr() && timeout != K_NO_WAIT), "");
 
 	if (timeout > 0) {
-		end = z_tick_get() + _ms_to_ticks(timeout);
+		end = z_tick_get() + z_ms_to_ticks(timeout);
 	}
 
 	while (true) {
@@ -71,7 +71,7 @@
 		 * clearly want to block.
 		 */
 		for (int i = 0; i < 2; i++) {
-			ret = _sys_mem_pool_block_alloc(&p->base, size,
+			ret = z_sys_mem_pool_block_alloc(&p->base, size,
 							&level_num, &block_num,
 							&block->data);
 			if (ret != -EAGAIN) {
@@ -92,7 +92,7 @@
 			return ret;
 		}
 
-		_pend_curr_unlocked(&p->wait_q, timeout);
+		z_pend_curr_unlocked(&p->wait_q, timeout);
 
 		if (timeout != K_FOREVER) {
 			timeout = end - z_tick_get();
@@ -111,21 +111,21 @@
 	int need_sched = 0;
 	struct k_mem_pool *p = get_pool(id->pool);
 
-	_sys_mem_pool_block_free(&p->base, id->level, id->block);
+	z_sys_mem_pool_block_free(&p->base, id->level, id->block);
 
 	/* Wake up anyone blocked on this pool and let them repeat
 	 * their allocation attempts
 	 *
-	 * (Note that this spinlock only exists because _unpend_all()
+	 * (Note that this spinlock only exists because z_unpend_all()
 	 * is unsynchronized.  Maybe we want to put the lock into the
 	 * wait_q instead and make the API safe?)
 	 */
 	k_spinlock_key_t key = k_spin_lock(&lock);
 
-	need_sched = _unpend_all(&p->wait_q);
+	need_sched = z_unpend_all(&p->wait_q);
 
 	if (need_sched) {
-		_reschedule(&lock, key);
+		z_reschedule(&lock, key);
 	} else {
 		k_spin_unlock(&lock, key);
 	}
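
The allocation path above retries z_sys_mem_pool_block_alloc() (blocks can
merge between attempts) and pends otherwise, while the free path wakes every
waiter with z_unpend_all() so each retries its allocation. A sketch; the pool
geometry is illustrative:

K_MEM_POOL_DEFINE(my_pool, 64, 4096, 2, 4); /* 2 max-size blocks of 4 KiB */

static void pool_demo(void)
{
	struct k_mem_block block;

	if (k_mem_pool_alloc(&my_pool, &block, 256, K_FOREVER) == 0) {
		/* ... use block.data ... */
		k_mem_pool_free(&block);
	}
}
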
diff --git a/kernel/msg_q.c b/kernel/msg_q.c
index 2bf87e3..460c05c 100644
--- a/kernel/msg_q.c
+++ b/kernel/msg_q.c
@@ -59,14 +59,14 @@
 	q->write_ptr = buffer;
 	q->used_msgs = 0;
 	q->flags = 0;
-	_waitq_init(&q->wait_q);
+	z_waitq_init(&q->wait_q);
 	q->lock = (struct k_spinlock) {};
 	SYS_TRACING_OBJ_INIT(k_msgq, q);
 
-	_k_object_init(q);
+	z_object_init(q);
 }
 
-int _impl_k_msgq_alloc_init(struct k_msgq *q, size_t msg_size,
+int z_impl_k_msgq_alloc_init(struct k_msgq *q, size_t msg_size,
 			    u32_t max_msgs)
 {
 	void *buffer;
@@ -95,13 +95,13 @@
 {
 	Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(q, K_OBJ_MSGQ));
 
-	return _impl_k_msgq_alloc_init((struct k_msgq *)q, msg_size, max_msgs);
+	return z_impl_k_msgq_alloc_init((struct k_msgq *)q, msg_size, max_msgs);
 }
 #endif
 
 void k_msgq_cleanup(struct k_msgq *q)
 {
-	__ASSERT_NO_MSG(!_waitq_head(&q->wait_q));
+	__ASSERT_NO_MSG(!z_waitq_head(&q->wait_q));
 
 	if ((q->flags & K_MSGQ_FLAG_ALLOC) != 0) {
 		k_free(q->buffer_start);
@@ -110,9 +110,9 @@
 }
 
 
-int _impl_k_msgq_put(struct k_msgq *q, void *data, s32_t timeout)
+int z_impl_k_msgq_put(struct k_msgq *q, void *data, s32_t timeout)
 {
-	__ASSERT(!_is_in_isr() || timeout == K_NO_WAIT, "");
+	__ASSERT(!z_is_in_isr() || timeout == K_NO_WAIT, "");
 
 	k_spinlock_key_t key = k_spin_lock(&q->lock);
 	struct k_thread *pending_thread;
@@ -120,15 +120,15 @@
 
 	if (q->used_msgs < q->max_msgs) {
 		/* message queue isn't full */
-		pending_thread = _unpend_first_thread(&q->wait_q);
+		pending_thread = z_unpend_first_thread(&q->wait_q);
 		if (pending_thread != NULL) {
 			/* give message to waiting thread */
 			(void)memcpy(pending_thread->base.swap_data, data,
 			       q->msg_size);
 			/* wake up waiting thread */
-			_set_thread_return_value(pending_thread, 0);
-			_ready_thread(pending_thread);
-			_reschedule(&q->lock, key);
+			z_set_thread_return_value(pending_thread, 0);
+			z_ready_thread(pending_thread);
+			z_reschedule(&q->lock, key);
 			return 0;
 		} else {
 			/* put message in queue */
@@ -146,7 +146,7 @@
 	} else {
 		/* wait for put message success, failure, or timeout */
 		_current->base.swap_data = data;
-		return _pend_curr(&q->lock, key, &q->wait_q, timeout);
+		return z_pend_curr(&q->lock, key, &q->wait_q, timeout);
 	}
 
 	k_spin_unlock(&q->lock, key);
@@ -162,11 +162,11 @@
 	Z_OOPS(Z_SYSCALL_OBJ(q, K_OBJ_MSGQ));
 	Z_OOPS(Z_SYSCALL_MEMORY_READ(data, q->msg_size));
 
-	return _impl_k_msgq_put(q, (void *)data, timeout);
+	return z_impl_k_msgq_put(q, (void *)data, timeout);
 }
 #endif
 
-void _impl_k_msgq_get_attrs(struct k_msgq *q, struct k_msgq_attrs *attrs)
+void z_impl_k_msgq_get_attrs(struct k_msgq *q, struct k_msgq_attrs *attrs)
 {
 	attrs->msg_size = q->msg_size;
 	attrs->max_msgs = q->max_msgs;
@@ -180,14 +180,14 @@
 
 	Z_OOPS(Z_SYSCALL_OBJ(q, K_OBJ_MSGQ));
 	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(attrs, sizeof(struct k_msgq_attrs)));
-	_impl_k_msgq_get_attrs(q, (struct k_msgq_attrs *) attrs);
+	z_impl_k_msgq_get_attrs(q, (struct k_msgq_attrs *) attrs);
 	return 0;
 }
 #endif
 
-int _impl_k_msgq_get(struct k_msgq *q, void *data, s32_t timeout)
+int z_impl_k_msgq_get(struct k_msgq *q, void *data, s32_t timeout)
 {
-	__ASSERT(!_is_in_isr() || timeout == K_NO_WAIT, "");
+	__ASSERT(!z_is_in_isr() || timeout == K_NO_WAIT, "");
 
 	k_spinlock_key_t key = k_spin_lock(&q->lock);
 	struct k_thread *pending_thread;
@@ -203,7 +203,7 @@
 		q->used_msgs--;
 
 		/* handle first thread waiting to write (if any) */
-		pending_thread = _unpend_first_thread(&q->wait_q);
+		pending_thread = z_unpend_first_thread(&q->wait_q);
 		if (pending_thread != NULL) {
 			/* add thread's message to queue */
 			(void)memcpy(q->write_ptr, pending_thread->base.swap_data,
@@ -215,9 +215,9 @@
 			q->used_msgs++;
 
 			/* wake up waiting thread */
-			_set_thread_return_value(pending_thread, 0);
-			_ready_thread(pending_thread);
-			_reschedule(&q->lock, key);
+			z_set_thread_return_value(pending_thread, 0);
+			z_ready_thread(pending_thread);
+			z_reschedule(&q->lock, key);
 			return 0;
 		}
 		result = 0;
@@ -227,7 +227,7 @@
 	} else {
 		/* wait for get message success or timeout */
 		_current->base.swap_data = data;
-		return _pend_curr(&q->lock, key, &q->wait_q, timeout);
+		return z_pend_curr(&q->lock, key, &q->wait_q, timeout);
 	}
 
 	k_spin_unlock(&q->lock, key);
@@ -243,11 +243,11 @@
 	Z_OOPS(Z_SYSCALL_OBJ(q, K_OBJ_MSGQ));
 	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(data, q->msg_size));
 
-	return _impl_k_msgq_get(q, (void *)data, timeout);
+	return z_impl_k_msgq_get(q, (void *)data, timeout);
 }
 #endif
 
-int _impl_k_msgq_peek(struct k_msgq *q, void *data)
+int z_impl_k_msgq_peek(struct k_msgq *q, void *data)
 {
 	k_spinlock_key_t key = k_spin_lock(&q->lock);
 	int result;
@@ -274,25 +274,25 @@
 	Z_OOPS(Z_SYSCALL_OBJ(q, K_OBJ_MSGQ));
 	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(data, q->msg_size));
 
-	return _impl_k_msgq_peek(q, (void *)data);
+	return z_impl_k_msgq_peek(q, (void *)data);
 }
 #endif
 
-void _impl_k_msgq_purge(struct k_msgq *q)
+void z_impl_k_msgq_purge(struct k_msgq *q)
 {
 	k_spinlock_key_t key = k_spin_lock(&q->lock);
 	struct k_thread *pending_thread;
 
 	/* wake up any threads that are waiting to write */
-	while ((pending_thread = _unpend_first_thread(&q->wait_q)) != NULL) {
-		_set_thread_return_value(pending_thread, -ENOMSG);
-		_ready_thread(pending_thread);
+	while ((pending_thread = z_unpend_first_thread(&q->wait_q)) != NULL) {
+		z_set_thread_return_value(pending_thread, -ENOMSG);
+		z_ready_thread(pending_thread);
 	}
 
 	q->used_msgs = 0;
 	q->read_ptr = q->write_ptr;
 
-	_reschedule(&q->lock, key);
+	z_reschedule(&q->lock, key);
 }
 
 #ifdef CONFIG_USERSPACE
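
The put/get paths above hand a message directly to a pended peer when one
exists, and otherwise use the ring buffer or pend the caller. A usage sketch
(sizes illustrative):

K_MSGQ_DEFINE(my_msgq, sizeof(u32_t), 10, 4);

static void producer(void)
{
	u32_t msg = 42U;

	k_msgq_put(&my_msgq, &msg, K_FOREVER); /* pends while the queue is full */
}

static void consumer(void)
{
	u32_t msg;

	if (k_msgq_get(&my_msgq, &msg, 500) == 0) {
		/* received a message within 500 ms */
	}
}
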
diff --git a/kernel/mutex.c b/kernel/mutex.c
index cd6bf1e..f3c4342 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -75,17 +75,17 @@
 
 #endif /* CONFIG_OBJECT_TRACING */
 
-void _impl_k_mutex_init(struct k_mutex *mutex)
+void z_impl_k_mutex_init(struct k_mutex *mutex)
 {
 	mutex->owner = NULL;
 	mutex->lock_count = 0;
 
 	sys_trace_void(SYS_TRACE_ID_MUTEX_INIT);
 
-	_waitq_init(&mutex->wait_q);
+	z_waitq_init(&mutex->wait_q);
 
 	SYS_TRACING_OBJ_INIT(k_mutex, mutex);
-	_k_object_init(mutex);
+	z_object_init(mutex);
 	sys_trace_end_call(SYS_TRACE_ID_MUTEX_INIT);
 }
 
@@ -93,7 +93,7 @@
 Z_SYSCALL_HANDLER(k_mutex_init, mutex)
 {
 	Z_OOPS(Z_SYSCALL_OBJ_INIT(mutex, K_OBJ_MUTEX));
-	_impl_k_mutex_init((struct k_mutex *)mutex);
+	z_impl_k_mutex_init((struct k_mutex *)mutex);
 
 	return 0;
 }
@@ -101,9 +101,9 @@
 
 static s32_t new_prio_for_inheritance(s32_t target, s32_t limit)
 {
-	int new_prio = _is_prio_higher(target, limit) ? target : limit;
+	int new_prio = z_is_prio_higher(target, limit) ? target : limit;
 
-	new_prio = _get_new_prio_with_ceiling(new_prio);
+	new_prio = z_get_new_prio_with_ceiling(new_prio);
 
 	return new_prio;
 }
@@ -113,21 +113,21 @@
 	if (mutex->owner->base.prio != new_prio) {
 
 		K_DEBUG("%p (ready (y/n): %c) prio changed to %d (was %d)\n",
-			mutex->owner, _is_thread_ready(mutex->owner) ?
+			mutex->owner, z_is_thread_ready(mutex->owner) ?
 			'y' : 'n',
 			new_prio, mutex->owner->base.prio);
 
-		_thread_priority_set(mutex->owner, new_prio);
+		z_thread_priority_set(mutex->owner, new_prio);
 	}
 }
 
-int _impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
+int z_impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
 {
 	int new_prio;
 	k_spinlock_key_t key;
 
 	sys_trace_void(SYS_TRACE_ID_MUTEX_LOCK);
-	_sched_lock();
+	z_sched_lock();
 
 	if (likely((mutex->lock_count == 0U) || (mutex->owner == _current))) {
 
@@ -165,11 +165,11 @@
 
 	K_DEBUG("adjusting prio up on mutex %p\n", mutex);
 
-	if (_is_prio_higher(new_prio, mutex->owner->base.prio)) {
+	if (z_is_prio_higher(new_prio, mutex->owner->base.prio)) {
 		adjust_owner_prio(mutex, new_prio);
 	}
 
-	int got_mutex = _pend_curr(&lock, key, &mutex->wait_q, timeout);
+	int got_mutex = z_pend_curr(&lock, key, &mutex->wait_q, timeout);
 
 	K_DEBUG("on mutex %p got_mutex value: %d\n", mutex, got_mutex);
 
@@ -186,7 +186,7 @@
 
 	K_DEBUG("%p timeout on mutex %p\n", _current, mutex);
 
-	struct k_thread *waiter = _waitq_head(&mutex->wait_q);
+	struct k_thread *waiter = z_waitq_head(&mutex->wait_q);
 
 	new_prio = mutex->owner_orig_prio;
 	new_prio = (waiter != NULL) ?
@@ -209,11 +209,11 @@
 Z_SYSCALL_HANDLER(k_mutex_lock, mutex, timeout)
 {
 	Z_OOPS(Z_SYSCALL_OBJ(mutex, K_OBJ_MUTEX));
-	return _impl_k_mutex_lock((struct k_mutex *)mutex, (s32_t)timeout);
+	return z_impl_k_mutex_lock((struct k_mutex *)mutex, (s32_t)timeout);
 }
 #endif
 
-void _impl_k_mutex_unlock(struct k_mutex *mutex)
+void z_impl_k_mutex_unlock(struct k_mutex *mutex)
 {
 	struct k_thread *new_owner;
 
@@ -221,7 +221,7 @@
 	__ASSERT(mutex->owner == _current, "");
 
 	sys_trace_void(SYS_TRACE_ID_MUTEX_UNLOCK);
-	_sched_lock();
+	z_sched_lock();
 
 	RECORD_STATE_CHANGE();
 
@@ -237,7 +237,7 @@
 
 	adjust_owner_prio(mutex, mutex->owner_orig_prio);
 
-	new_owner = _unpend_first_thread(&mutex->wait_q);
+	new_owner = z_unpend_first_thread(&mutex->wait_q);
 
 	mutex->owner = new_owner;
 
@@ -245,11 +245,11 @@
 		mutex, new_owner, new_owner ? new_owner->base.prio : -1000);
 
 	if (new_owner != NULL) {
-		_ready_thread(new_owner);
+		z_ready_thread(new_owner);
 
 		k_spin_unlock(&lock, key);
 
-		_set_thread_return_value(new_owner, 0);
+		z_set_thread_return_value(new_owner, 0);
 
 		/*
 		 * new owner is already of higher or equal prio than first
@@ -273,7 +273,7 @@
 	Z_OOPS(Z_SYSCALL_OBJ(mutex, K_OBJ_MUTEX));
 	Z_OOPS(Z_SYSCALL_VERIFY(((struct k_mutex *)mutex)->lock_count > 0));
 	Z_OOPS(Z_SYSCALL_VERIFY(((struct k_mutex *)mutex)->owner == _current));
-	_impl_k_mutex_unlock((struct k_mutex *)mutex);
+	z_impl_k_mutex_unlock((struct k_mutex *)mutex);
 	return 0;
 }
 #endif
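
new_prio_for_inheritance() and adjust_owner_prio() above implement priority
inheritance (a numerically lower prio is higher). A worked scenario as a
sketch; thread names and priorities are illustrative:

/* Owner T_low runs at prio 10 and holds the mutex; waiter T_high at prio 2
 * calls k_mutex_lock():
 *
 *   new_prio_for_inheritance(2, 10) -> 2, since z_is_prio_higher(2, 10)
 *   adjust_owner_prio() boosts T_low to 2 via z_thread_priority_set(), so
 *   mid-priority threads cannot starve the waiter.
 *
 * k_mutex_unlock() then restores owner_orig_prio and hands ownership to the
 * head of wait_q via z_unpend_first_thread().
 */
static struct k_mutex my_mutex; /* k_mutex_init(&my_mutex) at startup */

static void contender(void)
{
	if (k_mutex_lock(&my_mutex, 10) == 0) {
		/* critical section */
		k_mutex_unlock(&my_mutex);
	}
}
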
diff --git a/kernel/pipes.c b/kernel/pipes.c
index a51735a..5dada6f 100644
--- a/kernel/pipes.c
+++ b/kernel/pipes.c
@@ -109,7 +109,7 @@
 		async_msg[i].thread.thread_state = _THREAD_DUMMY;
 		async_msg[i].thread.swap_data = &async_msg[i].desc;
 
-		_init_thread_timeout(&async_msg[i].thread);
+		z_init_thread_timeout(&async_msg[i].thread);
 
 		k_stack_push(&pipe_async_msgs, (u32_t)&async_msg[i]);
 	}
@@ -140,13 +140,13 @@
 	pipe->read_index = 0;
 	pipe->write_index = 0;
 	pipe->flags = 0;
-	_waitq_init(&pipe->wait_q.writers);
-	_waitq_init(&pipe->wait_q.readers);
+	z_waitq_init(&pipe->wait_q.writers);
+	z_waitq_init(&pipe->wait_q.readers);
 	SYS_TRACING_OBJ_INIT(k_pipe, pipe);
-	_k_object_init(pipe);
+	z_object_init(pipe);
 }
 
-int _impl_k_pipe_alloc_init(struct k_pipe *pipe, size_t size)
+int z_impl_k_pipe_alloc_init(struct k_pipe *pipe, size_t size)
 {
 	void *buffer;
 	int ret;
@@ -173,14 +173,14 @@
 {
 	Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(pipe, K_OBJ_PIPE));
 
-	return _impl_k_pipe_alloc_init((struct k_pipe *)pipe, size);
+	return z_impl_k_pipe_alloc_init((struct k_pipe *)pipe, size);
 }
 #endif
 
 void k_pipe_cleanup(struct k_pipe *pipe)
 {
-	__ASSERT_NO_MSG(!_waitq_head(&pipe->wait_q.readers));
-	__ASSERT_NO_MSG(!_waitq_head(&pipe->wait_q.writers));
+	__ASSERT_NO_MSG(!z_waitq_head(&pipe->wait_q.readers));
+	__ASSERT_NO_MSG(!z_waitq_head(&pipe->wait_q.writers));
 
 	if ((pipe->flags & K_PIPE_FLAG_ALLOC) != 0) {
 		k_free(pipe->buffer);
@@ -348,7 +348,7 @@
 	sys_dlist_init(xfer_list);
 	num_bytes = 0;
 
-	while ((thread = _waitq_head(wait_q)) != NULL) {
+	while ((thread = z_waitq_head(wait_q)) != NULL) {
 		desc = (struct k_pipe_desc *)thread->base.swap_data;
 		num_bytes += desc->bytes_to_xfer;
 
@@ -368,7 +368,7 @@
 		 * Abort its timeout.
 		 * Add it to the transfer list.
 		 */
-		_unpend_thread(thread);
+		z_unpend_thread(thread);
 		sys_dlist_append(xfer_list, &thread->base.qnode_dlist);
 	}
 
@@ -420,13 +420,13 @@
 	}
 #endif
 
-	_ready_thread(thread);
+	z_ready_thread(thread);
 }
 
 /**
  * @brief Internal API used to send data to a pipe
  */
-int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
+int z_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
 			 unsigned char *data, size_t bytes_to_write,
 			 size_t *bytes_written, size_t min_xfer,
 			 s32_t timeout)
@@ -456,7 +456,7 @@
 		return -EIO;
 	}
 
-	_sched_lock();
+	z_sched_lock();
 	k_spin_unlock(&pipe->lock, key);
 
 	/*
@@ -484,7 +484,7 @@
 		desc->bytes_to_xfer -= bytes_copied;
 
 		/* The thread's read request has been satisfied. Ready it. */
-		_ready_thread(thread);
+		z_ready_thread(thread);
 
 		thread = (struct k_thread *)sys_dlist_get(&xfer_list);
 	}
@@ -533,15 +533,15 @@
 		 * manipulating the writers wait_q.
 		 */
 		k_spinlock_key_t key = k_spin_lock(&pipe->lock);
-		_sched_unlock_no_reschedule();
+		z_sched_unlock_no_reschedule();
 
 		async_desc->desc.buffer = data + num_bytes_written;
 		async_desc->desc.bytes_to_xfer =
 			bytes_to_write - num_bytes_written;
 
-		_pend_thread((struct k_thread *) &async_desc->thread,
+		z_pend_thread((struct k_thread *) &async_desc->thread,
 			     &pipe->wait_q.writers, K_FOREVER);
-		_reschedule(&pipe->lock, key);
+		z_reschedule(&pipe->lock, key);
 		return 0;
 	}
 #endif
@@ -558,8 +558,8 @@
 		 * manipulating the writers wait_q.
 		 */
 		k_spinlock_key_t key = k_spin_lock(&pipe->lock);
-		_sched_unlock_no_reschedule();
-		(void)_pend_curr(&pipe->lock, key,
+		z_sched_unlock_no_reschedule();
+		(void)z_pend_curr(&pipe->lock, key,
 				 &pipe->wait_q.writers, timeout);
 	} else {
 		k_sched_unlock();
@@ -571,7 +571,7 @@
 				 bytes_to_write);
 }
 
-int _impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
+int z_impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
 		     size_t *bytes_read, size_t min_xfer, s32_t timeout)
 {
 	struct k_thread    *writer;
@@ -598,7 +598,7 @@
 		return -EIO;
 	}
 
-	_sched_lock();
+	z_sched_lock();
 	k_spin_unlock(&pipe->lock, key);
 
 	num_bytes_read = pipe_buffer_get(pipe, data, bytes_to_read);
@@ -701,8 +701,8 @@
 		_current->base.swap_data = &pipe_desc;
 		k_spinlock_key_t key = k_spin_lock(&pipe->lock);
 
-		_sched_unlock_no_reschedule();
-		(void)_pend_curr(&pipe->lock, key,
+		z_sched_unlock_no_reschedule();
+		(void)z_pend_curr(&pipe->lock, key,
 				 &pipe->wait_q.readers, timeout);
 	} else {
 		k_sched_unlock();
@@ -726,19 +726,19 @@
 	Z_OOPS(Z_SYSCALL_MEMORY_WRITE((void *)data, bytes_to_read));
 	Z_OOPS(Z_SYSCALL_VERIFY(min_xfer <= bytes_to_read));
 
-	return _impl_k_pipe_get((struct k_pipe *)pipe, (void *)data,
+	return z_impl_k_pipe_get((struct k_pipe *)pipe, (void *)data,
 				bytes_to_read, bytes_read, min_xfer,
 				timeout);
 }
 #endif
 
-int _impl_k_pipe_put(struct k_pipe *pipe, void *data, size_t bytes_to_write,
+int z_impl_k_pipe_put(struct k_pipe *pipe, void *data, size_t bytes_to_write,
 		     size_t *bytes_written, size_t min_xfer, s32_t timeout)
 {
 	__ASSERT(min_xfer <= bytes_to_write, "");
 	__ASSERT(bytes_written != NULL, "");
 
-	return _k_pipe_put_internal(pipe, NULL, data,
+	return z_pipe_put_internal(pipe, NULL, data,
 				    bytes_to_write, bytes_written,
 				    min_xfer, timeout);
 }
@@ -755,7 +755,7 @@
 	Z_OOPS(Z_SYSCALL_MEMORY_READ((void *)data, bytes_to_write));
 	Z_OOPS(Z_SYSCALL_VERIFY(min_xfer <= bytes_to_write));
 
-	return _impl_k_pipe_put((struct k_pipe *)pipe, (void *)data,
+	return z_impl_k_pipe_put((struct k_pipe *)pipe, (void *)data,
 				bytes_to_write, bytes_written, min_xfer,
 				timeout);
 }
@@ -776,7 +776,7 @@
 	async_desc->desc.sem = sem;
 	async_desc->thread.prio = k_thread_priority_get(_current);
 
-	(void) _k_pipe_put_internal(pipe, async_desc, block->data,
+	(void) z_pipe_put_internal(pipe, async_desc, block->data,
 				    bytes_to_write, &dummy_bytes_written,
 				    bytes_to_write, K_FOREVER);
 }
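
z_pipe_put_internal() serves both the synchronous put and the asynchronous
block put above; through the public calls the data flow looks like this
sketch (sizes illustrative):

K_PIPE_DEFINE(my_pipe, 128, 4);

static void pipe_writer(void)
{
	char data[] = "hello";
	size_t written;

	/* min_xfer equal to the full size gives all-or-nothing semantics */
	k_pipe_put(&my_pipe, data, sizeof(data), &written,
		   sizeof(data), K_FOREVER);
}

static void pipe_reader(void)
{
	char buf[8];
	size_t got;

	/* min_xfer of 1 returns as soon as any data arrives */
	k_pipe_get(&my_pipe, buf, sizeof(buf), &got, 1, K_FOREVER);
}
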
diff --git a/kernel/poll.c b/kernel/poll.c
index 1ed661b..a51fa95 100644
--- a/kernel/poll.c
+++ b/kernel/poll.c
@@ -91,15 +91,15 @@
 
 	pending = (struct k_poll_event *)sys_dlist_peek_tail(events);
 	if ((pending == NULL) ||
-		_is_t1_higher_prio_than_t2(pending->poller->thread,
-		poller->thread)) {
+		z_is_t1_higher_prio_than_t2(pending->poller->thread,
+					    poller->thread)) {
 		sys_dlist_append(events, &event->_node);
 		return;
 	}
 
 	SYS_DLIST_FOR_EACH_CONTAINER(events, pending, _node) {
-		if (_is_t1_higher_prio_than_t2(poller->thread,
-					       pending->poller->thread)) {
+		if (z_is_t1_higher_prio_than_t2(poller->thread,
+						pending->poller->thread)) {
 			sys_dlist_insert(&pending->_node, &event->_node);
 			return;
 		}
@@ -188,9 +188,9 @@
 	event->state |= state;
 }
 
-int _impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
+int z_impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 {
-	__ASSERT(!_is_in_isr(), "");
+	__ASSERT(!z_is_in_isr(), "");
 	__ASSERT(events != NULL, "NULL events\n");
 	__ASSERT(num_events > 0, "zero events\n");
 
@@ -238,9 +238,9 @@
 		return -EAGAIN;
 	}
 
-	_wait_q_t wait_q = _WAIT_Q_INIT(&wait_q);
+	_wait_q_t wait_q = Z_WAIT_Q_INIT(&wait_q);
 
-	int swap_rc = _pend_curr(&lock, key, &wait_q, timeout);
+	int swap_rc = z_pend_curr(&lock, key, &wait_q, timeout);
 
 	/*
 	 * Clear all event registrations. If events happen while we're in this
@@ -348,30 +348,30 @@
 
 	event->poller->is_polling = false;
 
-	if (!_is_thread_pending(thread)) {
+	if (!z_is_thread_pending(thread)) {
 		goto ready_event;
 	}
 
-	if (_is_thread_timeout_expired(thread)) {
+	if (z_is_thread_timeout_expired(thread)) {
 		return -EAGAIN;
 	}
 
-	_unpend_thread(thread);
-	_set_thread_return_value(thread,
+	z_unpend_thread(thread);
+	z_set_thread_return_value(thread,
 				 state == K_POLL_STATE_CANCELLED ? -EINTR : 0);
 
-	if (!_is_thread_ready(thread)) {
+	if (!z_is_thread_ready(thread)) {
 		goto ready_event;
 	}
 
-	_ready_thread(thread);
+	z_ready_thread(thread);
 
 ready_event:
 	set_event_ready(event, state);
 	return 0;
 }
 
-void _handle_obj_poll_events(sys_dlist_t *events, u32_t state)
+void z_handle_obj_poll_events(sys_dlist_t *events, u32_t state)
 {
 	struct k_poll_event *poll_event;
 
@@ -381,24 +381,24 @@
 	}
 }
 
-void _impl_k_poll_signal_init(struct k_poll_signal *signal)
+void z_impl_k_poll_signal_init(struct k_poll_signal *signal)
 {
 	sys_dlist_init(&signal->poll_events);
 	signal->signaled = 0;
 	/* signal->result is left uninitialized */
-	_k_object_init(signal);
+	z_object_init(signal);
 }
 
 #ifdef CONFIG_USERSPACE
 Z_SYSCALL_HANDLER(k_poll_signal_init, signal)
 {
 	Z_OOPS(Z_SYSCALL_OBJ_INIT(signal, K_OBJ_POLL_SIGNAL));
-	_impl_k_poll_signal_init((struct k_poll_signal *)signal);
+	z_impl_k_poll_signal_init((struct k_poll_signal *)signal);
 	return 0;
 }
 #endif
 
-void _impl_k_poll_signal_check(struct k_poll_signal *signal,
+void z_impl_k_poll_signal_check(struct k_poll_signal *signal,
 			       unsigned int *signaled, int *result)
 {
 	*signaled = signal->signaled;
@@ -412,13 +412,13 @@
 	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(signaled, sizeof(unsigned int)));
 	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(result, sizeof(int)));
 
-	_impl_k_poll_signal_check((struct k_poll_signal *)signal,
+	z_impl_k_poll_signal_check((struct k_poll_signal *)signal,
 				  (unsigned int *)signaled, (int *)result);
 	return 0;
 }
 #endif
 
-int _impl_k_poll_signal_raise(struct k_poll_signal *signal, int result)
+int z_impl_k_poll_signal_raise(struct k_poll_signal *signal, int result)
 {
 	k_spinlock_key_t key = k_spin_lock(&lock);
 	struct k_poll_event *poll_event;
@@ -434,7 +434,7 @@
 
 	int rc = signal_poll_event(poll_event, K_POLL_STATE_SIGNALED);
 
-	_reschedule(&lock, key);
+	z_reschedule(&lock, key);
 	return rc;
 }
 
@@ -442,7 +442,7 @@
 Z_SYSCALL_HANDLER(k_poll_signal_raise, signal, result)
 {
 	Z_OOPS(Z_SYSCALL_OBJ(signal, K_OBJ_POLL_SIGNAL));
-	return _impl_k_poll_signal_raise((struct k_poll_signal *)signal, result);
+	return z_impl_k_poll_signal_raise((struct k_poll_signal *)signal, result);
 }
 Z_SYSCALL_HANDLER1_SIMPLE_VOID(k_poll_signal_reset, K_OBJ_POLL_SIGNAL,
 			       struct k_poll_signal *);
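
z_impl_k_poll_signal_raise() records the result, marks the signal, and wakes
the registered poller through signal_poll_event(); a usage sketch:

static struct k_poll_signal sig; /* k_poll_signal_init(&sig) at startup */

static void poll_waiter(void)
{
	struct k_poll_event ev = K_POLL_EVENT_INITIALIZER(
		K_POLL_TYPE_SIGNAL, K_POLL_MODE_NOTIFY_ONLY, &sig);

	if (k_poll(&ev, 1, K_FOREVER) == 0 &&
	    ev.state == K_POLL_STATE_SIGNALED) {
		/* sig.result holds the value passed to raise() */
		ev.state = K_POLL_STATE_NOT_READY; /* re-arm for next poll */
	}
}

static void poll_notifier(void)
{
	k_poll_signal_raise(&sig, 0x1337);
}
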
diff --git a/kernel/queue.c b/kernel/queue.c
index a1da9ce..77262f3 100644
--- a/kernel/queue.c
+++ b/kernel/queue.c
@@ -82,17 +82,17 @@
 
 #endif /* CONFIG_OBJECT_TRACING */
 
-void _impl_k_queue_init(struct k_queue *queue)
+void z_impl_k_queue_init(struct k_queue *queue)
 {
 	sys_sflist_init(&queue->data_q);
 	queue->lock = (struct k_spinlock) {};
-	_waitq_init(&queue->wait_q);
+	z_waitq_init(&queue->wait_q);
 #if defined(CONFIG_POLL)
 	sys_dlist_init(&queue->poll_events);
 #endif
 
 	SYS_TRACING_OBJ_INIT(k_queue, queue);
-	_k_object_init(queue);
+	z_object_init(queue);
 }
 
 #ifdef CONFIG_USERSPACE
@@ -101,7 +101,7 @@
 	struct k_queue *queue = (struct k_queue *)queue_ptr;
 
 	Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(queue, K_OBJ_QUEUE));
-	_impl_k_queue_init(queue);
+	z_impl_k_queue_init(queue);
 	return 0;
 }
 #endif
@@ -109,25 +109,25 @@
 #if !defined(CONFIG_POLL)
 static void prepare_thread_to_run(struct k_thread *thread, void *data)
 {
-	_ready_thread(thread);
-	_set_thread_return_value_with_data(thread, 0, data);
+	z_ready_thread(thread);
+	z_set_thread_return_value_with_data(thread, 0, data);
 }
 #endif /* CONFIG_POLL */
 
 #ifdef CONFIG_POLL
 static inline void handle_poll_events(struct k_queue *queue, u32_t state)
 {
-	_handle_obj_poll_events(&queue->poll_events, state);
+	z_handle_obj_poll_events(&queue->poll_events, state);
 }
 #endif
 
-void _impl_k_queue_cancel_wait(struct k_queue *queue)
+void z_impl_k_queue_cancel_wait(struct k_queue *queue)
 {
 	k_spinlock_key_t key = k_spin_lock(&queue->lock);
 #if !defined(CONFIG_POLL)
 	struct k_thread *first_pending_thread;
 
-	first_pending_thread = _unpend_first_thread(&queue->wait_q);
+	first_pending_thread = z_unpend_first_thread(&queue->wait_q);
 
 	if (first_pending_thread != NULL) {
 		prepare_thread_to_run(first_pending_thread, NULL);
@@ -136,7 +136,7 @@
 	handle_poll_events(queue, K_POLL_STATE_CANCELLED);
 #endif /* !CONFIG_POLL */
 
-	_reschedule(&queue->lock, key);
+	z_reschedule(&queue->lock, key);
 }
 
 #ifdef CONFIG_USERSPACE
@@ -151,11 +151,11 @@
 #if !defined(CONFIG_POLL)
 	struct k_thread *first_pending_thread;
 
-	first_pending_thread = _unpend_first_thread(&queue->wait_q);
+	first_pending_thread = z_unpend_first_thread(&queue->wait_q);
 
 	if (first_pending_thread != NULL) {
 		prepare_thread_to_run(first_pending_thread, data);
-		_reschedule(&queue->lock, key);
+		z_reschedule(&queue->lock, key);
 		return 0;
 	}
 #endif /* !CONFIG_POLL */
@@ -181,7 +181,7 @@
 	handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
 #endif /* CONFIG_POLL */
 
-	_reschedule(&queue->lock, key);
+	z_reschedule(&queue->lock, key);
 	return 0;
 }
 
@@ -201,7 +201,7 @@
 	(void)queue_insert(queue, NULL, data, false);
 }
 
-s32_t _impl_k_queue_alloc_append(struct k_queue *queue, void *data)
+s32_t z_impl_k_queue_alloc_append(struct k_queue *queue, void *data)
 {
 	return queue_insert(queue, sys_sflist_peek_tail(&queue->data_q), data,
 			    true);
@@ -212,12 +212,12 @@
 {
 	Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
 
-	return _impl_k_queue_alloc_append((struct k_queue *)queue,
+	return z_impl_k_queue_alloc_append((struct k_queue *)queue,
 					  (void *)data);
 }
 #endif
 
-s32_t _impl_k_queue_alloc_prepend(struct k_queue *queue, void *data)
+s32_t z_impl_k_queue_alloc_prepend(struct k_queue *queue, void *data)
 {
 	return queue_insert(queue, NULL, data, true);
 }
@@ -227,7 +227,7 @@
 {
 	Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
 
-	return _impl_k_queue_alloc_prepend((struct k_queue *)queue,
+	return z_impl_k_queue_alloc_prepend((struct k_queue *)queue,
 					   (void *)data);
 }
 #endif
@@ -241,13 +241,13 @@
 	struct k_thread *thread = NULL;
 
 	if (head != NULL) {
-		thread = _unpend_first_thread(&queue->wait_q);
+		thread = z_unpend_first_thread(&queue->wait_q);
 	}
 
 	while ((head != NULL) && (thread != NULL)) {
 		prepare_thread_to_run(thread, head);
 		head = *(void **)head;
-		thread = _unpend_first_thread(&queue->wait_q);
+		thread = z_unpend_first_thread(&queue->wait_q);
 	}
 
 	if (head != NULL) {
@@ -259,7 +259,7 @@
 	handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
 #endif /* !CONFIG_POLL */
 
-	_reschedule(&queue->lock, key);
+	z_reschedule(&queue->lock, key);
 }
 
 void k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)
@@ -318,7 +318,7 @@
 }
 #endif /* CONFIG_POLL */
 
-void *_impl_k_queue_get(struct k_queue *queue, s32_t timeout)
+void *z_impl_k_queue_get(struct k_queue *queue, s32_t timeout)
 {
 	k_spinlock_key_t key = k_spin_lock(&queue->lock);
 	void *data;
@@ -343,7 +343,7 @@
 	return k_queue_poll(queue, timeout);
 
 #else
-	int ret = _pend_curr(&queue->lock, key, &queue->wait_q, timeout);
+	int ret = z_pend_curr(&queue->lock, key, &queue->wait_q, timeout);
 
 	return (ret != 0) ? NULL : _current->base.swap_data;
 #endif /* CONFIG_POLL */
@@ -356,7 +356,7 @@
 
 	Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
 
-	return (u32_t)_impl_k_queue_get((struct k_queue *)queue, timeout);
+	return (u32_t)z_impl_k_queue_get((struct k_queue *)queue, timeout);
 }
 
 Z_SYSCALL_HANDLER1_SIMPLE(k_queue_is_empty, K_OBJ_QUEUE, struct k_queue *);
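
queue_insert() hands an item directly to a pended getter through
prepare_thread_to_run() when possible, and otherwise links it into data_q. A
sketch; note the first word of each item is reserved for the queue's link:

struct my_item {
	void *reserved; /* queue link word */
	int value;
};

static struct k_queue my_q; /* k_queue_init(&my_q) at startup */

static void queue_demo(void)
{
	static struct my_item item = { .value = 1 };

	k_queue_append(&my_q, &item);

	struct my_item *got = k_queue_get(&my_q, K_NO_WAIT);

	(void)got; /* got == &item; with a timeout the caller would pend */
}
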
diff --git a/kernel/sched.c b/kernel/sched.c
index 63ceca0..829fe20 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -15,31 +15,31 @@
 #include <stdbool.h>
 
 #if defined(CONFIG_SCHED_DUMB)
-#define _priq_run_add		_priq_dumb_add
-#define _priq_run_remove	_priq_dumb_remove
+#define _priq_run_add		z_priq_dumb_add
+#define _priq_run_remove	z_priq_dumb_remove
 # if defined(CONFIG_SCHED_CPU_MASK)
 #  define _priq_run_best	_priq_dumb_mask_best
 # else
-#  define _priq_run_best	_priq_dumb_best
+#  define _priq_run_best	z_priq_dumb_best
 # endif
 #elif defined(CONFIG_SCHED_SCALABLE)
-#define _priq_run_add		_priq_rb_add
-#define _priq_run_remove	_priq_rb_remove
-#define _priq_run_best		_priq_rb_best
+#define _priq_run_add		z_priq_rb_add
+#define _priq_run_remove	z_priq_rb_remove
+#define _priq_run_best		z_priq_rb_best
 #elif defined(CONFIG_SCHED_MULTIQ)
-#define _priq_run_add		_priq_mq_add
-#define _priq_run_remove	_priq_mq_remove
-#define _priq_run_best		_priq_mq_best
+#define _priq_run_add		z_priq_mq_add
+#define _priq_run_remove	z_priq_mq_remove
+#define _priq_run_best		z_priq_mq_best
 #endif
 
 #if defined(CONFIG_WAITQ_SCALABLE)
-#define _priq_wait_add		_priq_rb_add
-#define _priq_wait_remove	_priq_rb_remove
-#define _priq_wait_best		_priq_rb_best
+#define z_priq_wait_add		z_priq_rb_add
+#define _priq_wait_remove	z_priq_rb_remove
+#define _priq_wait_best		z_priq_rb_best
 #elif defined(CONFIG_WAITQ_DUMB)
-#define _priq_wait_add		_priq_dumb_add
-#define _priq_wait_remove	_priq_dumb_remove
-#define _priq_wait_best		_priq_dumb_best
+#define z_priq_wait_add		z_priq_dumb_add
+#define _priq_wait_remove	z_priq_dumb_remove
+#define _priq_wait_best		z_priq_dumb_best
 #endif
 
 /* the only struct z_kernel instance */
@@ -52,7 +52,7 @@
 			!__i.key;					\
 			k_spin_unlock(lck, __key), __i.key = 1)
 
-static inline int _is_preempt(struct k_thread *thread)
+static inline int is_preempt(struct k_thread *thread)
 {
 #ifdef CONFIG_PREEMPT_ENABLED
 	/* explanation in kernel_struct.h */
@@ -73,13 +73,13 @@
 }
 
 #if CONFIG_ASSERT
-static inline int _is_thread_dummy(struct k_thread *thread)
+static inline int is_thread_dummy(struct k_thread *thread)
 {
 	return !!(thread->base.thread_state & _THREAD_DUMMY);
 }
 #endif
 
-static inline bool _is_idle(struct k_thread *thread)
+static inline bool is_idle(struct k_thread *thread)
 {
 #ifdef CONFIG_SMP
 	return thread->base.is_idle;
@@ -90,7 +90,7 @@
 #endif
 }
 
-bool _is_t1_higher_prio_than_t2(struct k_thread *t1, struct k_thread *t2)
+bool z_is_t1_higher_prio_than_t2(struct k_thread *t1, struct k_thread *t2)
 {
 	if (t1->base.prio < t2->base.prio) {
 		return true;
@@ -127,7 +127,7 @@
 	__ASSERT(_current != NULL, "");
 
 	/* Or if we're pended/suspended/dummy (duh) */
-	if (_is_thread_prevented_from_running(_current)) {
+	if (z_is_thread_prevented_from_running(_current)) {
 		return true;
 	}
 
@@ -137,14 +137,14 @@
 	 * hit this.
 	 */
 	if (IS_ENABLED(CONFIG_SWAP_NONATOMIC)
-	    && _is_thread_timeout_active(th)) {
+	    && z_is_thread_timeout_active(th)) {
 		return true;
 	}
 
 	/* Otherwise we have to be running a preemptible thread or
 	 * switching to a metairq
 	 */
-	if (_is_preempt(_current) || is_metairq(th)) {
+	if (is_preempt(_current) || is_metairq(th)) {
 		return true;
 	}
 
@@ -152,7 +152,7 @@
 	 * preemptible priorities (this is sort of an API glitch).
 	 * They must always be preemptible.
 	 */
-	if (!IS_ENABLED(CONFIG_PREEMPT_ENABLED) && _is_idle(_current)) {
+	if (!IS_ENABLED(CONFIG_PREEMPT_ENABLED) && is_idle(_current)) {
 		return true;
 	}
 
@@ -182,7 +182,7 @@
 	/* In uniprocessor mode, we can leave the current thread in
 	 * the queue (actually we have to, otherwise the assembly
 	 * context switch code for all architectures would be
-	 * responsible for putting it back in _Swap and ISR return!),
+	 * responsible for putting it back in z_swap and ISR return!),
 	 * which makes this choice simple.
 	 */
 	struct k_thread *th = _priq_run_best(&_kernel.ready_q.runq);
@@ -199,8 +199,8 @@
 	 * "ready", it means "is _current already added back to the
 	 * queue such that we don't want to re-add it".
 	 */
-	int queued = _is_thread_queued(_current);
-	int active = !_is_thread_prevented_from_running(_current);
+	int queued = z_is_thread_queued(_current);
+	int active = !z_is_thread_prevented_from_running(_current);
 
 	/* Choose the best thread that is not current */
 	struct k_thread *th = _priq_run_best(&_kernel.ready_q.runq);
@@ -210,7 +210,7 @@
 
 	if (active) {
 		if (!queued &&
-		    !_is_t1_higher_prio_than_t2(th, _current)) {
+		    !z_is_t1_higher_prio_than_t2(th, _current)) {
 			th = _current;
 		}
 
@@ -220,16 +220,16 @@
 	}
 
 	/* Put _current back into the queue */
-	if (th != _current && active && !_is_idle(_current) && !queued) {
+	if (th != _current && active && !is_idle(_current) && !queued) {
 		_priq_run_add(&_kernel.ready_q.runq, _current);
-		_mark_thread_as_queued(_current);
+		z_mark_thread_as_queued(_current);
 	}
 
 	/* Take the new _current out of the queue */
-	if (_is_thread_queued(th)) {
+	if (z_is_thread_queued(th)) {
 		_priq_run_remove(&_kernel.ready_q.runq, th);
 	}
-	_mark_thread_as_not_queued(th);
+	z_mark_thread_as_not_queued(th);
 
 	return th;
 #endif
@@ -241,7 +241,7 @@
 static int slice_max_prio;
 
 #ifdef CONFIG_SWAP_NONATOMIC
-/* If _Swap() isn't atomic, then it's possible for a timer interrupt
+/* If z_swap() isn't atomic, then it's possible for a timer interrupt
  * to try to timeslice away _current after it has already pended
  * itself but before the corresponding context switch.  Treat that as
  * a noop condition in z_time_slice().
@@ -264,7 +264,7 @@
 {
 	LOCKED(&sched_spinlock) {
 		_current_cpu->slice_ticks = 0;
-		slice_time = _ms_to_ticks(slice);
+		slice_time = z_ms_to_ticks(slice);
 		slice_max_prio = prio;
 		reset_time_slice();
 	}
@@ -272,10 +272,10 @@
 
 static inline int sliceable(struct k_thread *t)
 {
-	return _is_preempt(t)
-		&& !_is_prio_higher(t->base.prio, slice_max_prio)
-		&& !_is_idle(t)
-		&& !_is_thread_timeout_active(t);
+	return is_preempt(t)
+		&& !z_is_prio_higher(t->base.prio, slice_max_prio)
+		&& !is_idle(t)
+		&& !z_is_thread_timeout_active(t);
 }
 
 /* Called out of each timer interrupt */
@@ -292,7 +292,7 @@
 
 	if (slice_time && sliceable(_current)) {
 		if (ticks >= _current_cpu->slice_ticks) {
-			_move_thread_to_end_of_prio_q(_current);
+			z_move_thread_to_end_of_prio_q(_current);
 			reset_time_slice();
 		} else {
 			_current_cpu->slice_ticks -= ticks;
@@ -328,31 +328,31 @@
 #endif
 }
 
-void _add_thread_to_ready_q(struct k_thread *thread)
+void z_add_thread_to_ready_q(struct k_thread *thread)
 {
 	LOCKED(&sched_spinlock) {
 		_priq_run_add(&_kernel.ready_q.runq, thread);
-		_mark_thread_as_queued(thread);
+		z_mark_thread_as_queued(thread);
 		update_cache(0);
 	}
 }
 
-void _move_thread_to_end_of_prio_q(struct k_thread *thread)
+void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
 {
 	LOCKED(&sched_spinlock) {
 		_priq_run_remove(&_kernel.ready_q.runq, thread);
 		_priq_run_add(&_kernel.ready_q.runq, thread);
-		_mark_thread_as_queued(thread);
+		z_mark_thread_as_queued(thread);
 		update_cache(thread == _current);
 	}
 }
 
-void _remove_thread_from_ready_q(struct k_thread *thread)
+void z_remove_thread_from_ready_q(struct k_thread *thread)
 {
 	LOCKED(&sched_spinlock) {
-		if (_is_thread_queued(thread)) {
+		if (z_is_thread_queued(thread)) {
 			_priq_run_remove(&_kernel.ready_q.runq, thread);
-			_mark_thread_as_not_queued(thread);
+			z_mark_thread_as_not_queued(thread);
 			update_cache(thread == _current);
 		}
 	}
@@ -360,26 +360,26 @@
 
 static void pend(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
 {
-	_remove_thread_from_ready_q(thread);
-	_mark_thread_as_pending(thread);
+	z_remove_thread_from_ready_q(thread);
+	z_mark_thread_as_pending(thread);
 
 	if (wait_q != NULL) {
 		thread->base.pended_on = wait_q;
-		_priq_wait_add(&wait_q->waitq, thread);
+		z_priq_wait_add(&wait_q->waitq, thread);
 	}
 
 	if (timeout != K_FOREVER) {
-		s32_t ticks = _TICK_ALIGN + _ms_to_ticks(timeout);
+		s32_t ticks = _TICK_ALIGN + z_ms_to_ticks(timeout);
 
-		_add_thread_timeout(thread, ticks);
+		z_add_thread_timeout(thread, ticks);
 	}
 
 	sys_trace_thread_pend(thread);
 }
 
-void _pend_thread(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
+void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
 {
-	__ASSERT_NO_MSG(thread == _current || _is_thread_dummy(thread));
+	__ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread));
 	pend(thread, wait_q, timeout);
 }
 
@@ -390,7 +390,7 @@
 	return thread->base.pended_on;
 }
 
-ALWAYS_INLINE struct k_thread *_find_first_thread_to_unpend(_wait_q_t *wait_q,
+ALWAYS_INLINE struct k_thread *z_find_first_thread_to_unpend(_wait_q_t *wait_q,
 						     struct k_thread *from)
 {
 	ARG_UNUSED(from);
@@ -404,11 +404,11 @@
 	return ret;
 }
 
-ALWAYS_INLINE void _unpend_thread_no_timeout(struct k_thread *thread)
+ALWAYS_INLINE void z_unpend_thread_no_timeout(struct k_thread *thread)
 {
 	LOCKED(&sched_spinlock) {
 		_priq_wait_remove(&pended_on(thread)->waitq, thread);
-		_mark_thread_as_not_pending(thread);
+		z_mark_thread_as_not_pending(thread);
 	}
 
 	thread->base.pended_on = NULL;
@@ -421,47 +421,47 @@
 	struct k_thread *th = CONTAINER_OF(to, struct k_thread, base.timeout);
 
 	if (th->base.pended_on != NULL) {
-		_unpend_thread_no_timeout(th);
+		z_unpend_thread_no_timeout(th);
 	}
-	_mark_thread_as_started(th);
-	_ready_thread(th);
+	z_mark_thread_as_started(th);
+	z_ready_thread(th);
 }
 #endif
 
-int _pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, s32_t timeout)
+int z_pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, s32_t timeout)
 {
 #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
 	pending_current = _current;
 #endif
 	pend(_current, wait_q, timeout);
-	return _Swap_irqlock(key);
+	return z_swap_irqlock(key);
 }
 
-int _pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
+int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
 	       _wait_q_t *wait_q, s32_t timeout)
 {
 #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
 	pending_current = _current;
 #endif
 	pend(_current, wait_q, timeout);
-	return _Swap(lock, key);
+	return z_swap(lock, key);
 }
 
-struct k_thread *_unpend_first_thread(_wait_q_t *wait_q)
+struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q)
 {
-	struct k_thread *t = _unpend1_no_timeout(wait_q);
+	struct k_thread *t = z_unpend1_no_timeout(wait_q);
 
 	if (t != NULL) {
-		(void)_abort_thread_timeout(t);
+		(void)z_abort_thread_timeout(t);
 	}
 
 	return t;
 }
 
-void _unpend_thread(struct k_thread *thread)
+void z_unpend_thread(struct k_thread *thread)
 {
-	_unpend_thread_no_timeout(thread);
-	(void)_abort_thread_timeout(thread);
+	z_unpend_thread_no_timeout(thread);
+	(void)z_abort_thread_timeout(thread);
 }
 
 /* FIXME: this API is glitchy when used in SMP.  If the thread is
@@ -471,12 +471,12 @@
  * priorities on either _current or a pended thread, though, so it's
  * fine for now.
  */
-void _thread_priority_set(struct k_thread *thread, int prio)
+void z_thread_priority_set(struct k_thread *thread, int prio)
 {
 	bool need_sched = 0;
 
 	LOCKED(&sched_spinlock) {
-		need_sched = _is_thread_ready(thread);
+		need_sched = z_is_thread_ready(thread);
 
 		if (need_sched) {
 			_priq_run_remove(&_kernel.ready_q.runq, thread);
@@ -490,7 +490,7 @@
 	sys_trace_thread_priority_set(thread);
 
 	if (need_sched && _current->base.sched_locked == 0) {
-		_reschedule_unlocked();
+		z_reschedule_unlocked();
 	}
 }
 
@@ -503,22 +503,22 @@
 	_current_cpu->swap_ok = 0;
 #endif
 
-	return !_is_in_isr();
+	return !z_is_in_isr();
 }
 
-void _reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
+void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
 {
 	if (resched()) {
-		_Swap(lock, key);
+		z_swap(lock, key);
 	} else {
 		k_spin_unlock(lock, key);
 	}
 }
 
-void _reschedule_irqlock(u32_t key)
+void z_reschedule_irqlock(u32_t key)
 {
 	if (resched()) {
-		_Swap_irqlock(key);
+		z_swap_irqlock(key);
 	} else {
 		irq_unlock(key);
 	}
@@ -527,7 +527,7 @@
 void k_sched_lock(void)
 {
 	LOCKED(&sched_spinlock) {
-		_sched_lock();
+		z_sched_lock();
 	}
 }
 
@@ -535,7 +535,7 @@
 {
 #ifdef CONFIG_PREEMPT_ENABLED
 	__ASSERT(_current->base.sched_locked != 0, "");
-	__ASSERT(!_is_in_isr(), "");
+	__ASSERT(!z_is_in_isr(), "");
 
 	LOCKED(&sched_spinlock) {
 		++_current->base.sched_locked;
@@ -545,12 +545,12 @@
 	K_DEBUG("scheduler unlocked (%p:%d)\n",
 		_current, _current->base.sched_locked);
 
-	_reschedule_unlocked();
+	z_reschedule_unlocked();
 #endif
 }
 
 #ifdef CONFIG_SMP
-struct k_thread *_get_next_ready_thread(void)
+struct k_thread *z_get_next_ready_thread(void)
 {
 	struct k_thread *ret = 0;
 
@@ -563,7 +563,7 @@
 #endif
 
 #ifdef CONFIG_USE_SWITCH
-void *_get_next_switch_handle(void *interrupted)
+void *z_get_next_switch_handle(void *interrupted)
 {
 	_current->switch_handle = interrupted;
 
@@ -588,26 +588,26 @@
 #ifdef CONFIG_TRACING
 	sys_trace_thread_switched_out();
 #endif
-	_current = _get_next_ready_thread();
+	_current = z_get_next_ready_thread();
 #ifdef CONFIG_TRACING
 	sys_trace_thread_switched_in();
 #endif
 #endif
 
-	_check_stack_sentinel();
+	z_check_stack_sentinel();
 
 	return _current->switch_handle;
 }
 #endif
 
-ALWAYS_INLINE void _priq_dumb_add(sys_dlist_t *pq, struct k_thread *thread)
+ALWAYS_INLINE void z_priq_dumb_add(sys_dlist_t *pq, struct k_thread *thread)
 {
 	struct k_thread *t;
 
-	__ASSERT_NO_MSG(!_is_idle(thread));
+	__ASSERT_NO_MSG(!is_idle(thread));
 
 	SYS_DLIST_FOR_EACH_CONTAINER(pq, t, base.qnode_dlist) {
-		if (_is_t1_higher_prio_than_t2(thread, t)) {
+		if (z_is_t1_higher_prio_than_t2(thread, t)) {
 			sys_dlist_insert(&t->base.qnode_dlist,
 					 &thread->base.qnode_dlist);
 			return;
@@ -617,21 +617,21 @@
 	sys_dlist_append(pq, &thread->base.qnode_dlist);
 }
 
-void _priq_dumb_remove(sys_dlist_t *pq, struct k_thread *thread)
+void z_priq_dumb_remove(sys_dlist_t *pq, struct k_thread *thread)
 {
 #if defined(CONFIG_SWAP_NONATOMIC) && defined(CONFIG_SCHED_DUMB)
 	if (pq == &_kernel.ready_q.runq && thread == _current &&
-	    _is_thread_prevented_from_running(thread)) {
+	    z_is_thread_prevented_from_running(thread)) {
 		return;
 	}
 #endif
 
-	__ASSERT_NO_MSG(!_is_idle(thread));
+	__ASSERT_NO_MSG(!is_idle(thread));
 
 	sys_dlist_remove(&thread->base.qnode_dlist);
 }
 
-struct k_thread *_priq_dumb_best(sys_dlist_t *pq)
+struct k_thread *z_priq_dumb_best(sys_dlist_t *pq)
 {
 	struct k_thread *t = NULL;
 	sys_dnode_t *n = sys_dlist_peek_head(pq);
@@ -642,27 +642,27 @@
 	return t;
 }
 
-bool _priq_rb_lessthan(struct rbnode *a, struct rbnode *b)
+bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b)
 {
 	struct k_thread *ta, *tb;
 
 	ta = CONTAINER_OF(a, struct k_thread, base.qnode_rb);
 	tb = CONTAINER_OF(b, struct k_thread, base.qnode_rb);
 
-	if (_is_t1_higher_prio_than_t2(ta, tb)) {
+	if (z_is_t1_higher_prio_than_t2(ta, tb)) {
 		return true;
-	} else if (_is_t1_higher_prio_than_t2(tb, ta)) {
+	} else if (z_is_t1_higher_prio_than_t2(tb, ta)) {
 		return false;
 	} else {
 		return ta->base.order_key < tb->base.order_key ? 1 : 0;
 	}
 }
 
-void _priq_rb_add(struct _priq_rb *pq, struct k_thread *thread)
+void z_priq_rb_add(struct _priq_rb *pq, struct k_thread *thread)
 {
 	struct k_thread *t;
 
-	__ASSERT_NO_MSG(!_is_idle(thread));
+	__ASSERT_NO_MSG(!is_idle(thread));
 
 	thread->base.order_key = pq->next_order_key++;
 
@@ -681,15 +681,15 @@
 	rb_insert(&pq->tree, &thread->base.qnode_rb);
 }
 
-void _priq_rb_remove(struct _priq_rb *pq, struct k_thread *thread)
+void z_priq_rb_remove(struct _priq_rb *pq, struct k_thread *thread)
 {
 #if defined(CONFIG_SWAP_NONATOMIC) && defined(CONFIG_SCHED_SCALABLE)
 	if (pq == &_kernel.ready_q.runq && thread == _current &&
-	    _is_thread_prevented_from_running(thread)) {
+	    z_is_thread_prevented_from_running(thread)) {
 		return;
 	}
 #endif
-	__ASSERT_NO_MSG(!_is_idle(thread));
+	__ASSERT_NO_MSG(!is_idle(thread));
 
 	rb_remove(&pq->tree, &thread->base.qnode_rb);
 
@@ -698,7 +698,7 @@
 	}
 }
 
-struct k_thread *_priq_rb_best(struct _priq_rb *pq)
+struct k_thread *z_priq_rb_best(struct _priq_rb *pq)
 {
 	struct k_thread *t = NULL;
 	struct rbnode *n = rb_get_min(&pq->tree);
@@ -715,7 +715,7 @@
 # endif
 #endif
 
-ALWAYS_INLINE void _priq_mq_add(struct _priq_mq *pq, struct k_thread *thread)
+ALWAYS_INLINE void z_priq_mq_add(struct _priq_mq *pq, struct k_thread *thread)
 {
 	int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO;
 
@@ -723,11 +723,11 @@
 	pq->bitmask |= (1 << priority_bit);
 }
 
-ALWAYS_INLINE void _priq_mq_remove(struct _priq_mq *pq, struct k_thread *thread)
+ALWAYS_INLINE void z_priq_mq_remove(struct _priq_mq *pq, struct k_thread *thread)
 {
 #if defined(CONFIG_SWAP_NONATOMIC) && defined(CONFIG_SCHED_MULTIQ)
 	if (pq == &_kernel.ready_q.runq && thread == _current &&
-	    _is_thread_prevented_from_running(thread)) {
+	    z_is_thread_prevented_from_running(thread)) {
 		return;
 	}
 #endif
@@ -739,7 +739,7 @@
 	}
 }
 
-struct k_thread *_priq_mq_best(struct _priq_mq *pq)
+struct k_thread *z_priq_mq_best(struct _priq_mq *pq)
 {
 	if (!pq->bitmask) {
 		return NULL;
@@ -755,21 +755,21 @@
 	return t;
 }
 
-int _unpend_all(_wait_q_t *wait_q)
+int z_unpend_all(_wait_q_t *wait_q)
 {
 	int need_sched = 0;
 	struct k_thread *th;
 
-	while ((th = _waitq_head(wait_q)) != NULL) {
-		_unpend_thread(th);
-		_ready_thread(th);
+	while ((th = z_waitq_head(wait_q)) != NULL) {
+		z_unpend_thread(th);
+		z_ready_thread(th);
 		need_sched = 1;
 	}
 
 	return need_sched;
 }
 
-void _sched_init(void)
+void z_sched_init(void)
 {
 #ifdef CONFIG_SCHED_DUMB
 	sys_dlist_init(&_kernel.ready_q.runq);
@@ -778,7 +778,7 @@
 #ifdef CONFIG_SCHED_SCALABLE
 	_kernel.ready_q.runq = (struct _priq_rb) {
 		.tree = {
-			.lessthan_fn = _priq_rb_lessthan,
+			.lessthan_fn = z_priq_rb_lessthan,
 		}
 	};
 #endif
@@ -795,7 +795,7 @@
 #endif
 }
 
-int _impl_k_thread_priority_get(k_tid_t thread)
+int z_impl_k_thread_priority_get(k_tid_t thread)
 {
 	return thread->base.prio;
 }
@@ -805,18 +805,18 @@
 			  struct k_thread *);
 #endif
 
-void _impl_k_thread_priority_set(k_tid_t tid, int prio)
+void z_impl_k_thread_priority_set(k_tid_t tid, int prio)
 {
 	/*
 	 * Use NULL, since we cannot know what the entry point is (we do not
 	 * keep track of it) and idle cannot change its priority.
 	 */
-	_ASSERT_VALID_PRIO(prio, NULL);
-	__ASSERT(!_is_in_isr(), "");
+	Z_ASSERT_VALID_PRIO(prio, NULL);
+	__ASSERT(!z_is_in_isr(), "");
 
 	struct k_thread *thread = (struct k_thread *)tid;
 
-	_thread_priority_set(thread, prio);
+	z_thread_priority_set(thread, prio);
 }
 
 #ifdef CONFIG_USERSPACE
@@ -831,19 +831,19 @@
 				    "thread priority may only be downgraded (%d < %d)",
 				    prio, thread->base.prio));
 
-	_impl_k_thread_priority_set((k_tid_t)thread, prio);
+	z_impl_k_thread_priority_set((k_tid_t)thread, prio);
 	return 0;
 }
 #endif
 
 #ifdef CONFIG_SCHED_DEADLINE
-void _impl_k_thread_deadline_set(k_tid_t tid, int deadline)
+void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline)
 {
 	struct k_thread *th = tid;
 
 	LOCKED(&sched_spinlock) {
 		th->base.prio_deadline = k_cycle_get_32() + deadline;
-		if (_is_thread_queued(th)) {
+		if (z_is_thread_queued(th)) {
 			_priq_run_remove(&_kernel.ready_q.runq, th);
 			_priq_run_add(&_kernel.ready_q.runq, th);
 		}
@@ -860,17 +860,17 @@
 				    "invalid thread deadline %d",
 				    (int)deadline));
 
-	_impl_k_thread_deadline_set((k_tid_t)thread, deadline);
+	z_impl_k_thread_deadline_set((k_tid_t)thread, deadline);
 	return 0;
 }
 #endif
 #endif
 
-void _impl_k_yield(void)
+void z_impl_k_yield(void)
 {
-	__ASSERT(!_is_in_isr(), "");
+	__ASSERT(!z_is_in_isr(), "");
 
-	if (!_is_idle(_current)) {
+	if (!is_idle(_current)) {
 		LOCKED(&sched_spinlock) {
 			_priq_run_remove(&_kernel.ready_q.runq, _current);
 			_priq_run_add(&_kernel.ready_q.runq, _current);
@@ -878,20 +878,20 @@
 		}
 	}
 
-	_Swap_unlocked();
+	z_swap_unlocked();
 }
 
 #ifdef CONFIG_USERSPACE
 Z_SYSCALL_HANDLER0_SIMPLE_VOID(k_yield);
 #endif
 
-s32_t _impl_k_sleep(s32_t duration)
+s32_t z_impl_k_sleep(s32_t duration)
 {
 #ifdef CONFIG_MULTITHREADING
 	u32_t expected_wakeup_time;
 	s32_t ticks;
 
-	__ASSERT(!_is_in_isr(), "");
+	__ASSERT(!z_is_in_isr(), "");
 	__ASSERT(duration != K_FOREVER, "");
 
 	K_DEBUG("thread %p for %d ns\n", _current, duration);
@@ -902,7 +902,7 @@
 		return 0;
 	}
 
-	ticks = _TICK_ALIGN + _ms_to_ticks(duration);
+	ticks = _TICK_ALIGN + z_ms_to_ticks(duration);
 	expected_wakeup_time = ticks + z_tick_get_32();
 
 	/* Spinlock purely for local interrupt locking to prevent us
@@ -915,10 +915,10 @@
 #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
 	pending_current = _current;
 #endif
-	_remove_thread_from_ready_q(_current);
-	_add_thread_timeout(_current, ticks);
+	z_remove_thread_from_ready_q(_current);
+	z_add_thread_timeout(_current, ticks);
 
-	(void)_Swap(&local_lock, key);
+	(void)z_swap(&local_lock, key);
 
 	ticks = expected_wakeup_time - z_tick_get_32();
 	if (ticks > 0) {
@@ -938,24 +938,24 @@
 	Z_OOPS(Z_SYSCALL_VERIFY_MSG(duration != K_FOREVER,
 				    "sleeping forever not allowed"));
 
-	return _impl_k_sleep(duration);
+	return z_impl_k_sleep(duration);
 }
 #endif
 
-void _impl_k_wakeup(k_tid_t thread)
+void z_impl_k_wakeup(k_tid_t thread)
 {
-	if (_is_thread_pending(thread)) {
+	if (z_is_thread_pending(thread)) {
 		return;
 	}
 
-	if (_abort_thread_timeout(thread) < 0) {
+	if (z_abort_thread_timeout(thread) < 0) {
 		return;
 	}
 
-	_ready_thread(thread);
+	z_ready_thread(thread);
 
-	if (!_is_in_isr()) {
-		_reschedule_unlocked();
+	if (!z_is_in_isr()) {
+		z_reschedule_unlocked();
 	}
 }
 
@@ -963,7 +963,7 @@
 Z_SYSCALL_HANDLER1_SIMPLE_VOID(k_wakeup, K_OBJ_THREAD, k_tid_t);
 #endif
 
-k_tid_t _impl_k_current_get(void)
+k_tid_t z_impl_k_current_get(void)
 {
 	return _current;
 }
@@ -972,9 +972,9 @@
 Z_SYSCALL_HANDLER0_SIMPLE(k_current_get);
 #endif
 
-int _impl_k_is_preempt_thread(void)
+int z_impl_k_is_preempt_thread(void)
 {
-	return !_is_in_isr() && _is_preempt(_current);
+	return !z_is_in_isr() && is_preempt(_current);
 }
 
 #ifdef CONFIG_USERSPACE
@@ -993,7 +993,7 @@
 	int ret = 0;
 
 	LOCKED(&sched_spinlock) {
-		if (_is_thread_prevented_from_running(t)) {
+		if (z_is_thread_prevented_from_running(t)) {
 			t->base.cpu_mask |= enable_mask;
 			t->base.cpu_mask  &= ~disable_mask;
 		} else {
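
The scheduler hunks above touch only kernel-internal symbols; the public
entry points (k_sched_time_slice_set(), k_yield(), k_sleep(), k_wakeup())
keep their names and now dispatch to the z_impl_ variants. A minimal
caller sketch for reference (hypothetical application code, not part of
this patch):

    #include <zephyr.h>

    void enable_round_robin(void)
    {
            /* 20 ms slices for preemptible threads at or below
             * priority 0; the value is converted internally through
             * the renamed z_ms_to_ticks().
             */
            k_sched_time_slice_set(20, 0);
    }
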
diff --git a/kernel/sem.c b/kernel/sem.c
index e73b654..e06d4ef 100644
--- a/kernel/sem.c
+++ b/kernel/sem.c
@@ -64,7 +64,7 @@
 
 #endif /* CONFIG_OBJECT_TRACING */
 
-void _impl_k_sem_init(struct k_sem *sem, unsigned int initial_count,
+void z_impl_k_sem_init(struct k_sem *sem, unsigned int initial_count,
 		      unsigned int limit)
 {
 	__ASSERT(limit != 0U, "limit cannot be zero");
@@ -73,14 +73,14 @@
 	sys_trace_void(SYS_TRACE_ID_SEMA_INIT);
 	sem->count = initial_count;
 	sem->limit = limit;
-	_waitq_init(&sem->wait_q);
+	z_waitq_init(&sem->wait_q);
 #if defined(CONFIG_POLL)
 	sys_dlist_init(&sem->poll_events);
 #endif
 
 	SYS_TRACING_OBJ_INIT(k_sem, sem);
 
-	_k_object_init(sem);
+	z_object_init(sem);
 	sys_trace_end_call(SYS_TRACE_ID_SEMA_INIT);
 }
 
@@ -89,7 +89,7 @@
 {
 	Z_OOPS(Z_SYSCALL_OBJ_INIT(sem, K_OBJ_SEM));
 	Z_OOPS(Z_SYSCALL_VERIFY(limit != 0 && initial_count <= limit));
-	_impl_k_sem_init((struct k_sem *)sem, initial_count, limit);
+	z_impl_k_sem_init((struct k_sem *)sem, initial_count, limit);
 	return 0;
 }
 #endif
@@ -97,7 +97,7 @@
 static inline void handle_poll_events(struct k_sem *sem)
 {
 #ifdef CONFIG_POLL
-	_handle_obj_poll_events(&sem->poll_events, K_POLL_STATE_SEM_AVAILABLE);
+	z_handle_obj_poll_events(&sem->poll_events, K_POLL_STATE_SEM_AVAILABLE);
 #else
 	ARG_UNUSED(sem);
 #endif
@@ -110,34 +110,34 @@
 
 static void do_sem_give(struct k_sem *sem)
 {
-	struct k_thread *thread = _unpend_first_thread(&sem->wait_q);
+	struct k_thread *thread = z_unpend_first_thread(&sem->wait_q);
 
 	if (thread != NULL) {
-		_ready_thread(thread);
-		_set_thread_return_value(thread, 0);
+		z_ready_thread(thread);
+		z_set_thread_return_value(thread, 0);
 	} else {
 		increment_count_up_to_limit(sem);
 		handle_poll_events(sem);
 	}
 }
 
-void _impl_k_sem_give(struct k_sem *sem)
+void z_impl_k_sem_give(struct k_sem *sem)
 {
 	k_spinlock_key_t key = k_spin_lock(&lock);
 
 	sys_trace_void(SYS_TRACE_ID_SEMA_GIVE);
 	do_sem_give(sem);
 	sys_trace_end_call(SYS_TRACE_ID_SEMA_GIVE);
-	_reschedule(&lock, key);
+	z_reschedule(&lock, key);
 }
 
 #ifdef CONFIG_USERSPACE
 Z_SYSCALL_HANDLER1_SIMPLE_VOID(k_sem_give, K_OBJ_SEM, struct k_sem *);
 #endif
 
-int _impl_k_sem_take(struct k_sem *sem, s32_t timeout)
+int z_impl_k_sem_take(struct k_sem *sem, s32_t timeout)
 {
-	__ASSERT(((_is_in_isr() == false) || (timeout == K_NO_WAIT)), "");
+	__ASSERT(((z_is_in_isr() == false) || (timeout == K_NO_WAIT)), "");
 
 	sys_trace_void(SYS_TRACE_ID_SEMA_TAKE);
 	k_spinlock_key_t key = k_spin_lock(&lock);
@@ -157,7 +157,7 @@
 
 	sys_trace_end_call(SYS_TRACE_ID_SEMA_TAKE);
 
-	int ret = _pend_curr(&lock, key, &sem->wait_q, timeout);
+	int ret = z_pend_curr(&lock, key, &sem->wait_q, timeout);
 	return ret;
 }
 
@@ -165,7 +165,7 @@
 Z_SYSCALL_HANDLER(k_sem_take, sem, timeout)
 {
 	Z_OOPS(Z_SYSCALL_OBJ(sem, K_OBJ_SEM));
-	return _impl_k_sem_take((struct k_sem *)sem, timeout);
+	return z_impl_k_sem_take((struct k_sem *)sem, timeout);
 }
 
 Z_SYSCALL_HANDLER1_SIMPLE_VOID(k_sem_reset, K_OBJ_SEM, struct k_sem *);
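
The semaphore changes follow the same pattern: only the z_impl_ side is
renamed, while the k_sem_*() wrappers emitted by
scripts/gen_syscall_header.py keep their public names and now resolve to
the z_impl_ symbols. Caller code is unaffected, e.g. (hypothetical
example):

    #include <zephyr.h>

    K_SEM_DEFINE(my_sem, 0, 1);

    void consumer(void)
    {
            /* Lands in z_impl_k_sem_take() from kernel mode, or in the
             * Z_SYSCALL_HANDLER() verifier from user mode.
             */
            if (k_sem_take(&my_sem, 100) == 0) {
                    k_sem_give(&my_sem);
            }
    }
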
diff --git a/kernel/smp.c b/kernel/smp.c
index d0778f7..f029987 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -13,9 +13,9 @@
 #ifdef CONFIG_SMP
 static atomic_t global_lock;
 
-unsigned int _smp_global_lock(void)
+unsigned int z_smp_global_lock(void)
 {
-	unsigned int key = _arch_irq_lock();
+	unsigned int key = z_arch_irq_lock();
 
 	if (!_current->base.global_lock_count) {
 		while (!atomic_cas(&global_lock, 0, 1)) {
@@ -27,7 +27,7 @@
 	return key;
 }
 
-void _smp_global_unlock(unsigned int key)
+void z_smp_global_unlock(unsigned int key)
 {
 	if (_current->base.global_lock_count) {
 		_current->base.global_lock_count--;
@@ -37,13 +37,13 @@
 		}
 	}
 
-	_arch_irq_unlock(key);
+	z_arch_irq_unlock(key);
 }
 
-void _smp_reacquire_global_lock(struct k_thread *thread)
+void z_smp_reacquire_global_lock(struct k_thread *thread)
 {
 	if (thread->base.global_lock_count) {
-		_arch_irq_lock();
+		z_arch_irq_lock();
 
 		while (!atomic_cas(&global_lock, 0, 1)) {
 		}
@@ -51,8 +51,8 @@
 }
 
 
-/* Called from within _Swap(), so assumes lock already held */
-void _smp_release_global_lock(struct k_thread *thread)
+/* Called from within z_swap(), so assumes lock already held */
+void z_smp_release_global_lock(struct k_thread *thread)
 {
 	if (!thread->base.global_lock_count) {
 		atomic_clear(&global_lock);
@@ -83,9 +83,9 @@
 		.base.thread_state = _THREAD_DUMMY,
 	};
 
-	_arch_curr_cpu()->current = &dummy_thread;
+	z_arch_curr_cpu()->current = &dummy_thread;
 	smp_timer_init();
-	_Swap_unlocked();
+	z_swap_unlocked();
 
 	CODE_UNREACHABLE;
 }
@@ -98,17 +98,17 @@
 	(void)atomic_clear(&start_flag);
 
 #if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 1
-	_arch_start_cpu(1, _interrupt_stack1, CONFIG_ISR_STACK_SIZE,
+	z_arch_start_cpu(1, _interrupt_stack1, CONFIG_ISR_STACK_SIZE,
 			smp_init_top, &start_flag);
 #endif
 
 #if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 2
-	_arch_start_cpu(2, _interrupt_stack2, CONFIG_ISR_STACK_SIZE,
+	z_arch_start_cpu(2, _interrupt_stack2, CONFIG_ISR_STACK_SIZE,
 			smp_init_top, &start_flag);
 #endif
 
 #if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 3
-	_arch_start_cpu(3, _interrupt_stack3, CONFIG_ISR_STACK_SIZE,
+	z_arch_start_cpu(3, _interrupt_stack3, CONFIG_ISR_STACK_SIZE,
 			smp_init_top, &start_flag);
 #endif
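
On SMP builds of this era the irq_lock()/irq_unlock() API is routed to
these global-lock helpers, so the rename also shows up wherever that
mapping is spelled out. Simplified sketch of the assumed mapping in
include/kernel.h (illustration only, not part of this hunk):

    #ifdef CONFIG_SMP
    #define irq_lock()      z_smp_global_lock()
    #define irq_unlock(key) z_smp_global_unlock(key)
    #endif
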
 
diff --git a/kernel/stack.c b/kernel/stack.c
index 16d4ffcd1..0dda2ec 100644
--- a/kernel/stack.c
+++ b/kernel/stack.c
@@ -49,16 +49,16 @@
 void k_stack_init(struct k_stack *stack, u32_t *buffer,
 		  u32_t num_entries)
 {
-	_waitq_init(&stack->wait_q);
+	z_waitq_init(&stack->wait_q);
 	stack->lock = (struct k_spinlock) {};
 	stack->next = stack->base = buffer;
 	stack->top = stack->base + num_entries;
 
 	SYS_TRACING_OBJ_INIT(k_stack, stack);
-	_k_object_init(stack);
+	z_object_init(stack);
 }
 
-s32_t _impl_k_stack_alloc_init(struct k_stack *stack, u32_t num_entries)
+s32_t z_impl_k_stack_alloc_init(struct k_stack *stack, u32_t num_entries)
 {
 	void *buffer;
 	s32_t ret;
@@ -81,13 +81,13 @@
 	Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(stack, K_OBJ_STACK));
 	Z_OOPS(Z_SYSCALL_VERIFY(num_entries > 0));
 
-	return _impl_k_stack_alloc_init((struct k_stack *)stack, num_entries);
+	return z_impl_k_stack_alloc_init((struct k_stack *)stack, num_entries);
 }
 #endif
 
 void k_stack_cleanup(struct k_stack *stack)
 {
-	__ASSERT_NO_MSG(_waitq_head(&stack->wait_q) == NULL);
+	__ASSERT_NO_MSG(z_waitq_head(&stack->wait_q) == NULL);
 
 	if ((stack->flags & K_STACK_FLAG_ALLOC) != (u8_t)0) {
 		k_free(stack->base);
@@ -96,7 +96,7 @@
 	}
 }
 
-void _impl_k_stack_push(struct k_stack *stack, u32_t data)
+void z_impl_k_stack_push(struct k_stack *stack, u32_t data)
 {
 	struct k_thread *first_pending_thread;
 	k_spinlock_key_t key;
@@ -105,14 +105,14 @@
 
 	key = k_spin_lock(&stack->lock);
 
-	first_pending_thread = _unpend_first_thread(&stack->wait_q);
+	first_pending_thread = z_unpend_first_thread(&stack->wait_q);
 
 	if (first_pending_thread != NULL) {
-		_ready_thread(first_pending_thread);
+		z_ready_thread(first_pending_thread);
 
-		_set_thread_return_value_with_data(first_pending_thread,
+		z_set_thread_return_value_with_data(first_pending_thread,
 						   0, (void *)data);
-		_reschedule(&stack->lock, key);
+		z_reschedule(&stack->lock, key);
 		return;
 	} else {
 		*(stack->next) = data;
@@ -131,12 +131,12 @@
 	Z_OOPS(Z_SYSCALL_VERIFY_MSG(stack->next != stack->top,
 				    "stack is full"));
 
-	_impl_k_stack_push(stack, data);
+	z_impl_k_stack_push(stack, data);
 	return 0;
 }
 #endif
 
-int _impl_k_stack_pop(struct k_stack *stack, u32_t *data, s32_t timeout)
+int z_impl_k_stack_pop(struct k_stack *stack, u32_t *data, s32_t timeout)
 {
 	k_spinlock_key_t key;
 	int result;
@@ -155,7 +155,7 @@
 		return -EBUSY;
 	}
 
-	result = _pend_curr(&stack->lock, key, &stack->wait_q, timeout);
+	result = z_pend_curr(&stack->lock, key, &stack->wait_q, timeout);
 	if (result == -EAGAIN) {
 		return -EAGAIN;
 	}
@@ -170,7 +170,7 @@
 	Z_OOPS(Z_SYSCALL_OBJ(stack, K_OBJ_STACK));
 	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(data, sizeof(u32_t)));
 
-	return _impl_k_stack_pop((struct k_stack *)stack, (u32_t *)data,
+	return z_impl_k_stack_pop((struct k_stack *)stack, (u32_t *)data,
 				 timeout);
 }
 #endif
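
The k_stack_*() wrappers are untouched by the rename; a hypothetical
caller looks identical before and after this patch:

    #include <zephyr.h>

    K_STACK_DEFINE(word_stack, 8);

    void producer(void)
    {
            k_stack_push(&word_stack, 0x1234);
    }

    void consumer(void)
    {
            u32_t data;

            /* Waits up to 100 ms; blocking goes through the renamed
             * z_pend_curr().
             */
            if (k_stack_pop(&word_stack, &data, 100) == 0) {
                    /* data now holds the pushed word */
            }
    }
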
diff --git a/kernel/thread.c b/kernel/thread.c
index 06cd689..8b12beb 100644
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -66,14 +66,14 @@
 
 bool k_is_in_isr(void)
 {
-	return _is_in_isr();
+	return z_is_in_isr();
 }
 
 /*
  * This function tags the current thread as essential to system operation.
  * Exceptions raised by this thread will be treated as a fatal system error.
  */
-void _thread_essential_set(void)
+void z_thread_essential_set(void)
 {
 	_current->base.user_options |= K_ESSENTIAL;
 }
@@ -83,7 +83,7 @@
  * Exceptions raised by this thread may be recoverable.
  * (This is the default tag for a thread.)
  */
-void _thread_essential_clear(void)
+void z_thread_essential_clear(void)
 {
 	_current->base.user_options &= ~K_ESSENTIAL;
 }
@@ -93,13 +93,13 @@
  *
  * Returns true if current thread is essential, false if it is not.
  */
-bool _is_thread_essential(void)
+bool z_is_thread_essential(void)
 {
 	return (_current->base.user_options & K_ESSENTIAL) == K_ESSENTIAL;
 }
 
 #ifdef CONFIG_SYS_CLOCK_EXISTS
-void _impl_k_busy_wait(u32_t usec_to_wait)
+void z_impl_k_busy_wait(u32_t usec_to_wait)
 {
 #if !defined(CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT)
 	/* use 64-bit math to prevent overflow when multiplying */
@@ -126,19 +126,19 @@
 #ifdef CONFIG_USERSPACE
 Z_SYSCALL_HANDLER(k_busy_wait, usec_to_wait)
 {
-	_impl_k_busy_wait(usec_to_wait);
+	z_impl_k_busy_wait(usec_to_wait);
 	return 0;
 }
 #endif /* CONFIG_USERSPACE */
 #endif /* CONFIG_SYS_CLOCK_EXISTS */
 
 #ifdef CONFIG_THREAD_CUSTOM_DATA
-void _impl_k_thread_custom_data_set(void *value)
+void z_impl_k_thread_custom_data_set(void *value)
 {
 	_current->custom_data = value;
 }
 
-void *_impl_k_thread_custom_data_get(void)
+void *z_impl_k_thread_custom_data_get(void)
 {
 	return _current->custom_data;
 }
@@ -149,7 +149,7 @@
 /*
  * Remove a thread from the kernel's list of active threads.
  */
-void _thread_monitor_exit(struct k_thread *thread)
+void z_thread_monitor_exit(struct k_thread *thread)
 {
 	k_spinlock_key_t key = k_spin_lock(&lock);
 
@@ -173,7 +173,7 @@
 #endif
 
 #ifdef CONFIG_THREAD_NAME
-void _impl_k_thread_name_set(struct k_thread *thread, const char *value)
+void z_impl_k_thread_name_set(struct k_thread *thread, const char *value)
 {
 	if (thread == NULL) {
 		_current->name = value;
@@ -182,19 +182,19 @@
 	}
 }
 
-const char *_impl_k_thread_name_get(struct k_thread *thread)
+const char *z_impl_k_thread_name_get(struct k_thread *thread)
 {
 	return (const char *)thread->name;
 }
 
 #else
-void _impl_k_thread_name_set(k_tid_t thread_id, const char *value)
+void z_impl_k_thread_name_set(k_tid_t thread_id, const char *value)
 {
 	ARG_UNUSED(thread_id);
 	ARG_UNUSED(value);
 }
 
-const char *_impl_k_thread_name_get(k_tid_t thread_id)
+const char *z_impl_k_thread_name_get(k_tid_t thread_id)
 {
 	ARG_UNUSED(thread_id);
 	return NULL;
@@ -209,7 +209,7 @@
 	char *name_copy = NULL;
 
 	name_copy = z_user_string_alloc_copy((char *)data, 64);
-	_impl_k_thread_name_set((struct k_thread *)thread, name_copy);
+	z_impl_k_thread_name_set((struct k_thread *)thread, name_copy);
 	return 0;
 }
 
@@ -219,7 +219,7 @@
 #ifdef CONFIG_THREAD_CUSTOM_DATA
 Z_SYSCALL_HANDLER(k_thread_custom_data_set, data)
 {
-	_impl_k_thread_custom_data_set((void *)data);
+	z_impl_k_thread_custom_data_set((void *)data);
 	return 0;
 }
 
@@ -237,14 +237,14 @@
  *
  * 1) In k_yield() if the current thread is not swapped out
  * 2) After servicing a non-nested interrupt
- * 3) In _Swap(), check the sentinel in the outgoing thread
+ * 3) In z_swap(), check the sentinel in the outgoing thread
  *
  * Item 2 requires support in arch/ code.
  *
  * If the check fails, the thread will be terminated appropriately through
  * the system fatal error handler.
  */
-void _check_stack_sentinel(void)
+void z_check_stack_sentinel(void)
 {
 	u32_t *stack;
 
@@ -256,24 +256,24 @@
 	if (*stack != STACK_SENTINEL) {
 		/* Restore it so further checks don't trigger this same error */
 		*stack = STACK_SENTINEL;
-		_k_except_reason(_NANO_ERR_STACK_CHK_FAIL);
+		z_except_reason(_NANO_ERR_STACK_CHK_FAIL);
 	}
 }
 #endif
 
 #ifdef CONFIG_MULTITHREADING
-void _impl_k_thread_start(struct k_thread *thread)
+void z_impl_k_thread_start(struct k_thread *thread)
 {
 	k_spinlock_key_t key = k_spin_lock(&lock); /* protect kernel queues */
 
-	if (_has_thread_started(thread)) {
+	if (z_has_thread_started(thread)) {
 		k_spin_unlock(&lock, key);
 		return;
 	}
 
-	_mark_thread_as_started(thread);
-	_ready_thread(thread);
-	_reschedule(&lock, key);
+	z_mark_thread_as_started(thread);
+	z_ready_thread(thread);
+	z_reschedule(&lock, key);
 }
 
 #ifdef CONFIG_USERSPACE
@@ -288,9 +288,9 @@
 	if (delay == 0) {
 		k_thread_start(thread);
 	} else {
-		s32_t ticks = _TICK_ALIGN + _ms_to_ticks(delay);
+		s32_t ticks = _TICK_ALIGN + z_ms_to_ticks(delay);
 
-		_add_thread_timeout(thread, ticks);
+		z_add_thread_timeout(thread, ticks);
 	}
 #else
 	ARG_UNUSED(delay);
@@ -317,7 +317,7 @@
 		random_val = sys_rand32_get();
 	}
 
-	/* Don't need to worry about alignment of the size here, _new_thread()
+	/* Don't need to worry about alignment of the size here, z_new_thread()
 	 * is required to do it
 	 *
 	 * FIXME: Not the best way to get a random number in a range.
@@ -343,7 +343,7 @@
  * The caller must guarantee that the stack_size passed here corresponds
  * to the amount of stack memory available for the thread.
  */
-void _setup_new_thread(struct k_thread *new_thread,
+void z_setup_new_thread(struct k_thread *new_thread,
 		       k_thread_stack_t *stack, size_t stack_size,
 		       k_thread_entry_t entry,
 		       void *p1, void *p2, void *p3,
@@ -359,12 +359,12 @@
 #endif
 #endif
 
-	_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
+	z_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
 		    prio, options);
 
 #ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
 #ifndef CONFIG_THREAD_USERSPACE_LOCAL_DATA_ARCH_DEFER_SETUP
-	/* don't set again if the arch's own code in _new_thread() has
+	/* don't set again if the arch's own code in z_new_thread() has
 	 * already set the pointer.
 	 */
 	new_thread->userspace_local_data =
@@ -389,8 +389,8 @@
 	new_thread->name = name;
 #endif
 #ifdef CONFIG_USERSPACE
-	_k_object_init(new_thread);
-	_k_object_init(stack);
+	z_object_init(new_thread);
+	z_object_init(stack);
 	new_thread->stack_obj = stack;
 
 	/* Any given thread has access to itself */
@@ -414,7 +414,7 @@
 	}
 
 	if ((options & K_INHERIT_PERMS) != 0) {
-		_thread_perms_inherit(_current, new_thread);
+		z_thread_perms_inherit(_current, new_thread);
 	}
 #endif
 #ifdef CONFIG_SCHED_DEADLINE
@@ -425,15 +425,15 @@
 }
 
 #ifdef CONFIG_MULTITHREADING
-k_tid_t _impl_k_thread_create(struct k_thread *new_thread,
+k_tid_t z_impl_k_thread_create(struct k_thread *new_thread,
 			      k_thread_stack_t *stack,
 			      size_t stack_size, k_thread_entry_t entry,
 			      void *p1, void *p2, void *p3,
 			      int prio, u32_t options, s32_t delay)
 {
-	__ASSERT(!_is_in_isr(), "Threads may not be created in ISRs");
+	__ASSERT(!z_is_in_isr(), "Threads may not be created in ISRs");
 
-	_setup_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
+	z_setup_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
 			  prio, options, NULL);
 
 	if (delay != K_FOREVER) {
@@ -462,8 +462,8 @@
 
 	/* The thread and stack objects *must* be in an uninitialized state */
 	Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(new_thread, K_OBJ_THREAD));
-	stack_object = _k_object_find(stack);
-	Z_OOPS(Z_SYSCALL_VERIFY_MSG(_obj_validation_check(stack_object, stack,
+	stack_object = z_object_find(stack);
+	Z_OOPS(Z_SYSCALL_VERIFY_MSG(z_obj_validation_check(stack_object, stack,
 						K_OBJ__THREAD_STACK_ELEMENT,
 						_OBJ_INIT_FALSE) == 0,
 				    "bad stack object"));
@@ -511,10 +511,10 @@
 	 * than the caller
 	 */
 	Z_OOPS(Z_SYSCALL_VERIFY(_is_valid_prio(prio, NULL)));
-	Z_OOPS(Z_SYSCALL_VERIFY(_is_prio_lower_or_equal(prio,
+	Z_OOPS(Z_SYSCALL_VERIFY(z_is_prio_lower_or_equal(prio,
 							_current->base.prio)));
 
-	_setup_new_thread((struct k_thread *)new_thread, stack, stack_size,
+	z_setup_new_thread((struct k_thread *)new_thread, stack, stack_size,
 			  (k_thread_entry_t)entry, (void *)p1,
 			  (void *)margs->arg6, (void *)margs->arg7, prio,
 			  options, NULL);
@@ -528,25 +528,25 @@
 #endif /* CONFIG_USERSPACE */
 #endif /* CONFIG_MULTITHREADING */
 
-void _k_thread_single_suspend(struct k_thread *thread)
+void z_thread_single_suspend(struct k_thread *thread)
 {
-	if (_is_thread_ready(thread)) {
-		_remove_thread_from_ready_q(thread);
+	if (z_is_thread_ready(thread)) {
+		z_remove_thread_from_ready_q(thread);
 	}
 
-	_mark_thread_as_suspended(thread);
+	z_mark_thread_as_suspended(thread);
 }
 
-void _impl_k_thread_suspend(struct k_thread *thread)
+void z_impl_k_thread_suspend(struct k_thread *thread)
 {
 	k_spinlock_key_t key = k_spin_lock(&lock);
 
-	_k_thread_single_suspend(thread);
+	z_thread_single_suspend(thread);
 
 	sys_trace_thread_suspend(thread);
 
 	if (thread == _current) {
-		_reschedule(&lock, key);
+		z_reschedule(&lock, key);
 	} else {
 		k_spin_unlock(&lock, key);
 	}
@@ -556,40 +556,40 @@
 Z_SYSCALL_HANDLER1_SIMPLE_VOID(k_thread_suspend, K_OBJ_THREAD, k_tid_t);
 #endif
 
-void _k_thread_single_resume(struct k_thread *thread)
+void z_thread_single_resume(struct k_thread *thread)
 {
-	_mark_thread_as_not_suspended(thread);
-	_ready_thread(thread);
+	z_mark_thread_as_not_suspended(thread);
+	z_ready_thread(thread);
 }
 
-void _impl_k_thread_resume(struct k_thread *thread)
+void z_impl_k_thread_resume(struct k_thread *thread)
 {
 	k_spinlock_key_t key = k_spin_lock(&lock);
 
-	_k_thread_single_resume(thread);
+	z_thread_single_resume(thread);
 
 	sys_trace_thread_resume(thread);
-	_reschedule(&lock, key);
+	z_reschedule(&lock, key);
 }
 
 #ifdef CONFIG_USERSPACE
 Z_SYSCALL_HANDLER1_SIMPLE_VOID(k_thread_resume, K_OBJ_THREAD, k_tid_t);
 #endif
 
-void _k_thread_single_abort(struct k_thread *thread)
+void z_thread_single_abort(struct k_thread *thread)
 {
 	if (thread->fn_abort != NULL) {
 		thread->fn_abort();
 	}
 
-	if (_is_thread_ready(thread)) {
-		_remove_thread_from_ready_q(thread);
+	if (z_is_thread_ready(thread)) {
+		z_remove_thread_from_ready_q(thread);
 	} else {
-		if (_is_thread_pending(thread)) {
-			_unpend_thread_no_timeout(thread);
+		if (z_is_thread_pending(thread)) {
+			z_unpend_thread_no_timeout(thread);
 		}
-		if (_is_thread_timeout_active(thread)) {
-			(void)_abort_thread_timeout(thread);
+		if (z_is_thread_timeout_active(thread)) {
+			(void)z_abort_thread_timeout(thread);
 		}
 	}
 
@@ -601,11 +601,11 @@
 	/* Clear initialized state so that this thread object may be re-used
 	 * and triggers errors if API calls are made on it from user threads
 	 */
-	_k_object_uninit(thread->stack_obj);
-	_k_object_uninit(thread);
+	z_object_uninit(thread->stack_obj);
+	z_object_uninit(thread);
 
 	/* Revoke permissions on thread's ID so that it may be recycled */
-	_thread_perms_all_clear(thread);
+	z_thread_perms_all_clear(thread);
 #endif
 }
 
@@ -629,10 +629,10 @@
 }
 #endif /* CONFIG_USERSPACE */
 
-void _init_static_threads(void)
+void z_init_static_threads(void)
 {
 	_FOREACH_STATIC_THREAD(thread_data) {
-		_setup_new_thread(
+		z_setup_new_thread(
 			thread_data->init_thread,
 			thread_data->init_stack,
 			thread_data->init_stack_size,
@@ -672,7 +672,7 @@
 }
 #endif
 
-void _init_thread_base(struct _thread_base *thread_base, int priority,
+void z_init_thread_base(struct _thread_base *thread_base, int priority,
 		       u32_t initial_state, unsigned int options)
 {
 	/* k_q_node is initialized upon first insertion in a list */
@@ -686,14 +686,14 @@
 
 	/* swap_data does not need to be initialized */
 
-	_init_thread_timeout(thread_base);
+	z_init_thread_timeout(thread_base);
 }
 
 FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
 					    void *p1, void *p2, void *p3)
 {
 	_current->base.user_options |= K_USER;
-	_thread_essential_clear();
+	z_thread_essential_clear();
 #ifdef CONFIG_THREAD_MONITOR
 	_current->entry.pEntry = entry;
 	_current->entry.parameter1 = p1;
@@ -701,10 +701,10 @@
 	_current->entry.parameter3 = p3;
 #endif
 #ifdef CONFIG_USERSPACE
-	_arch_user_mode_enter(entry, p1, p2, p3);
+	z_arch_user_mode_enter(entry, p1, p2, p3);
 #else
 	/* XXX In this case we do not reset the stack */
-	_thread_entry(entry, p1, p2, p3);
+	z_thread_entry(entry, p1, p2, p3);
 #endif
 }
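
Thread creation keeps its public shape as well; z_setup_new_thread() and
z_new_thread() are purely internal. For reference, a hypothetical caller
(not part of this patch):

    #include <zephyr.h>

    #define WORKER_STACK_SIZE 1024
    K_THREAD_STACK_DEFINE(worker_stack, WORKER_STACK_SIZE);
    static struct k_thread worker;

    static void worker_entry(void *p1, void *p2, void *p3)
    {
            /* thread body */
    }

    void start_worker(void)
    {
            k_thread_create(&worker, worker_stack, WORKER_STACK_SIZE,
                            worker_entry, NULL, NULL, NULL,
                            K_PRIO_PREEMPT(5), 0, K_NO_WAIT);
    }
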
 
diff --git a/kernel/thread_abort.c b/kernel/thread_abort.c
index 14e1967..162f519 100644
--- a/kernel/thread_abort.c
+++ b/kernel/thread_abort.c
@@ -22,10 +22,10 @@
 #include <misc/__assert.h>
 #include <syscall_handler.h>
 
-extern void _k_thread_single_abort(struct k_thread *thread);
+extern void z_thread_single_abort(struct k_thread *thread);
 
 #if !defined(CONFIG_ARCH_HAS_THREAD_ABORT)
-void _impl_k_thread_abort(k_tid_t thread)
+void z_impl_k_thread_abort(k_tid_t thread)
 {
 	/* We aren't trying to synchronize data access here (these
 	 * APIs are internally synchronized).  The original lock seems
@@ -40,10 +40,10 @@
 	__ASSERT((thread->base.user_options & K_ESSENTIAL) == 0,
 		 "essential thread aborted");
 
-	_k_thread_single_abort(thread);
-	_thread_monitor_exit(thread);
+	z_thread_single_abort(thread);
+	z_thread_monitor_exit(thread);
 
-	_reschedule(&lock, key);
+	z_reschedule(&lock, key);
 }
 #endif
 
@@ -55,7 +55,7 @@
 	Z_OOPS(Z_SYSCALL_VERIFY_MSG(!(thread->base.user_options & K_ESSENTIAL),
 				    "aborting essential thread %p", thread));
 
-	_impl_k_thread_abort((struct k_thread *)thread);
+	z_impl_k_thread_abort((struct k_thread *)thread);
 	return 0;
 }
 #endif
diff --git a/kernel/timeout.c b/kernel/timeout.c
index 9154caa..d4f85b0 100644
--- a/kernel/timeout.c
+++ b/kernel/timeout.c
@@ -72,7 +72,7 @@
 	return ret;
 }
 
-void _add_timeout(struct _timeout *to, _timeout_func_t fn, s32_t ticks)
+void z_add_timeout(struct _timeout *to, _timeout_func_t fn, s32_t ticks)
 {
 	__ASSERT(!sys_dnode_is_linked(&to->node), "");
 	to->fn = fn;
@@ -103,7 +103,7 @@
 	}
 }
 
-int _abort_timeout(struct _timeout *to)
+int z_abort_timeout(struct _timeout *to)
 {
 	int ret = -EINVAL;
 
@@ -121,7 +121,7 @@
 {
 	s32_t ticks = 0;
 
-	if (_is_inactive_timeout(timeout)) {
+	if (z_is_inactive_timeout(timeout)) {
 		return 0;
 	}
 
@@ -137,7 +137,7 @@
 	return ticks - elapsed();
 }
 
-s32_t _get_next_timeout_expiry(void)
+s32_t z_get_next_timeout_expiry(void)
 {
 	s32_t ret = K_FOREVER;
 
@@ -234,7 +234,7 @@
 #endif
 }
 
-u32_t _impl_k_uptime_get_32(void)
+u32_t z_impl_k_uptime_get_32(void)
 {
 	return __ticks_to_ms(z_tick_get_32());
 }
@@ -242,11 +242,11 @@
 #ifdef CONFIG_USERSPACE
 Z_SYSCALL_HANDLER(k_uptime_get_32)
 {
-	return _impl_k_uptime_get_32();
+	return z_impl_k_uptime_get_32();
 }
 #endif
 
-s64_t _impl_k_uptime_get(void)
+s64_t z_impl_k_uptime_get(void)
 {
 	return __ticks_to_ms(z_tick_get());
 }
@@ -257,7 +257,7 @@
 	u64_t *ret = (u64_t *)ret_p;
 
 	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(ret, sizeof(*ret)));
-	*ret = _impl_k_uptime_get();
+	*ret = z_impl_k_uptime_get();
 	return 0;
 }
 #endif
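
The uptime accessors likewise keep their k_ names, with only the z_impl_
bodies renamed above. A small timing sketch (hypothetical):

    #include <zephyr.h>

    void measure(void)
    {
            s64_t start = k_uptime_get();

            k_busy_wait(1000);      /* spin for 1000 us */

            /* Elapsed milliseconds; both reads go through
             * z_impl_k_uptime_get().
             */
            s64_t elapsed_ms = k_uptime_get() - start;

            (void)elapsed_ms;
    }
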
diff --git a/kernel/timer.c b/kernel/timer.c
index 8c858d4..776181c 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -47,7 +47,7 @@
  *
  * @return N/A
  */
-void _timer_expiration_handler(struct _timeout *t)
+void z_timer_expiration_handler(struct _timeout *t)
 {
 	struct k_timer *timer = CONTAINER_OF(t, struct k_timer, timeout);
 	struct k_thread *thread;
@@ -57,7 +57,7 @@
 	 * since we're already aligned to a tick boundary
 	 */
 	if (timer->period > 0) {
-		_add_timeout(&timer->timeout, _timer_expiration_handler,
+		z_add_timeout(&timer->timeout, z_timer_expiration_handler,
 			     timer->period);
 	}
 
@@ -69,7 +69,7 @@
 		timer->expiry_fn(timer);
 	}
 
-	thread = _waitq_head(&timer->wait_q);
+	thread = z_waitq_head(&timer->wait_q);
 
 	if (thread == NULL) {
 		return;
@@ -83,11 +83,11 @@
 	 * thread level, which of course cannot interrupt the current
 	 * context.
 	 */
-	_unpend_thread_no_timeout(thread);
+	z_unpend_thread_no_timeout(thread);
 
-	_ready_thread(thread);
+	z_ready_thread(thread);
 
-	_set_thread_return_value(thread, 0);
+	z_set_thread_return_value(thread, 0);
 }
 
 
@@ -99,30 +99,30 @@
 	timer->stop_fn = stop_fn;
 	timer->status = 0;
 
-	_waitq_init(&timer->wait_q);
-	_init_timeout(&timer->timeout, _timer_expiration_handler);
+	z_waitq_init(&timer->wait_q);
+	z_init_timeout(&timer->timeout, z_timer_expiration_handler);
 	SYS_TRACING_OBJ_INIT(k_timer, timer);
 
 	timer->user_data = NULL;
 
-	_k_object_init(timer);
+	z_object_init(timer);
 }
 
 
-void _impl_k_timer_start(struct k_timer *timer, s32_t duration, s32_t period)
+void z_impl_k_timer_start(struct k_timer *timer, s32_t duration, s32_t period)
 {
 	__ASSERT(duration >= 0 && period >= 0 &&
 		 (duration != 0 || period != 0), "invalid parameters\n");
 
 	volatile s32_t period_in_ticks, duration_in_ticks;
 
-	period_in_ticks = _ms_to_ticks(period);
-	duration_in_ticks = _ms_to_ticks(duration);
+	period_in_ticks = z_ms_to_ticks(period);
+	duration_in_ticks = z_ms_to_ticks(duration);
 
-	(void)_abort_timeout(&timer->timeout);
+	(void)z_abort_timeout(&timer->timeout);
 	timer->period = period_in_ticks;
 	timer->status = 0;
-	_add_timeout(&timer->timeout, _timer_expiration_handler,
+	z_add_timeout(&timer->timeout, z_timer_expiration_handler,
 		     duration_in_ticks);
 }
 
@@ -137,14 +137,14 @@
 	Z_OOPS(Z_SYSCALL_VERIFY(duration >= 0 && period >= 0 &&
 				(duration != 0 || period != 0)));
 	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
-	_impl_k_timer_start((struct k_timer *)timer, duration, period);
+	z_impl_k_timer_start((struct k_timer *)timer, duration, period);
 	return 0;
 }
 #endif
 
-void _impl_k_timer_stop(struct k_timer *timer)
+void z_impl_k_timer_stop(struct k_timer *timer)
 {
-	int inactive = _abort_timeout(&timer->timeout) != 0;
+	int inactive = z_abort_timeout(&timer->timeout) != 0;
 
 	if (inactive) {
 		return;
@@ -154,11 +154,11 @@
 		timer->stop_fn(timer);
 	}
 
-	struct k_thread *pending_thread = _unpend1_no_timeout(&timer->wait_q);
+	struct k_thread *pending_thread = z_unpend1_no_timeout(&timer->wait_q);
 
 	if (pending_thread != NULL) {
-		_ready_thread(pending_thread);
-		_reschedule_unlocked();
+		z_ready_thread(pending_thread);
+		z_reschedule_unlocked();
 	}
 }
 
@@ -166,7 +166,7 @@
 Z_SYSCALL_HANDLER1_SIMPLE_VOID(k_timer_stop, K_OBJ_TIMER, struct k_timer *);
 #endif
 
-u32_t _impl_k_timer_status_get(struct k_timer *timer)
+u32_t z_impl_k_timer_status_get(struct k_timer *timer)
 {
 	k_spinlock_key_t key = k_spin_lock(&lock);
 	u32_t result = timer->status;
@@ -181,17 +181,17 @@
 Z_SYSCALL_HANDLER1_SIMPLE(k_timer_status_get, K_OBJ_TIMER, struct k_timer *);
 #endif
 
-u32_t _impl_k_timer_status_sync(struct k_timer *timer)
+u32_t z_impl_k_timer_status_sync(struct k_timer *timer)
 {
-	__ASSERT(!_is_in_isr(), "");
+	__ASSERT(!z_is_in_isr(), "");
 
 	k_spinlock_key_t key = k_spin_lock(&lock);
 	u32_t result = timer->status;
 
 	if (result == 0) {
-		if (!_is_inactive_timeout(&timer->timeout)) {
+		if (!z_is_inactive_timeout(&timer->timeout)) {
 			/* wait for timer to expire or stop */
-			(void)_pend_curr(&lock, key, &timer->wait_q, K_FOREVER);
+			(void)z_pend_curr(&lock, key, &timer->wait_q, K_FOREVER);
 
 			/* get updated timer status */
 			key = k_spin_lock(&lock);
@@ -220,7 +220,7 @@
 Z_SYSCALL_HANDLER(k_timer_user_data_set, timer, user_data)
 {
 	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
-	_impl_k_timer_user_data_set((struct k_timer *)timer, (void *)user_data);
+	z_impl_k_timer_user_data_set((struct k_timer *)timer, (void *)user_data);
 	return 0;
 }
 #endif
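
k_timer users see no difference: the expiration handler rename is
internal to kernel/timer.c. Hypothetical usage for reference:

    #include <zephyr.h>

    static void blink_expiry(struct k_timer *timer)
    {
            /* runs on each expiry, via the renamed
             * z_timer_expiration_handler() path
             */
    }

    K_TIMER_DEFINE(blink_timer, blink_expiry, NULL);

    void start_blinking(void)
    {
            /* 100 ms initial delay, then every 500 ms; both values are
             * converted via the renamed z_ms_to_ticks().
             */
            k_timer_start(&blink_timer, 100, 500);
    }
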
diff --git a/kernel/userspace.c b/kernel/userspace.c
index 24e32ca..77dbe15 100644
--- a/kernel/userspace.c
+++ b/kernel/userspace.c
@@ -105,8 +105,8 @@
 	u8_t data[]; /* The object itself */
 };
 
-extern struct _k_object *_k_object_gperf_find(void *obj);
-extern void _k_object_gperf_wordlist_foreach(_wordlist_cb_func_t func,
+extern struct _k_object *z_object_gperf_find(void *obj);
+extern void z_object_gperf_wordlist_foreach(_wordlist_cb_func_t func,
 					     void *context);
 
 static bool node_lessthan(struct rbnode *a, struct rbnode *b);
@@ -193,7 +193,7 @@
  *
  * @return true if successful, false if failed
  **/
-static bool _thread_idx_alloc(u32_t *tidx)
+static bool thread_idx_alloc(u32_t *tidx)
 {
 	int i;
 	int idx;
@@ -210,7 +210,7 @@
 					       *tidx);
 
 			/* Clear permission from all objects */
-			_k_object_wordlist_foreach(clear_perms_cb,
+			z_object_wordlist_foreach(clear_perms_cb,
 						   (void *)*tidx);
 
 			return true;
@@ -232,15 +232,15 @@
  *
  * @param tidx The thread index to be freed
  **/
-static void _thread_idx_free(u32_t tidx)
+static void thread_idx_free(u32_t tidx)
 {
 	/* To prevent leaked permission when index is recycled */
-	_k_object_wordlist_foreach(clear_perms_cb, (void *)tidx);
+	z_object_wordlist_foreach(clear_perms_cb, (void *)tidx);
 
 	sys_bitfield_set_bit((mem_addr_t)_thread_idx_map, tidx);
 }
 
-void *_impl_k_object_alloc(enum k_objects otype)
+void *z_impl_k_object_alloc(enum k_objects otype)
 {
 	struct dyn_obj *dyn_obj;
 	u32_t tidx;
@@ -265,7 +265,7 @@
 
 	/* Need to grab a new thread index for k_thread */
 	if (otype == K_OBJ_THREAD) {
-		if (!_thread_idx_alloc(&tidx)) {
+		if (!thread_idx_alloc(&tidx)) {
 			k_free(dyn_obj);
 			return NULL;
 		}
@@ -276,7 +276,7 @@
 	/* The allocating thread implicitly gets permission on kernel objects
 	 * that it allocates
 	 */
-	_thread_perms_set(&dyn_obj->kobj, _current);
+	z_thread_perms_set(&dyn_obj->kobj, _current);
 
 	k_spinlock_key_t key = k_spin_lock(&lists_lock);
 
@@ -304,7 +304,7 @@
 		sys_dlist_remove(&dyn_obj->obj_list);
 
 		if (dyn_obj->kobj.type == K_OBJ_THREAD) {
-			_thread_idx_free(dyn_obj->kobj.data);
+			thread_idx_free(dyn_obj->kobj.data);
 		}
 	}
 	k_spin_unlock(&objfree_lock, key);
@@ -314,11 +314,11 @@
 	}
 }
 
-struct _k_object *_k_object_find(void *obj)
+struct _k_object *z_object_find(void *obj)
 {
 	struct _k_object *ret;
 
-	ret = _k_object_gperf_find(obj);
+	ret = z_object_gperf_find(obj);
 
 	if (ret == NULL) {
 		struct dyn_obj *dynamic_obj;
@@ -332,11 +332,11 @@
 	return ret;
 }
 
-void _k_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
+void z_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
 {
 	struct dyn_obj *obj, *next;
 
-	_k_object_gperf_wordlist_foreach(func, context);
+	z_object_gperf_wordlist_foreach(func, context);
 
 	k_spinlock_key_t key = k_spin_lock(&lists_lock);
 
@@ -351,7 +351,7 @@
 {
 	struct _k_object *ko;
 
-	ko = _k_object_find(t);
+	ko = z_object_find(t);
 
 	if (ko == NULL) {
 		return -1;
@@ -418,7 +418,7 @@
 	}
 }
 
-void _thread_perms_inherit(struct k_thread *parent, struct k_thread *child)
+void z_thread_perms_inherit(struct k_thread *parent, struct k_thread *child)
 {
 	struct perm_ctx ctx = {
 		thread_index_get(parent),
@@ -427,11 +427,11 @@
 	};
 
 	if ((ctx.parent_id != -1) && (ctx.child_id != -1)) {
-		_k_object_wordlist_foreach(wordlist_cb, &ctx);
+		z_object_wordlist_foreach(wordlist_cb, &ctx);
 	}
 }
 
-void _thread_perms_set(struct _k_object *ko, struct k_thread *thread)
+void z_thread_perms_set(struct _k_object *ko, struct k_thread *thread)
 {
 	int index = thread_index_get(thread);
 
@@ -440,7 +440,7 @@
 	}
 }
 
-void _thread_perms_clear(struct _k_object *ko, struct k_thread *thread)
+void z_thread_perms_clear(struct _k_object *ko, struct k_thread *thread)
 {
 	int index = thread_index_get(thread);
 
@@ -457,12 +457,12 @@
 	unref_check(ko, id);
 }
 
-void _thread_perms_all_clear(struct k_thread *thread)
+void z_thread_perms_all_clear(struct k_thread *thread)
 {
 	int index = thread_index_get(thread);
 
 	if (index != -1) {
-		_k_object_wordlist_foreach(clear_perms_cb, (void *)index);
+		z_object_wordlist_foreach(clear_perms_cb, (void *)index);
 	}
 }
 
@@ -493,7 +493,7 @@
 	printk("]\n");
 }
 
-void _dump_object_error(int retval, void *obj, struct _k_object *ko,
+void z_dump_object_error(int retval, void *obj, struct _k_object *ko,
 			enum k_objects otype)
 {
 	switch (retval) {
@@ -515,39 +515,39 @@
 	}
 }
 
-void _impl_k_object_access_grant(void *object, struct k_thread *thread)
+void z_impl_k_object_access_grant(void *object, struct k_thread *thread)
 {
-	struct _k_object *ko = _k_object_find(object);
+	struct _k_object *ko = z_object_find(object);
 
 	if (ko != NULL) {
-		_thread_perms_set(ko, thread);
+		z_thread_perms_set(ko, thread);
 	}
 }
 
 void k_object_access_revoke(void *object, struct k_thread *thread)
 {
-	struct _k_object *ko = _k_object_find(object);
+	struct _k_object *ko = z_object_find(object);
 
 	if (ko != NULL) {
-		_thread_perms_clear(ko, thread);
+		z_thread_perms_clear(ko, thread);
 	}
 }
 
-void _impl_k_object_release(void *object)
+void z_impl_k_object_release(void *object)
 {
 	k_object_access_revoke(object, _current);
 }
 
 void k_object_access_all_grant(void *object)
 {
-	struct _k_object *ko = _k_object_find(object);
+	struct _k_object *ko = z_object_find(object);
 
 	if (ko != NULL) {
 		ko->flags |= K_OBJ_FLAG_PUBLIC;
 	}
 }
 
-int _k_object_validate(struct _k_object *ko, enum k_objects otype,
+int z_object_validate(struct _k_object *ko, enum k_objects otype,
 		       enum _obj_init_check init)
 {
 	if (unlikely((ko == NULL) ||
@@ -580,19 +580,19 @@
 	return 0;
 }
 
-void _k_object_init(void *obj)
+void z_object_init(void *obj)
 {
 	struct _k_object *ko;
 
 	/* By the time we get here, if the caller was from userspace, all the
-	 * necessary checks have been done in _k_object_validate(), which takes
+	 * necessary checks have been done in z_object_validate(), which takes
 	 * place before the object is initialized.
 	 *
 	 * This function runs after the object has been initialized and
 	 * finalizes it
 	 */
 
-	ko = _k_object_find(obj);
+	ko = z_object_find(obj);
 	if (ko == NULL) {
 		/* Supervisor threads can ignore rules about kernel objects
 		 * and may declare them on stacks, etc. Such objects will never
@@ -605,23 +605,23 @@
 	ko->flags |= K_OBJ_FLAG_INITIALIZED;
 }
 
-void _k_object_recycle(void *obj)
+void z_object_recycle(void *obj)
 {
-	struct _k_object *ko = _k_object_find(obj);
+	struct _k_object *ko = z_object_find(obj);
 
 	if (ko != NULL) {
 		(void)memset(ko->perms, 0, sizeof(ko->perms));
-		_thread_perms_set(ko, k_current_get());
+		z_thread_perms_set(ko, k_current_get());
 		ko->flags |= K_OBJ_FLAG_INITIALIZED;
 	}
 }
 
-void _k_object_uninit(void *obj)
+void z_object_uninit(void *obj)
 {
 	struct _k_object *ko;
 
-	/* See comments in _k_object_init() */
-	ko = _k_object_find(obj);
+	/* See comments in z_object_init() */
+	ko = z_object_find(obj);
 	if (ko == NULL) {
 		return;
 	}
@@ -765,7 +765,7 @@
 				  u32_t arg4, u32_t arg5, u32_t arg6, void *ssf)
 {
 	printk("Bad system call id %u invoked\n", bad_id);
-	_arch_syscall_oops(ssf);
+	z_arch_syscall_oops(ssf);
 	CODE_UNREACHABLE;
 }
 
@@ -773,7 +773,7 @@
 				 u32_t arg4, u32_t arg5, u32_t arg6, void *ssf)
 {
 	printk("Unimplemented system call\n");
-	_arch_syscall_oops(ssf);
+	z_arch_syscall_oops(ssf);
 	CODE_UNREACHABLE;
 }
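
The permission machinery is internal, but it backs the public
k_object_access_*() calls, which are unchanged. Hypothetical example:

    #include <zephyr.h>

    K_SEM_DEFINE(shared_sem, 0, 1);

    void grant_to(struct k_thread *user_thread)
    {
            /* Walks the renamed z_object_find()/z_thread_perms_set()
             * path shown above.
             */
            k_object_access_grant(&shared_sem, user_thread);
    }
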
 
diff --git a/kernel/userspace_handler.c b/kernel/userspace_handler.c
index 22d7744..7919a07 100644
--- a/kernel/userspace_handler.c
+++ b/kernel/userspace_handler.c
@@ -13,15 +13,15 @@
 	struct _k_object *ko;
 	int ret;
 
-	ko = _k_object_find(obj);
+	ko = z_object_find(obj);
 
 	/* This can be any kernel object and it doesn't have to be
 	 * initialized
 	 */
-	ret = _k_object_validate(ko, K_OBJ_ANY, _OBJ_INIT_ANY);
+	ret = z_object_validate(ko, K_OBJ_ANY, _OBJ_INIT_ANY);
 	if (ret != 0) {
 #ifdef CONFIG_PRINTK
-		_dump_object_error(ret, obj, ko, K_OBJ_ANY);
+		z_dump_object_error(ret, obj, ko, K_OBJ_ANY);
 #endif
 		return NULL;
 	}
@@ -33,7 +33,7 @@
  * syscall_dispatch.c declares weak handlers results in build errors if these
  * are located in userspace.c. Just put in a separate file.
  *
- * To avoid double _k_object_find() lookups, we don't call the implementation
+ * To avoid double z_object_find() lookups, we don't call the implementation
  * function, but call a level deeper.
  */
 Z_SYSCALL_HANDLER(k_object_access_grant, object, thread)
@@ -44,7 +44,7 @@
 	ko = validate_any_object((void *)object);
 	Z_OOPS(Z_SYSCALL_VERIFY_MSG(ko != NULL, "object %p access denied",
 				    (void *)object));
-	_thread_perms_set(ko, (struct k_thread *)thread);
+	z_thread_perms_set(ko, (struct k_thread *)thread);
 
 	return 0;
 }
@@ -56,7 +56,7 @@
 	ko = validate_any_object((void *)object);
 	Z_OOPS(Z_SYSCALL_VERIFY_MSG(ko != NULL, "object %p access denied",
 				    (void *)object));
-	_thread_perms_clear(ko, _current);
+	z_thread_perms_clear(ko, _current);
 
 	return 0;
 }
@@ -67,5 +67,5 @@
 				    otype != K_OBJ__THREAD_STACK_ELEMENT,
 				    "bad object type %d requested", otype));
 
-	return (u32_t)_impl_k_object_alloc(otype);
+	return (u32_t)z_impl_k_object_alloc(otype);
 }
diff --git a/kernel/work_q.c b/kernel/work_q.c
index 9196a0a..6ecd613 100644
--- a/kernel/work_q.c
+++ b/kernel/work_q.c
@@ -44,7 +44,7 @@
 void k_delayed_work_init(struct k_delayed_work *work, k_work_handler_t handler)
 {
 	k_work_init(&work->work, handler);
-	_init_timeout(&work->timeout, work_timeout);
+	z_init_timeout(&work->timeout, work_timeout);
 	work->work_q = NULL;
 }
 
@@ -58,7 +58,7 @@
 			return -EINVAL;
 		}
 	} else {
-		(void)_abort_timeout(&work->timeout);
+		(void)z_abort_timeout(&work->timeout);
 	}
 
 	/* Detach from workqueue */
@@ -103,8 +103,8 @@
 	}
 
 	/* Add timeout */
-	_add_timeout(&work->timeout, work_timeout,
-		     _TICK_ALIGN + _ms_to_ticks(delay));
+	z_add_timeout(&work->timeout, work_timeout,
+		     _TICK_ALIGN + z_ms_to_ticks(delay));
 
 done:
 	k_spin_unlock(&work_q->lock, key);
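
Delayed work submission is also unchanged at the API level; only the
timeout helpers it calls are renamed. Hypothetical usage:

    #include <zephyr.h>

    static void work_handler(struct k_work *work)
    {
            /* deferred work body */
    }

    static struct k_delayed_work dwork;

    void schedule_it(void)
    {
            k_delayed_work_init(&dwork, work_handler);

            /* 50 ms, converted to ticks via the renamed z_ms_to_ticks() */
            k_delayed_work_submit(&dwork, 50);
    }
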
diff --git a/lib/cmsis_rtos_v1/cmsis_kernel.c b/lib/cmsis_rtos_v1/cmsis_kernel.c
index 2d81c6f..4b9ae26 100644
--- a/lib/cmsis_rtos_v1/cmsis_kernel.c
+++ b/lib/cmsis_rtos_v1/cmsis_kernel.c
@@ -42,5 +42,5 @@
  */
 int32_t osKernelRunning(void)
 {
-	return _has_thread_started(_main_thread);
+	return z_has_thread_started(_main_thread);
 }
diff --git a/lib/cmsis_rtos_v2/thread.c b/lib/cmsis_rtos_v2/thread.c
index db23706..e7d5044 100644
--- a/lib/cmsis_rtos_v2/thread.c
+++ b/lib/cmsis_rtos_v2/thread.c
@@ -547,7 +547,7 @@
 
 	__ASSERT(!k_is_in_isr(), "");
 	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
-		if (get_cmsis_thread_id(thread) && _is_thread_queued(thread)) {
+		if (get_cmsis_thread_id(thread) && z_is_thread_queued(thread)) {
 			count++;
 		}
 	}
diff --git a/lib/gui/lvgl/lvgl.c b/lib/gui/lvgl/lvgl.c
index 4369374..13bec38 100644
--- a/lib/gui/lvgl/lvgl.c
+++ b/lib/gui/lvgl/lvgl.c
@@ -42,7 +42,7 @@
 	ARG_UNUSED(file);
 	ARG_UNUSED(line);
 
-	_LOG(zephyr_level, "%s", dsc);
+	Z_LOG(zephyr_level, "%s", dsc);
 }
 #endif
 
diff --git a/lib/libc/minimal/source/stdout/stdout_console.c b/lib/libc/minimal/source/stdout/stdout_console.c
index ab505e1..9d20131 100644
--- a/lib/libc/minimal/source/stdout/stdout_console.c
+++ b/lib/libc/minimal/source/stdout/stdout_console.c
@@ -25,7 +25,7 @@
 	_stdout_hook = hook;
 }
 
-int _impl__zephyr_fputc(int c, FILE *stream)
+int z_impl__zephyr_fputc(int c, FILE *stream)
 {
 	return (stdout == stream) ? _stdout_hook(c) : EOF;
 }
@@ -33,7 +33,7 @@
 #ifdef CONFIG_USERSPACE
 Z_SYSCALL_HANDLER(_zephyr_fputc, c, stream)
 {
-	return _impl__zephyr_fputc(c, (FILE *)stream);
+	return z_impl__zephyr_fputc(c, (FILE *)stream);
 }
 #endif
 
@@ -52,7 +52,7 @@
 	return len == ret ? 0 : EOF;
 }
 
-size_t _impl__zephyr_fwrite(const void *_MLIBC_RESTRICT ptr, size_t size,
+size_t z_impl__zephyr_fwrite(const void *_MLIBC_RESTRICT ptr, size_t size,
 			    size_t nitems, FILE *_MLIBC_RESTRICT stream)
 {
 	size_t i;
@@ -82,11 +82,11 @@
 }
 
 #ifdef CONFIG_USERSPACE
-Z_SYSCALL_HANDLER(_zephyr_fwrite, ptr, size, nitems, stream)
+Z_SYSCALL_HANDLER(z_zephyr_fwrite, ptr, size, nitems, stream)
 {
 
 	Z_OOPS(Z_SYSCALL_MEMORY_ARRAY_READ(ptr, nitems, size));
-	return _impl__zephyr_fwrite((const void *_MLIBC_RESTRICT)ptr, size,
+	return z_impl__zephyr_fwrite((const void *_MLIBC_RESTRICT)ptr, size,
 				    nitems, (FILE *_MLIBC_RESTRICT)stream);
 }
 #endif
@@ -94,7 +94,7 @@
 size_t fwrite(const void *_MLIBC_RESTRICT ptr, size_t size, size_t nitems,
 			  FILE *_MLIBC_RESTRICT stream)
 {
-	return _zephyr_fwrite(ptr, size, nitems, stream);
+	return z_zephyr_fwrite(ptr, size, nitems, stream);
 }
 
 
diff --git a/lib/libc/newlib/libc-hooks.c b/lib/libc/newlib/libc-hooks.c
index 5782719..d6bef58 100644
--- a/lib/libc/newlib/libc-hooks.c
+++ b/lib/libc/newlib/libc-hooks.c
@@ -104,7 +104,7 @@
 	_stdin_hook = hook;
 }
 
-int _impl__zephyr_read_stdin(char *buf, int nbytes)
+int z_impl_zephyr_read_stdin(char *buf, int nbytes)
 {
 	int i = 0;
 
@@ -119,14 +119,14 @@
 }
 
 #ifdef CONFIG_USERSPACE
-Z_SYSCALL_HANDLER(_zephyr_read_stdin, buf, nbytes)
+Z_SYSCALL_HANDLER(zephyr_read_stdin, buf, nbytes)
 {
 	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(buf, nbytes));
-	return _impl__zephyr_read_stdin((char *)buf, nbytes);
+	return z_impl_zephyr_read_stdin((char *)buf, nbytes);
 }
 #endif
 
-int _impl__zephyr_write_stdout(const void *buffer, int nbytes)
+int z_impl_zephyr_write_stdout(const void *buffer, int nbytes)
 {
 	const char *buf = buffer;
 	int i;
@@ -141,10 +141,10 @@
 }
 
 #ifdef CONFIG_USERSPACE
-Z_SYSCALL_HANDLER(_zephyr_write_stdout, buf, nbytes)
+Z_SYSCALL_HANDLER(zephyr_write_stdout, buf, nbytes)
 {
 	Z_OOPS(Z_SYSCALL_MEMORY_READ(buf, nbytes));
-	return _impl__zephyr_write_stdout((const void *)buf, nbytes);
+	return z_impl_zephyr_write_stdout((const void *)buf, nbytes);
 }
 #endif
 
@@ -153,7 +153,7 @@
 {
 	ARG_UNUSED(fd);
 
-	return _zephyr_read_stdin(buf, nbytes);
+	return z_impl_zephyr_read_stdin(buf, nbytes);
 }
 FUNC_ALIAS(_read, read, int);
 
@@ -161,7 +161,7 @@
 {
 	ARG_UNUSED(fd);
 
-	return _zephyr_write_stdout(buf, nbytes);
+	return z_impl_zephyr_write_stdout(buf, nbytes);
 }
 FUNC_ALIAS(_write, write, int);
 
diff --git a/lib/os/fdtable.c b/lib/os/fdtable.c
index 52a625a..059f6c7 100644
--- a/lib/os/fdtable.c
+++ b/lib/os/fdtable.c
@@ -256,7 +256,7 @@
  * fd operations for stdio/stdout/stderr
  */
 
-int _impl__zephyr_write_stdout(const char *buf, int nbytes);
+int z_impl_zephyr_write_stdout(const char *buf, int nbytes);
 
 static ssize_t stdinout_read_vmeth(void *obj, void *buffer, size_t count)
 {
@@ -268,7 +268,7 @@
 #if defined(CONFIG_BOARD_NATIVE_POSIX)
 	return write(1, buffer, count);
 #elif defined(CONFIG_NEWLIB_LIBC)
-	return _impl__zephyr_write_stdout(buffer, count);
+	return z_impl_zephyr_write_stdout(buffer, count);
 #else
 	return 0;
 #endif
diff --git a/lib/os/mempool.c b/lib/os/mempool.c
index 80f914a..6561269 100644
--- a/lib/os/mempool.c
+++ b/lib/os/mempool.c
@@ -82,7 +82,7 @@
 	return ((u8_t *)block + bsz - 1 - (u8_t *)p->buf) < buf_size(p);
 }
 
-void _sys_mem_pool_base_init(struct sys_mem_pool_base *p)
+void z_sys_mem_pool_base_init(struct sys_mem_pool_base *p)
 {
 	int i;
 	size_t buflen = p->n_max * p->max_sz, sz = p->max_sz;
@@ -233,7 +233,7 @@
 	return block;
 }
 
-int _sys_mem_pool_block_alloc(struct sys_mem_pool_base *p, size_t size,
+int z_sys_mem_pool_block_alloc(struct sys_mem_pool_base *p, size_t size,
 			      u32_t *level_p, u32_t *block_p, void **data_p)
 {
 	int i, from_l, alloc_l = -1, free_l = -1;
@@ -306,13 +306,13 @@
 	return 0;
 }
 
-void _sys_mem_pool_block_free(struct sys_mem_pool_base *p, u32_t level,
+void z_sys_mem_pool_block_free(struct sys_mem_pool_base *p, u32_t level,
 			      u32_t block)
 {
 	size_t lsizes[LVL_ARRAY_SZ(p->n_levels)];
 	int i;
 
-	/* As in _sys_mem_pool_block_alloc(), we build a table of level sizes
+	/* As in z_sys_mem_pool_block_alloc(), we build a table of level sizes
 	 * to avoid having to store it in precious RAM bytes.
 	 * Overhead here is somewhat higher because block_free()
 	 * doesn't inherently need to traverse all the larger
@@ -339,7 +339,7 @@
 	k_mutex_lock(p->mutex, K_FOREVER);
 
 	size += sizeof(struct sys_mem_pool_block);
-	if (_sys_mem_pool_block_alloc(&p->base, size, &level, &block,
+	if (z_sys_mem_pool_block_alloc(&p->base, size, &level, &block,
 				      (void **)&ret)) {
 		ret = NULL;
 		goto out;
@@ -368,7 +368,7 @@
 	p = blk->pool;
 
 	k_mutex_lock(p->mutex, K_FOREVER);
-	_sys_mem_pool_block_free(&p->base, blk->level, blk->block);
+	z_sys_mem_pool_block_free(&p->base, blk->level, blk->block);
 	k_mutex_unlock(p->mutex);
 }
 
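Both renamed entry points rebuild the per-level block-size table on each call
rather than storing it, as the comment in z_sys_mem_pool_block_free() notes.
A minimal sketch of that table, assuming the allocator's four-way split
(every block at one level divides into four blocks at the next):

    static void build_lsizes(const struct sys_mem_pool_base *p,
                             size_t *lsizes, int n_levels)
    {
        int i;

        /* level 0 blocks have the configured maximum size */
        lsizes[0] = p->max_sz;

        /* each deeper level is a 4-way split of its parent */
        for (i = 1; i < n_levels; i++) {
            lsizes[i] = lsizes[i - 1] / 4;
        }
    }
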
diff --git a/lib/os/printk.c b/lib/os/printk.c
index 4225561..093faa5 100644
--- a/lib/os/printk.c
+++ b/lib/os/printk.c
@@ -336,7 +336,7 @@
 }
 #endif
 
-void _impl_k_str_out(char *c, size_t n)
+void z_impl_k_str_out(char *c, size_t n)
 {
 	int i;
 
@@ -349,7 +349,7 @@
 Z_SYSCALL_HANDLER(k_str_out, c, n)
 {
 	Z_OOPS(Z_SYSCALL_MEMORY_READ(c, n));
-	_impl_k_str_out((char *)c, n);
+	z_impl_k_str_out((char *)c, n);
 
 	return 0;
 }
diff --git a/lib/os/rb.c b/lib/os/rb.c
index be54853..d7e6713 100644
--- a/lib/os/rb.c
+++ b/lib/os/rb.c
@@ -97,7 +97,7 @@
 	return sz;
 }
 
-struct rbnode *_rb_get_minmax(struct rbtree *tree, int side)
+struct rbnode *z_rb_get_minmax(struct rbtree *tree, int side)
 {
 	struct rbnode *n;
 
@@ -491,22 +491,22 @@
 }
 
 #ifndef CONFIG_MISRA_SANE
-void _rb_walk(struct rbnode *node, rb_visit_t visit_fn, void *cookie)
+void z_rb_walk(struct rbnode *node, rb_visit_t visit_fn, void *cookie)
 {
 	if (node != NULL) {
-		_rb_walk(get_child(node, 0), visit_fn, cookie);
+		z_rb_walk(get_child(node, 0), visit_fn, cookie);
 		visit_fn(node, cookie);
-		_rb_walk(get_child(node, 1), visit_fn, cookie);
+		z_rb_walk(get_child(node, 1), visit_fn, cookie);
 	}
 }
 #endif
 
-struct rbnode *_rb_child(struct rbnode *node, int side)
+struct rbnode *z_rb_child(struct rbnode *node, int side)
 {
 	return get_child(node, side);
 }
 
-int _rb_is_black(struct rbnode *node)
+int z_rb_is_black(struct rbnode *node)
 {
 	return is_black(node);
 }
@@ -551,7 +551,7 @@
  * case of top == -1 indicates that the stack is uninitialized and we
  * need to push an initial stack starting at the root.
  */
-struct rbnode *_rb_foreach_next(struct rbtree *tree, struct _rb_foreach *f)
+struct rbnode *z_rb_foreach_next(struct rbtree *tree, struct _rb_foreach *f)
 {
 	struct rbnode *n;
 
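z_rb_foreach_next() keeps its traversal state in the caller-provided
struct _rb_foreach, so in-order iteration needs neither recursion nor parent
pointers. It is normally reached through the RB_FOR_EACH() macro in rb.h
rather than called directly; a usage sketch (process() is a hypothetical
consumer):

    static void process(struct rbnode *n)
    {
        /* hypothetical per-node work */
    }

    static void visit_in_order(struct rbtree *tree)
    {
        struct rbnode *node;

        /* nodes arrive in sorted (in-order) sequence */
        RB_FOR_EACH(tree, node) {
            process(node);
        }
    }
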
diff --git a/lib/os/thread_entry.c b/lib/os/thread_entry.c
index 384bdca..2f27dd5 100644
--- a/lib/os/thread_entry.c
+++ b/lib/os/thread_entry.c
@@ -23,7 +23,7 @@
  * This routine does not return, and is marked as such so the compiler won't
  * generate preamble code that is only used by functions that actually return.
  */
-FUNC_NORETURN void _thread_entry(k_thread_entry_t entry,
+FUNC_NORETURN void z_thread_entry(k_thread_entry_t entry,
 				 void *p1, void *p2, void *p3)
 {
 	entry(p1, p2, p3);
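
z_thread_entry() honors its FUNC_NORETURN contract just past the line shown
here: if the entry function ever returns, the thread presumably aborts itself
rather than falling off the end, along the lines of:

    /* a returning entry function terminates its own thread */
    k_thread_abort(k_current_get());

    /* aborting the current thread cannot return */
    CODE_UNREACHABLE;
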
diff --git a/lib/posix/pthread_barrier.c b/lib/posix/pthread_barrier.c
index d23f0e5..2126256 100644
--- a/lib/posix/pthread_barrier.c
+++ b/lib/posix/pthread_barrier.c
@@ -19,13 +19,13 @@
 	if (b->count >= b->max) {
 		b->count = 0;
 
-		while (_waitq_head(&b->wait_q)) {
+		while (z_waitq_head(&b->wait_q)) {
 			_ready_one_thread(&b->wait_q);
 		}
-		_reschedule_irqlock(key);
+		z_reschedule_irqlock(key);
 		ret = PTHREAD_BARRIER_SERIAL_THREAD;
 	} else {
-		(void) _pend_curr_irqlock(key, &b->wait_q, K_FOREVER);
+		(void) z_pend_curr_irqlock(key, &b->wait_q, K_FOREVER);
 	}
 
 	return ret;
diff --git a/lib/posix/pthread_cond.c b/lib/posix/pthread_cond.c
index 4b193ac..320d517 100644
--- a/lib/posix/pthread_cond.c
+++ b/lib/posix/pthread_cond.c
@@ -18,7 +18,7 @@
 	mut->lock_count = 0;
 	mut->owner = NULL;
 	_ready_one_thread(&mut->wait_q);
-	ret = _pend_curr_irqlock(key, &cv->wait_q, timeout);
+	ret = z_pend_curr_irqlock(key, &cv->wait_q, timeout);
 
 	/* FIXME: this extra lock (and the potential context switch it
 	 * can cause) could be optimized out.  At the point of the
@@ -49,7 +49,7 @@
 	int key = irq_lock();
 
 	_ready_one_thread(&cv->wait_q);
-	_reschedule_irqlock(key);
+	z_reschedule_irqlock(key);
 
 	return 0;
 }
@@ -58,11 +58,11 @@
 {
 	int key = irq_lock();
 
-	while (_waitq_head(&cv->wait_q)) {
+	while (z_waitq_head(&cv->wait_q)) {
 		_ready_one_thread(&cv->wait_q);
 	}
 
-	_reschedule_irqlock(key);
+	z_reschedule_irqlock(key);
 
 	return 0;
 }
diff --git a/lib/posix/pthread_mutex.c b/lib/posix/pthread_mutex.c
index 39a9e44..c7597e4 100644
--- a/lib/posix/pthread_mutex.c
+++ b/lib/posix/pthread_mutex.c
@@ -48,7 +48,7 @@
 		return EINVAL;
 	}
 
-	rc = _pend_curr_irqlock(key, &m->wait_q, timeout);
+	rc = z_pend_curr_irqlock(key, &m->wait_q, timeout);
 	if (rc != 0) {
 		rc = ETIMEDOUT;
 	}
@@ -95,7 +95,7 @@
 
 	m->type = mattr->type;
 
-	_waitq_init(&m->wait_q);
+	z_waitq_init(&m->wait_q);
 
 	return 0;
 }
@@ -135,13 +135,13 @@
 	m->lock_count--;
 
 	if (m->lock_count == 0) {
-		thread = _unpend_first_thread(&m->wait_q);
+		thread = z_unpend_first_thread(&m->wait_q);
 		if (thread) {
 			m->owner = (pthread_t)thread;
 			m->lock_count++;
-			_ready_thread(thread);
-			_set_thread_return_value(thread, 0);
-			_reschedule_irqlock(key);
+			z_ready_thread(thread);
+			z_set_thread_return_value(thread, 0);
+			z_reschedule_irqlock(key);
 			return 0;
 		}
 		m->owner = NULL;
diff --git a/samples/boards/96b_argonkey/microphone/src/main.c b/samples/boards/96b_argonkey/microphone/src/main.c
index 2f8a5e6..6994484 100644
--- a/samples/boards/96b_argonkey/microphone/src/main.c
+++ b/samples/boards/96b_argonkey/microphone/src/main.c
@@ -178,8 +178,8 @@
 			pcm_l = (char)(pcm_out[j] & 0xFF);
 			pcm_h = (char)((pcm_out[j] >> 8) & 0xFF);
 
-			_impl_k_str_out(&pcm_l, 1);
-			_impl_k_str_out(&pcm_h, 1);
+			z_impl_k_str_out(&pcm_l, 1);
+			z_impl_k_str_out(&pcm_h, 1);
 		}
 	}
 #endif
diff --git a/scripts/gen_kobject_list.py b/scripts/gen_kobject_list.py
index 5ac0455..ba36ce9 100755
--- a/scripts/gen_kobject_list.py
+++ b/scripts/gen_kobject_list.py
@@ -63,7 +63,7 @@
 
 
 header = """%compare-lengths
-%define lookup-function-name _k_object_lookup
+%define lookup-function-name z_object_lookup
 %language=ANSI-C
 %global-table
 %struct-type
@@ -83,12 +83,12 @@
 # turned into a string, we told gperf to expect binary strings that are not
 # NULL-terminated.
 footer = """%%
-struct _k_object *_k_object_gperf_find(void *obj)
+struct _k_object *z_object_gperf_find(void *obj)
 {
-    return _k_object_lookup((const char *)obj, sizeof(void *));
+    return z_object_lookup((const char *)obj, sizeof(void *));
 }
 
-void _k_object_gperf_wordlist_foreach(_wordlist_cb_func_t func, void *context)
+void z_object_gperf_wordlist_foreach(_wordlist_cb_func_t func, void *context)
 {
     int i;
 
@@ -100,11 +100,11 @@
 }
 
 #ifndef CONFIG_DYNAMIC_OBJECTS
-struct _k_object *_k_object_find(void *obj)
-	ALIAS_OF(_k_object_gperf_find);
+struct _k_object *z_object_find(void *obj)
+	ALIAS_OF(z_object_gperf_find);
 
-void _k_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
-	ALIAS_OF(_k_object_gperf_wordlist_foreach);
+void z_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
+	ALIAS_OF(z_object_gperf_wordlist_foreach);
 #endif
 """
 
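The gperf output behind z_object_lookup() hashes the object's address bytes,
and z_object_find() is the lookup the syscall machinery uses to turn an
untrusted user pointer into kernel-held metadata. A hedged sketch of a
typical consumer, assuming the v1.14-era struct _k_object members `type`
and `flags`:

    static bool is_valid_sem(void *user_ptr)
    {
        struct _k_object *ko = z_object_find(user_ptr);

        /* unknown address, wrong type, or never initialized:
         * reject the object
         */
        return ko != NULL && ko->type == K_OBJ_SEM &&
               (ko->flags & K_OBJ_FLAG_INITIALIZED) != 0;
    }
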
diff --git a/scripts/gen_syscall_header.py b/scripts/gen_syscall_header.py
index cab36cd..fbf0176 100755
--- a/scripts/gen_syscall_header.py
+++ b/scripts/gen_syscall_header.py
@@ -70,15 +70,17 @@
     # from gc-sections; these references will not consume space.
 
     sys.stdout.write(
-        "static _GENERIC_SECTION(hndlr_ref) __used void *href = (void *)&hdlr_##name; \\\n")
+        "static Z_GENERIC_SECTION(hndlr_ref) __used void *href = (void *)&hdlr_##name; \\\n")
     tabs(tabcount)
     if (ret != Retval.VOID):
         sys.stdout.write("return (ret)")
     else:
         sys.stdout.write("return (void)")
     if (argc <= 6 and ret != Retval.U64):
-        sys.stdout.write("_arch")
-    sys.stdout.write("_syscall%s_invoke%d(" %
+        sys.stdout.write("z_arch_syscall%s_invoke%d(" %
+                     (("_ret64" if ret == Retval.U64 else ""), argc))
+    else:
+        sys.stdout.write("z_syscall%s_invoke%d(" %
                      (("_ret64" if ret == Retval.U64 else ""), argc))
     for i in range(argc):
         sys.stdout.write("(u32_t)p%d, " % (i))
@@ -88,7 +90,7 @@
 def gen_call_impl(ret, argc):
     if (ret != Retval.VOID):
         sys.stdout.write("return ")
-    sys.stdout.write("_impl_##name(")
+    sys.stdout.write("z_impl_##name(")
     for i in range(argc):
         sys.stdout.write("p%d" % (i))
         if i != (argc - 1):
@@ -106,7 +108,7 @@
     newline()
 
     if not user_only:
-        gen_fn(ret, argc, "_impl_##name", extern=True)
+        gen_fn(ret, argc, "z_impl_##name", extern=True)
         sys.stdout.write(";")
         newline()
 
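The reshaped branch keeps the fast register trap for anything the ABI can
encode (at most six 32-bit arguments and a 32-bit return) and now routes the
rest through the C helper. Sketch of the else-branch output for a
hypothetical two-argument, 64-bit-return syscall (helper name per the format
strings above; declaration and call ID are illustrative):

    static inline u64_t my_call64(u32_t a, u32_t b)
    {
        /* oversized return: skip z_arch_syscall_invoke2() and
         * marshal through the C helper instead
         */
        return (u64_t)z_syscall_ret64_invoke2((u32_t)a, (u32_t)b,
                                              K_SYSCALL_MY_CALL64);
    }
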
diff --git a/soc/arc/quark_se_c1000_ss/power.c b/soc/arc/quark_se_c1000_ss/power.c
index c45023d..107603e 100644
--- a/soc/arc/quark_se_c1000_ss/power.c
+++ b/soc/arc/quark_se_c1000_ss/power.c
@@ -73,8 +73,8 @@
 	case SYS_POWER_STATE_LOW_POWER_2:
 		{
 			/* Expire the timer as it is disabled in SS2. */
-			u32_t limit = _arc_v2_aux_reg_read(_ARC_V2_TMR0_LIMIT);
-			_arc_v2_aux_reg_write(_ARC_V2_TMR0_COUNT, limit - 1);
+			u32_t limit = z_arc_v2_aux_reg_read(_ARC_V2_TMR0_LIMIT);
+			z_arc_v2_aux_reg_write(_ARC_V2_TMR0_COUNT, limit - 1);
 		}
 	case SYS_POWER_STATE_LOW_POWER_1:
 		__builtin_arc_seti(0);
diff --git a/soc/arm/nordic_nrf/nrf51/soc.c b/soc/arm/nordic_nrf/nrf51/soc.c
index a548f78..cb3021a 100644
--- a/soc/arm/nordic_nrf/nrf51/soc.c
+++ b/soc/arm/nordic_nrf/nrf51/soc.c
@@ -21,8 +21,8 @@
 #include <logging/log.h>
 
 #ifdef CONFIG_RUNTIME_NMI
-extern void _NmiInit(void);
-#define NMI_INIT() _NmiInit()
+extern void z_NmiInit(void);
+#define NMI_INIT() z_NmiInit()
 #else
 #define NMI_INIT()
 #endif
diff --git a/soc/arm/nordic_nrf/nrf52/soc.c b/soc/arm/nordic_nrf/nrf52/soc.c
index aa00eaf..106a91a 100644
--- a/soc/arm/nordic_nrf/nrf52/soc.c
+++ b/soc/arm/nordic_nrf/nrf52/soc.c
@@ -21,8 +21,8 @@
 #include <logging/log.h>
 
 #ifdef CONFIG_RUNTIME_NMI
-extern void _NmiInit(void);
-#define NMI_INIT() _NmiInit()
+extern void z_NmiInit(void);
+#define NMI_INIT() z_NmiInit()
 #else
 #define NMI_INIT()
 #endif
diff --git a/soc/arm/nordic_nrf/nrf91/soc.c b/soc/arm/nordic_nrf/nrf91/soc.c
index f931f0d..62a201b 100644
--- a/soc/arm/nordic_nrf/nrf91/soc.c
+++ b/soc/arm/nordic_nrf/nrf91/soc.c
@@ -20,8 +20,8 @@
 #include <logging/log.h>
 
 #ifdef CONFIG_RUNTIME_NMI
-extern void _NmiInit(void);
-#define NMI_INIT() _NmiInit()
+extern void z_NmiInit(void);
+#define NMI_INIT() z_NmiInit()
 #else
 #define NMI_INIT()
 #endif
diff --git a/soc/posix/inf_clock/soc.c b/soc/posix/inf_clock/soc.c
index 8835e0a..1e5a10c 100644
--- a/soc/posix/inf_clock/soc.c
+++ b/soc/posix/inf_clock/soc.c
@@ -167,7 +167,7 @@
 
 
 /**
- * Just a wrapper function to call Zephyr's _Cstart()
+ * Just a wrapper function to call Zephyr's z_cstart()
  * called from posix_boot_cpu()
  */
 static void *zephyr_wrapper(void *a)
@@ -186,7 +186,7 @@
 	posix_init_multithreading();
 
 	/* Start Zephyr: */
-	_Cstart();
+	z_cstart();
 	CODE_UNREACHABLE;
 
 	return NULL;
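
Every port ends bring-up the same way: once the stack and C runtime are
usable it hands control to z_cstart(), which initializes the kernel, switches
to the main thread and never returns. Condensed pattern (function name
illustrative):

    void board_boot(void)
    {
        /* arch/SoC specific: clear BSS, set up stacks, mask IRQs */

        z_cstart();        /* enters the kernel; never returns */
        CODE_UNREACHABLE;
    }
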
diff --git a/soc/riscv32/openisa_rv32m1/soc.c b/soc/riscv32/openisa_rv32m1/soc.c
index a8d70fc..2024833 100644
--- a/soc/riscv32/openisa_rv32m1/soc.c
+++ b/soc/riscv32/openisa_rv32m1/soc.c
@@ -66,7 +66,7 @@
 	EVENT_UNIT->SLPCTRL |= EVENT_SLPCTRL_SYSRSTREQST_MASK;
 }
 
-void _arch_irq_enable(unsigned int irq)
+void z_arch_irq_enable(unsigned int irq)
 {
 	if (IS_ENABLED(CONFIG_MULTI_LEVEL_INTERRUPTS)) {
 		unsigned int level = rv32m1_irq_level(irq);
@@ -84,7 +84,7 @@
 	}
 }
 
-void _arch_irq_disable(unsigned int irq)
+void z_arch_irq_disable(unsigned int irq)
 {
 	if (IS_ENABLED(CONFIG_MULTI_LEVEL_INTERRUPTS)) {
 		unsigned int level = rv32m1_irq_level(irq);
@@ -102,7 +102,7 @@
 	}
 }
 
-int _arch_irq_is_enabled(unsigned int irq)
+int z_arch_irq_is_enabled(unsigned int irq)
 {
 	if (IS_ENABLED(CONFIG_MULTI_LEVEL_INTERRUPTS)) {
 		unsigned int level = rv32m1_irq_level(irq);
@@ -135,7 +135,7 @@
  * SoC-level interrupt initialization. Clear any pending interrupts or
  * events, and find the INTMUX device if necessary.
  *
- * This gets called as almost the first thing _Cstart() does, so it
+ * This gets called as almost the first thing z_cstart() does, so it
  * will happen before any calls to the _arch_irq_xxx() routines above.
  */
 void soc_interrupt_init(void)
diff --git a/soc/riscv32/riscv-privilege/common/soc_common_irq.c b/soc/riscv32/riscv-privilege/common/soc_common_irq.c
index e2dcc9c..e5f323c 100644
--- a/soc/riscv32/riscv-privilege/common/soc_common_irq.c
+++ b/soc/riscv32/riscv-privilege/common/soc_common_irq.c
@@ -11,7 +11,7 @@
  */
 #include <irq.h>
 
-void _arch_irq_enable(unsigned int irq)
+void z_arch_irq_enable(unsigned int irq)
 {
 	u32_t mie;
 
@@ -31,7 +31,7 @@
 			  : "r" (1 << irq));
 }
 
-void _arch_irq_disable(unsigned int irq)
+void z_arch_irq_disable(unsigned int irq)
 {
 	u32_t mie;
 
@@ -51,7 +51,7 @@
 			  : "r" (1 << irq));
 };
 
-int _arch_irq_is_enabled(unsigned int irq)
+int z_arch_irq_is_enabled(unsigned int irq)
 {
 	u32_t mie;
 
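On RISC-V privilege-spec targets all three helpers are a single
read-modify-write of the machine interrupt-enable CSR; enable sets one mie
bit and disable clears it, presumably via the standard read-and-set /
read-and-clear forms:

    static void mie_bit_sketch(unsigned int irq)
    {
        u32_t mie;

        /* atomically set bit `irq` of mie (enable) */
        __asm__ volatile ("csrrs %0, mie, %1"
                          : "=r" (mie)
                          : "r" (1 << irq));

        /* atomically clear it again (disable) */
        __asm__ volatile ("csrrc %0, mie, %1"
                          : "=r" (mie)
                          : "r" (1 << irq));
    }
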
diff --git a/soc/x86/intel_quark/quark_se/eoi.c b/soc/x86/intel_quark/quark_se/eoi.c
index 3fe952b..8626e27 100644
--- a/soc/x86/intel_quark/quark_se/eoi.c
+++ b/soc/x86/intel_quark/quark_se/eoi.c
@@ -19,7 +19,7 @@
 #include <sys_io.h>
 #include <interrupt_controller/ioapic_priv.h>
 
-void _lakemont_eoi(void)
+void z_lakemont_eoi(void)
 {
 	/* It is difficult to know whether the IRQ being serviced is
 	 * a level interrupt handled by the IOAPIC; the only information
diff --git a/soc/xtensa/esp32/esp32-mp.c b/soc/xtensa/esp32/esp32-mp.c
index c65de06..55029b7 100644
--- a/soc/xtensa/esp32/esp32-mp.c
+++ b/soc/xtensa/esp32/esp32-mp.c
@@ -190,7 +190,7 @@
 	smp_log("ESP32: APPCPU start sequence complete");
 }
 
-void _arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
+void z_arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
 		     void (*fn)(int, void *), void *arg)
 {
 	volatile struct cpustart_rec sr;
diff --git a/soc/xtensa/esp32/soc.c b/soc/xtensa/esp32/soc.c
index afe5caa..bb0b370 100644
--- a/soc/xtensa/esp32/soc.c
+++ b/soc/xtensa/esp32/soc.c
@@ -16,7 +16,7 @@
 #include <toolchain/gcc.h>
 #include <zephyr/types.h>
 
-extern void _Cstart(void);
+extern void z_cstart(void);
 
 /*
  * This is written in C rather than assembly since, during the port bring up,
@@ -70,7 +70,7 @@
 
 
 	/* Start Zephyr */
-	_Cstart();
+	z_cstart();
 
 	CODE_UNREACHABLE;
 }
diff --git a/soc/xtensa/intel_s1000/soc.c b/soc/xtensa/intel_s1000/soc.c
index 14c11b7..b5e9a86 100644
--- a/soc/xtensa/intel_s1000/soc.c
+++ b/soc/xtensa/intel_s1000/soc.c
@@ -19,7 +19,7 @@
 
 static u32_t ref_clk_freq;
 
-void _soc_irq_enable(u32_t irq)
+void z_soc_irq_enable(u32_t irq)
 {
 	struct device *dev_cavs, *dev_ictl;
 
@@ -38,7 +38,7 @@
 		break;
 	default:
 		/* regular interrupt */
-		_xtensa_irq_enable(XTENSA_IRQ_NUMBER(irq));
+		z_xtensa_irq_enable(XTENSA_IRQ_NUMBER(irq));
 		return;
 	}
 
@@ -50,7 +50,7 @@
 	/* If the control comes here it means the specified interrupt
 	 * is in either CAVS interrupt logic or DW interrupt controller
 	 */
-	_xtensa_irq_enable(XTENSA_IRQ_NUMBER(irq));
+	z_xtensa_irq_enable(XTENSA_IRQ_NUMBER(irq));
 
 	switch (CAVS_IRQ_NUMBER(irq)) {
 	case DW_ICTL_IRQ_CAVS_OFFSET:
@@ -78,7 +78,7 @@
 	irq_enable_next_level(dev_ictl, INTR_CNTL_IRQ_NUM(irq));
 }
 
-void _soc_irq_disable(u32_t irq)
+void z_soc_irq_disable(u32_t irq)
 {
 	struct device *dev_cavs, *dev_ictl;
 
@@ -97,7 +97,7 @@
 		break;
 	default:
 		/* regular interrupt */
-		_xtensa_irq_disable(XTENSA_IRQ_NUMBER(irq));
+		z_xtensa_irq_disable(XTENSA_IRQ_NUMBER(irq));
 		return;
 	}
 
@@ -120,7 +120,7 @@
 
 		/* Disable the parent IRQ if all children are disabled */
 		if (!irq_is_enabled_next_level(dev_cavs)) {
-			_xtensa_irq_disable(XTENSA_IRQ_NUMBER(irq));
+			z_xtensa_irq_disable(XTENSA_IRQ_NUMBER(irq));
 		}
 		return;
 	}
@@ -142,7 +142,7 @@
 		irq_disable_next_level(dev_cavs, CAVS_IRQ_NUMBER(irq));
 
 		if (!irq_is_enabled_next_level(dev_cavs)) {
-			_xtensa_irq_disable(XTENSA_IRQ_NUMBER(irq));
+			z_xtensa_irq_disable(XTENSA_IRQ_NUMBER(irq));
 		}
 	}
 }
diff --git a/soc/xtensa/intel_s1000/soc.h b/soc/xtensa/intel_s1000/soc.h
index ed5eb38..8e776ca 100644
--- a/soc/xtensa/intel_s1000/soc.h
+++ b/soc/xtensa/intel_s1000/soc.h
@@ -210,8 +210,8 @@
 #define SOC_DCACHE_INVALIDATE(addr, size)	\
 	xthal_dcache_region_invalidate((addr), (size))
 
-extern void _soc_irq_enable(u32_t irq);
-extern void _soc_irq_disable(u32_t irq);
+extern void z_soc_irq_enable(u32_t irq);
+extern void z_soc_irq_disable(u32_t irq);
 
 extern u32_t soc_get_ref_clk_freq(void);
 
diff --git a/subsys/bluetooth/host/hci_core.c b/subsys/bluetooth/host/hci_core.c
index 42f083a..9e48b67 100644
--- a/subsys/bluetooth/host/hci_core.c
+++ b/subsys/bluetooth/host/hci_core.c
@@ -63,7 +63,7 @@
 static void init_work(struct k_work *work);
 
 struct bt_dev bt_dev = {
-	.init          = _K_WORK_INITIALIZER(init_work),
+	.init          = Z_WORK_INITIALIZER(init_work),
 	/* Give cmd_sem allowing to send first HCI_Reset cmd, the only
 	 * exception is if the controller requests to wait for an
 	 * initial Command Complete for NOP.
diff --git a/subsys/bluetooth/host/mesh/proxy.c b/subsys/bluetooth/host/mesh/proxy.c
index a7c8653..e971201 100644
--- a/subsys/bluetooth/host/mesh/proxy.c
+++ b/subsys/bluetooth/host/mesh/proxy.c
@@ -91,7 +91,7 @@
 } clients[CONFIG_BT_MAX_CONN] = {
 	[0 ... (CONFIG_BT_MAX_CONN - 1)] = {
 #if defined(CONFIG_BT_MESH_GATT_PROXY)
-		.send_beacons = _K_WORK_INITIALIZER(proxy_send_beacons),
+		.send_beacons = Z_WORK_INITIALIZER(proxy_send_beacons),
 #endif
 	},
 };
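
Z_WORK_INITIALIZER (the renamed _K_WORK_INITIALIZER) is the static
counterpart of k_work_init(): it expands to a designated initializer, which
is why bt_dev and the proxy client array can embed ready-to-submit work items
with no runtime setup. Usage sketch:

    static void my_work_handler(struct k_work *work)
    {
        /* runs later in the system workqueue thread */
    }

    /* build-time init: k_work_submit(&my_work) is immediately legal */
    static struct k_work my_work = Z_WORK_INITIALIZER(my_work_handler);
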
diff --git a/subsys/logging/log_msg.c b/subsys/logging/log_msg.c
index 21e4d45..d06cd90 100644
--- a/subsys/logging/log_msg.c
+++ b/subsys/logging/log_msg.c
@@ -156,7 +156,7 @@
 {
 	struct log_msg_cont *cont;
 	struct log_msg_cont **next;
-	struct  log_msg *msg = _log_msg_std_alloc();
+	struct  log_msg *msg = z_log_msg_std_alloc();
 	int n = (int)nargs;
 
 	if ((msg == NULL) || nargs <= LOG_MSG_NARGS_SINGLE_CHUNK) {
diff --git a/subsys/logging/log_output.c b/subsys/logging/log_output.c
index 84677f0..4518bd6 100644
--- a/subsys/logging/log_output.c
+++ b/subsys/logging/log_output.c
@@ -20,10 +20,10 @@
 #define HEXDUMP_BYTES_IN_LINE 8
 
 #define  DROPPED_COLOR_PREFIX \
-	_LOG_EVAL(CONFIG_LOG_BACKEND_SHOW_COLOR, (LOG_COLOR_CODE_RED), ())
+	Z_LOG_EVAL(CONFIG_LOG_BACKEND_SHOW_COLOR, (LOG_COLOR_CODE_RED), ())
 
 #define DROPPED_COLOR_POSTFIX \
-	_LOG_EVAL(CONFIG_LOG_BACKEND_SHOW_COLOR, (LOG_COLOR_CODE_DEFAULT), ())
+	Z_LOG_EVAL(CONFIG_LOG_BACKEND_SHOW_COLOR, (LOG_COLOR_CODE_DEFAULT), ())
 
 static const char *const severity[] = {
 	NULL,
diff --git a/subsys/net/lib/sockets/getaddrinfo.c b/subsys/net/lib/sockets/getaddrinfo.c
index d6a305a..d220cdd 100644
--- a/subsys/net/lib/sockets/getaddrinfo.c
+++ b/subsys/net/lib/sockets/getaddrinfo.c
@@ -80,7 +80,7 @@
 }
 
 
-int _impl_z_zsock_getaddrinfo_internal(const char *host, const char *service,
+int z_impl_z_zsock_getaddrinfo_internal(const char *host, const char *service,
 				       const struct zsock_addrinfo *hints,
 				       struct zsock_addrinfo *res)
 {
@@ -193,7 +193,7 @@
 		}
 	}
 
-	ret = _impl_z_zsock_getaddrinfo_internal(host_copy, service_copy,
+	ret = z_impl_z_zsock_getaddrinfo_internal(host_copy, service_copy,
 						 hints ? &hints_copy : NULL,
 						 (struct zsock_addrinfo *)res);
 out:
diff --git a/subsys/net/lib/sockets/sockets.c b/subsys/net/lib/sockets/sockets.c
index 14a81d1..a4699e8 100644
--- a/subsys/net/lib/sockets/sockets.c
+++ b/subsys/net/lib/sockets/sockets.c
@@ -106,7 +106,7 @@
 	/* Set net context object as initialized and grant access to the
 	 * calling thread (and only the calling thread)
 	 */
-	_k_object_recycle(ctx);
+	z_object_recycle(ctx);
 #endif
 
 	z_finalize_fd(fd, ctx, (const struct fd_op_vtable *)&sock_fd_op_vtable);
@@ -116,7 +116,7 @@
 	return fd;
 }
 
-int _impl_zsock_socket(int family, int type, int proto)
+int z_impl_zsock_socket(int family, int type, int proto)
 {
 #if defined(CONFIG_NET_SOCKETS_SOCKOPT_TLS)
 	if (((proto >= IPPROTO_TLS_1_0) && (proto <= IPPROTO_TLS_1_2)) ||
@@ -146,14 +146,14 @@
 	/* implementation call to net_context_get() should do all necessary
 	 * checking
 	 */
-	return _impl_zsock_socket(family, type, proto);
+	return z_impl_zsock_socket(family, type, proto);
 }
 #endif /* CONFIG_USERSPACE */
 
 int zsock_close_ctx(struct net_context *ctx)
 {
 #ifdef CONFIG_USERSPACE
-	_k_object_uninit(ctx);
+	z_object_uninit(ctx);
 #endif
 	/* Reset callbacks to avoid any race conditions while
 	 * flushing queues. No need to check return values here,
@@ -173,7 +173,7 @@
 	return 0;
 }
 
-int _impl_zsock_close(int sock)
+int z_impl_zsock_close(int sock)
 {
 	const struct fd_op_vtable *vtable;
 	void *ctx = z_get_fd_obj_and_vtable(sock, &vtable);
@@ -192,11 +192,11 @@
 #ifdef CONFIG_USERSPACE
 Z_SYSCALL_HANDLER(zsock_close, sock)
 {
-	return _impl_zsock_close(sock);
+	return z_impl_zsock_close(sock);
 }
 #endif /* CONFIG_USERSPACE */
 
-int _impl_zsock_shutdown(int sock, int how)
+int z_impl_zsock_shutdown(int sock, int how)
 {
 	/* shutdown() is described by POSIX as just disabling recv() and/or
 	 * send() operations on socket. Of course, real-world software mostly
@@ -213,7 +213,7 @@
 #ifdef CONFIG_USERSPACE
 Z_SYSCALL_HANDLER(zsock_shutdown, sock, how)
 {
-	return _impl_zsock_shutdown(sock, how);
+	return z_impl_zsock_shutdown(sock, how);
 }
 #endif /* CONFIG_USERSPACE */
 
@@ -289,7 +289,7 @@
 	return 0;
 }
 
-int _impl_zsock_bind(int sock, const struct sockaddr *addr, socklen_t addrlen)
+int z_impl_zsock_bind(int sock, const struct sockaddr *addr, socklen_t addrlen)
 {
 	VTABLE_CALL(bind, sock, addr, addrlen);
 }
@@ -302,7 +302,7 @@
 	Z_OOPS(Z_SYSCALL_VERIFY(addrlen <= sizeof(dest_addr_copy)));
 	Z_OOPS(z_user_from_copy(&dest_addr_copy, (void *)addr, addrlen));
 
-	return _impl_zsock_bind(sock, (struct sockaddr *)&dest_addr_copy,
+	return z_impl_zsock_bind(sock, (struct sockaddr *)&dest_addr_copy,
 				addrlen);
 }
 #endif /* CONFIG_USERSPACE */
@@ -318,7 +318,7 @@
 	return 0;
 }
 
-int _impl_zsock_connect(int sock, const struct sockaddr *addr,
+int z_impl_zsock_connect(int sock, const struct sockaddr *addr,
 			socklen_t addrlen)
 {
 	VTABLE_CALL(connect, sock, addr, addrlen);
@@ -332,7 +332,7 @@
 	Z_OOPS(Z_SYSCALL_VERIFY(addrlen <= sizeof(dest_addr_copy)));
 	Z_OOPS(z_user_from_copy(&dest_addr_copy, (void *)addr, addrlen));
 
-	return _impl_zsock_connect(sock, (struct sockaddr *)&dest_addr_copy,
+	return z_impl_zsock_connect(sock, (struct sockaddr *)&dest_addr_copy,
 				   addrlen);
 }
 #endif /* CONFIG_USERSPACE */
@@ -345,7 +345,7 @@
 	return 0;
 }
 
-int _impl_zsock_listen(int sock, int backlog)
+int z_impl_zsock_listen(int sock, int backlog)
 {
 	VTABLE_CALL(listen, sock, backlog);
 }
@@ -353,7 +353,7 @@
 #ifdef CONFIG_USERSPACE
 Z_SYSCALL_HANDLER(zsock_listen, sock, backlog)
 {
-	return _impl_zsock_listen(sock, backlog);
+	return z_impl_zsock_listen(sock, backlog);
 }
 #endif /* CONFIG_USERSPACE */
 
@@ -370,7 +370,7 @@
 	struct net_context *ctx = k_fifo_get(&parent->accept_q, K_FOREVER);
 
 #ifdef CONFIG_USERSPACE
-	_k_object_recycle(ctx);
+	z_object_recycle(ctx);
 #endif
 
 	if (addr != NULL && addrlen != NULL) {
@@ -397,7 +397,7 @@
 	return fd;
 }
 
-int _impl_zsock_accept(int sock, struct sockaddr *addr, socklen_t *addrlen)
+int z_impl_zsock_accept(int sock, struct sockaddr *addr, socklen_t *addrlen)
 {
 	VTABLE_CALL(accept, sock, addr, addrlen);
 }
@@ -416,7 +416,7 @@
 		return -1;
 	}
 
-	ret = _impl_zsock_accept(sock, (struct sockaddr *)addr, &addrlen_copy);
+	ret = z_impl_zsock_accept(sock, (struct sockaddr *)addr, &addrlen_copy);
 
 	if (ret >= 0 &&
 	    z_user_to_copy((void *)addrlen, &addrlen_copy,
@@ -467,7 +467,7 @@
 	return status;
 }
 
-ssize_t _impl_zsock_sendto(int sock, const void *buf, size_t len, int flags,
+ssize_t z_impl_zsock_sendto(int sock, const void *buf, size_t len, int flags,
 			   const struct sockaddr *dest_addr, socklen_t addrlen)
 {
 	VTABLE_CALL(sendto, sock, buf, len, flags, dest_addr, addrlen);
@@ -485,7 +485,7 @@
 					addrlen));
 	}
 
-	return _impl_zsock_sendto(sock, (const void *)buf, len, flags,
+	return z_impl_zsock_sendto(sock, (const void *)buf, len, flags,
 			dest_addr ? (struct sockaddr *)&dest_addr_copy : NULL,
 			addrlen);
 }
@@ -770,7 +770,7 @@
 	return 0;
 }
 
-ssize_t _impl_zsock_recvfrom(int sock, void *buf, size_t max_len, int flags,
+ssize_t z_impl_zsock_recvfrom(int sock, void *buf, size_t max_len, int flags,
 			     struct sockaddr *src_addr, socklen_t *addrlen)
 {
 	VTABLE_CALL(recvfrom, sock, buf, max_len, flags, src_addr, addrlen);
@@ -796,7 +796,7 @@
 	}
 	Z_OOPS(src_addr && Z_SYSCALL_MEMORY_WRITE(src_addr, addrlen_copy));
 
-	ret = _impl_zsock_recvfrom(sock, (void *)buf, max_len, flags,
+	ret = z_impl_zsock_recvfrom(sock, (void *)buf, max_len, flags,
 				   (struct sockaddr *)src_addr,
 				   addrlen_param ? &addrlen_copy : NULL);
 
@@ -812,7 +812,7 @@
 /* As this is limited function, we don't follow POSIX signature, with
  * "..." instead of last arg.
  */
-int _impl_zsock_fcntl(int sock, int cmd, int flags)
+int z_impl_zsock_fcntl(int sock, int cmd, int flags)
 {
 	const struct fd_op_vtable *vtable;
 	void *obj;
@@ -828,7 +828,7 @@
 #ifdef CONFIG_USERSPACE
 Z_SYSCALL_HANDLER(zsock_fcntl, sock, cmd, flags)
 {
-	return _impl_zsock_fcntl(sock, cmd, flags);
+	return z_impl_zsock_fcntl(sock, cmd, flags);
 }
 #endif
 
@@ -889,7 +889,7 @@
 	return timeout - elapsed;
 }
 
-int _impl_zsock_poll(struct zsock_pollfd *fds, int nfds, int timeout)
+int z_impl_zsock_poll(struct zsock_pollfd *fds, int nfds, int timeout)
 {
 	bool retry;
 	int ret = 0;
@@ -1024,7 +1024,7 @@
 		return -1;
 	}
 
-	ret = _impl_zsock_poll(fds_copy, nfds, timeout);
+	ret = z_impl_zsock_poll(fds_copy, nfds, timeout);
 
 	if (ret >= 0) {
 		z_user_to_copy((void *)fds, fds_copy, fds_size);
@@ -1035,7 +1035,7 @@
 }
 #endif
 
-int _impl_zsock_inet_pton(sa_family_t family, const char *src, void *dst)
+int z_impl_zsock_inet_pton(sa_family_t family, const char *src, void *dst)
 {
 	if (net_addr_pton(family, src, dst) == 0) {
 		return 1;
@@ -1065,7 +1065,7 @@
 	}
 
 	Z_OOPS(z_user_string_copy(src_copy, (char *)src, sizeof(src_copy)));
-	ret = _impl_zsock_inet_pton(family, src_copy, dst_copy);
+	ret = z_impl_zsock_inet_pton(family, src_copy, dst_copy);
 	Z_OOPS(z_user_to_copy((void *)dst, dst_copy, dst_size));
 
 	return ret;
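
The pointer-taking socket handlers above all follow one discipline:
bound-check sizes, copy user memory into kernel-side storage (faulting the
caller, not the kernel, on a bad pointer), call the z_impl_ function, then
copy results back out. Condensed into one hypothetical handler
(zsock_example is not a real API):

    Z_SYSCALL_HANDLER(zsock_example, sock, addr, addrlen)
    {
        struct sockaddr_storage copy;

        /* reject oversized input before touching user memory */
        Z_OOPS(Z_SYSCALL_VERIFY(addrlen <= sizeof(copy)));

        /* copy-in; a bad user pointer oopses the caller */
        Z_OOPS(z_user_from_copy(&copy, (void *)addr, addrlen));

        return z_impl_zsock_example(sock, (struct sockaddr *)&copy,
                                    addrlen);
    }
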
diff --git a/subsys/net/lib/sockets/sockets_can.c b/subsys/net/lib/sockets/sockets_can.c
index 32eaea3..2995068 100644
--- a/subsys/net/lib/sockets/sockets_can.c
+++ b/subsys/net/lib/sockets/sockets_can.c
@@ -68,7 +68,7 @@
 	/* Set net context object as initialized and grant access to the
 	 * calling thread (and only the calling thread)
 	 */
-	_k_object_recycle(ctx);
+	z_object_recycle(ctx);
 #endif
 
 	z_finalize_fd(fd, ctx,
diff --git a/subsys/net/lib/sockets/sockets_packet.c b/subsys/net/lib/sockets/sockets_packet.c
index 41018d3..a492ace 100644
--- a/subsys/net/lib/sockets/sockets_packet.c
+++ b/subsys/net/lib/sockets/sockets_packet.c
@@ -69,7 +69,7 @@
 	/* Set net context object as initialized and grant access to the
 	 * calling thread (and only the calling thread)
 	 */
-	_k_object_recycle(ctx);
+	z_object_recycle(ctx);
 #endif
 
 	z_finalize_fd(fd, ctx,
diff --git a/subsys/net/lib/sockets/sockets_tls.c b/subsys/net/lib/sockets/sockets_tls.c
index cffe2ff..9d04b42 100644
--- a/subsys/net/lib/sockets/sockets_tls.c
+++ b/subsys/net/lib/sockets/sockets_tls.c
@@ -1151,7 +1151,7 @@
 	/* Set net context object as initialized and grant access to the
 	 * calling thread (and only the calling thread)
 	 */
-	_k_object_recycle(ctx);
+	z_object_recycle(ctx);
 #endif
 
 	if (tls_proto != 0) {
@@ -1270,7 +1270,7 @@
 	child = k_fifo_get(&parent->accept_q, K_FOREVER);
 
 	#ifdef CONFIG_USERSPACE
-		_k_object_recycle(child);
+		z_object_recycle(child);
 	#endif
 
 	if (addr != NULL && addrlen != NULL) {
diff --git a/subsys/testsuite/ztest/include/ztest_test.h b/subsys/testsuite/ztest/include/ztest_test.h
index c3fbb4a..2b99f63 100644
--- a/subsys/testsuite/ztest/include/ztest_test.h
+++ b/subsys/testsuite/ztest/include/ztest_test.h
@@ -48,7 +48,7 @@
  *
  * Normally a test passes just by returning without an assertion failure.
  * However, if the success case for your test involves a fatal fault,
- * you can call this function from _SysFatalErrorHandler to indicate that
+ * you can call this function from z_SysFatalErrorHandler to indicate that
  * the test passed before aborting the thread.
  */
 void ztest_test_pass(void);
diff --git a/subsys/testsuite/ztest/src/ztest.c b/subsys/testsuite/ztest/src/ztest.c
index 155ff1b..a800fd7 100644
--- a/subsys/testsuite/ztest/src/ztest.c
+++ b/subsys/testsuite/ztest/src/ztest.c
@@ -221,7 +221,7 @@
 	 * another test case to be run after the current one finishes, the
 	 * thread_stack will be reused for that new test case while the current
 	 * test case has not finished running yet (it has given the semaphore,
-	 * but has _not_ gone back to _thread_entry() and completed it's "abort
+	 * but has _not_ gone back to z_thread_entry() and completed its "abort
 	 * phase": this will corrupt the kernel ready queue.
 	 */
 	k_sem_take(&test_end_signal, K_FOREVER);
diff --git a/tests/benchmarks/sched/src/main.c b/tests/benchmarks/sched/src/main.c
index 2c59464..07fb2cf 100644
--- a/tests/benchmarks/sched/src/main.c
+++ b/tests/benchmarks/sched/src/main.c
@@ -13,14 +13,14 @@
  * of specific low level scheduling primitives independent of overhead
  * from application or API abstractions.  It works very simply: a main
  * thread creates a "partner" thread at a higher priority, the partner
- * then sleeps using _pend_curr_irqlock().  From this initial
+ * then sleeps using z_pend_curr_irqlock().  From this initial
  * state:
  *
- * 1. The main thread calls _unpend_first_thread()
- * 2. The main thread calls _ready_thread()
+ * 1. The main thread calls z_unpend_first_thread()
+ * 2. The main thread calls z_ready_thread()
  * 3. The main thread calls k_yield()
  *    (the kernel switches to the partner thread)
- * 4. The partner thread then runs and calls _pend_curr_irqlock() again
+ * 4. The partner thread then runs and calls z_pend_curr_irqlock() again
  *    (the kernel switches to the main thread)
  * 5. The main thread returns from k_yield()
  *
@@ -90,14 +90,14 @@
 	while (true) {
 		unsigned int key = irq_lock();
 
-		_pend_curr_irqlock(key, &waitq, K_FOREVER);
+		z_pend_curr_irqlock(key, &waitq, K_FOREVER);
 		stamp(PARTNER_AWAKE_PENDING);
 	}
 }
 
 void main(void)
 {
-	_waitq_init(&waitq);
+	z_waitq_init(&waitq);
 
 	int main_prio = k_thread_priority_get(k_current_get());
 	int partner_prio = main_prio - 1;
@@ -115,12 +115,12 @@
 
 	for (int i = 0; i < N_RUNS + N_SETTLE; i++) {
 		stamp(UNPENDING);
-		_unpend_first_thread(&waitq);
+		z_unpend_first_thread(&waitq);
 		stamp(UNPENDED_READYING);
-		_ready_thread(th);
+		z_ready_thread(th);
 		stamp(READIED_YIELDING);
 
-		/* _ready_thread() does not reschedule, so this is
+		/* z_ready_thread() does not reschedule, so this is
 		 * guaranteed to be the point where we will yield to
 		 * the new thread, which (being higher priority) will
 		 * run immediately, and we'll wake up synchronously as
diff --git a/tests/benchmarks/timing_info/src/semaphore_bench.c b/tests/benchmarks/timing_info/src/semaphore_bench.c
index d770521..9e02252 100644
--- a/tests/benchmarks/timing_info/src/semaphore_bench.c
+++ b/tests/benchmarks/timing_info/src/semaphore_bench.c
@@ -63,7 +63,7 @@
 	k_sleep(1000);
 
 
-	/* u64_t test_time1 = _tsc_read(); */
+	/* u64_t test_time1 = z_tsc_read(); */
 	sem_end_time = (__common_var_swap_end_time);
 	u32_t sem_cycles = sem_end_time - sem_start_time;
 
diff --git a/tests/benchmarks/timing_info/src/timing_info.h b/tests/benchmarks/timing_info/src/timing_info.h
index 8a323f5..e631b3f 100644
--- a/tests/benchmarks/timing_info/src/timing_info.h
+++ b/tests/benchmarks/timing_info/src/timing_info.h
@@ -46,7 +46,7 @@
 
 #elif CONFIG_X86
 #define TIMING_INFO_PRE_READ()
-#define TIMING_INFO_OS_GET_TIME()      (_tsc_read())
+#define TIMING_INFO_OS_GET_TIME()      (z_tsc_read())
 #define TIMING_INFO_GET_TIMER_VALUE()  (TIMING_INFO_OS_GET_TIME())
 #define SUBTRACT_CLOCK_CYCLES(val)     (val)
 
@@ -59,7 +59,7 @@
 #elif CONFIG_ARC
 #define TIMING_INFO_PRE_READ()
 #define TIMING_INFO_OS_GET_TIME()     (k_cycle_get_32())
-#define TIMING_INFO_GET_TIMER_VALUE() (_arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT))
+#define TIMING_INFO_GET_TIMER_VALUE() (z_arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT))
 #define SUBTRACT_CLOCK_CYCLES(val)    ((u32_t)val)
 
 #elif CONFIG_NIOS2
diff --git a/tests/benchmarks/timing_info/src/userspace_bench.c b/tests/benchmarks/timing_info/src/userspace_bench.c
index bc89172..35d15ec3 100644
--- a/tests/benchmarks/timing_info/src/userspace_bench.c
+++ b/tests/benchmarks/timing_info/src/userspace_bench.c
@@ -32,7 +32,7 @@
 
 /******************************************************************************/
 /* syscall needed to read timer value when in user space */
-u32_t _impl_userspace_read_timer_value(void)
+u32_t z_impl_userspace_read_timer_value(void)
 {
 	TIMING_INFO_PRE_READ();
 	return TIMING_INFO_GET_TIMER_VALUE();
@@ -158,7 +158,7 @@
 K_APP_BMEM(bench_ptn) u32_t syscall_overhead_start_time,
 	syscall_overhead_end_time;
 
-int _impl_k_dummy_syscall(void)
+int z_impl_k_dummy_syscall(void)
 {
 	return 0;
 }
@@ -210,7 +210,7 @@
 u32_t validation_overhead_obj_start_time;
 u32_t validation_overhead_obj_end_time;
 
-int _impl_validation_overhead_syscall(void)
+int z_impl_validation_overhead_syscall(void)
 {
 	return 0;
 }
diff --git a/tests/cmsis_rtos_v2/src/thread_apis.c b/tests/cmsis_rtos_v2/src/thread_apis.c
index 294bb2d..51c61aa 100644
--- a/tests/cmsis_rtos_v2/src/thread_apis.c
+++ b/tests/cmsis_rtos_v2/src/thread_apis.c
@@ -254,7 +254,7 @@
 static void thread5(void *argument)
 {
 	printk(" * Thread B started.\n");
-	osDelay(_ms_to_ticks(DELAY_MS));
+	osDelay(z_ms_to_ticks(DELAY_MS));
 	printk(" * Thread B joining...\n");
 }
 
@@ -319,13 +319,13 @@
 	thread = osThreadNew(thread5, NULL, NULL); /* osThreadDetached */
 	zassert_not_null(thread, "Failed to create thread with osThreadNew!");
 
-	osDelay(_ms_to_ticks(DELAY_MS - DELTA_MS));
+	osDelay(z_ms_to_ticks(DELAY_MS - DELTA_MS));
 
 	status = osThreadJoin(thread);
 	zassert_equal(status, osErrorResource,
 		      "Incorrect status returned from osThreadJoin!");
 
-	osDelay(_ms_to_ticks(DELTA_MS));
+	osDelay(z_ms_to_ticks(DELTA_MS));
 }
 
 void thread6(void *argument)
@@ -352,12 +352,12 @@
 	tB = osThreadNew(thread6, tA, &attr);
 	zassert_not_null(tB, "Failed to create thread with osThreadNew!");
 
-	osDelay(_ms_to_ticks(DELAY_MS - DELTA_MS));
+	osDelay(z_ms_to_ticks(DELAY_MS - DELTA_MS));
 
 	status = osThreadDetach(tA);
 	zassert_equal(status, osOK, "osThreadDetach failed.");
 
-	osDelay(_ms_to_ticks(DELTA_MS));
+	osDelay(z_ms_to_ticks(DELTA_MS));
 }
 
 void test_thread_joinable_terminate(void)
@@ -374,10 +374,10 @@
 	tB = osThreadNew(thread6, tA, &attr);
 	zassert_not_null(tB, "Failed to create thread with osThreadNew!");
 
-	osDelay(_ms_to_ticks(DELAY_MS - DELTA_MS));
+	osDelay(z_ms_to_ticks(DELAY_MS - DELTA_MS));
 
 	status = osThreadTerminate(tA);
 	zassert_equal(status, osOK, "osThreadTerminate failed.");
 
-	osDelay(_ms_to_ticks(DELTA_MS));
+	osDelay(z_ms_to_ticks(DELTA_MS));
 }
diff --git a/tests/kernel/arm_irq_vector_table/src/arm_irq_vector_table.c b/tests/kernel/arm_irq_vector_table/src/arm_irq_vector_table.c
index 8f52b82..fabcd64 100644
--- a/tests/kernel/arm_irq_vector_table/src/arm_irq_vector_table.c
+++ b/tests/kernel/arm_irq_vector_table/src/arm_irq_vector_table.c
@@ -116,7 +116,7 @@
  * NVIC_SetPendingIRQ(), to trigger the pending interrupt. And we check
  * that the corresponding interrupt handler is getting called or not.
  *
- * @see irq_enable(), _irq_priority_set(), NVIC_SetPendingIRQ()
+ * @see irq_enable(), z_irq_priority_set(), NVIC_SetPendingIRQ()
  *
  */
 void test_arm_irq_vector_table(void)
@@ -125,7 +125,7 @@
 
 	for (int ii = 0; ii < 3; ii++) {
 		irq_enable(_ISR_OFFSET + ii);
-		_irq_priority_set(_ISR_OFFSET + ii, 0, 0);
+		z_irq_priority_set(_ISR_OFFSET + ii, 0, 0);
 		k_sem_init(&sem[ii], 0, UINT_MAX);
 	}
 
diff --git a/tests/kernel/common/src/irq_offload.c b/tests/kernel/common/src/irq_offload.c
index 8ef2543..52648b9 100644
--- a/tests/kernel/common/src/irq_offload.c
+++ b/tests/kernel/common/src/irq_offload.c
@@ -25,7 +25,7 @@
 	u32_t x = (u32_t)param;
 
 	/* Make sure we're in IRQ context */
-	zassert_true(_is_in_isr(), "Not in IRQ context!");
+	zassert_true(z_is_in_isr(), "Not in IRQ context!");
 
 	sentinel = x;
 }
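
The handler can assert z_is_in_isr() because irq_offload() synchronously
runs its argument in interrupt context on the calling CPU and only returns
once the handler has finished. Usage sketch (offload_function and SENTINEL
stand in for this file's actual handler and value):

    static void run_offload_check(void)
    {
        irq_offload(offload_function, (void *)SENTINEL);

        /* the handler has already run by the time we get here */
        zassert_equal(sentinel, SENTINEL, "irq_offload did not run");
    }
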
diff --git a/tests/kernel/fatal/src/main.c b/tests/kernel/fatal/src/main.c
index 06e404b..6ace4d5 100644
--- a/tests/kernel/fatal/src/main.c
+++ b/tests/kernel/fatal/src/main.c
@@ -40,7 +40,7 @@
 static volatile int crash_reason;
 
 /* On some architectures, k_thread_abort(_current) will return instead
- * of _Swap'ing away.
+ * of z_swap'ing away.
  *
  * On ARM the PendSV exception is queued and immediately fires upon
  * completing the exception path; the faulting thread is never run
@@ -51,7 +51,7 @@
  * interrupt exit code.
  *
  * In both cases the thread is guaranteed never to run again once we
- * return from the _SysFatalErrorHandler().
+ * return from the z_SysFatalErrorHandler().
  */
 #if !(defined(CONFIG_ARM) || defined(CONFIG_XTENSA_ASM2) \
 	|| defined(CONFIG_ARC) || defined(CONFIG_X86_64))
@@ -61,7 +61,7 @@
 #ifdef ERR_IS_NORETURN
 FUNC_NORETURN
 #endif
-void _SysFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
+void z_SysFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
 {
 	TC_PRINT("Caught system error -- reason %d\n", reason);
 	crash_reason = reason;
@@ -155,7 +155,7 @@
 	/* Test that stack overflow check due to swap works */
 	blow_up_stack();
 	TC_PRINT("swapping...\n");
-	_Swap_unlocked();
+	z_swap_unlocked();
 	TC_ERROR("should never see this\n");
 	rv = TC_FAIL;
 	irq_unlock(key);
diff --git a/tests/kernel/fifo/fifo_timeout/src/main.c b/tests/kernel/fifo/fifo_timeout/src/main.c
index 8b2d5e1..136bafe 100644
--- a/tests/kernel/fifo/fifo_timeout/src/main.c
+++ b/tests/kernel/fifo/fifo_timeout/src/main.c
@@ -178,7 +178,7 @@
 				diff_ms = test_data[j].timeout - data->timeout;
 			}
 
-			if (_ms_to_ticks(diff_ms) == 1) {
+			if (z_ms_to_ticks(diff_ms) == 1) {
 				TC_PRINT(
 				" thread (q order: %d, t/o: %d, fifo %p)\n",
 				data->q_order, data->timeout, data->fifo);
diff --git a/tests/kernel/fp_sharing/src/main.c b/tests/kernel/fp_sharing/src/main.c
index cc6781c..d4e721f 100644
--- a/tests/kernel/fp_sharing/src/main.c
+++ b/tests/kernel/fp_sharing/src/main.c
@@ -24,7 +24,7 @@
  * FUTURE IMPROVEMENTS
  * On architectures where the non-integer capabilities are provided in a
  *  hierarchy, for example on IA-32 the USE_FP and USE_SSE options are provided,
- * this test should be enhanced to ensure that the architectures' _Swap()
+ * this test should be enhanced to ensure that the architectures' z_swap()
  * routine doesn't context switch more registers that it needs to (which would
  * represent a performance issue).  For example, on the IA-32, the test should
  * issue a k_fp_disable() from main(), and then indicate that only x87 FPU
diff --git a/tests/kernel/gen_isr_table/src/main.c b/tests/kernel/gen_isr_table/src/main.c
index 0cd393a..182bf91 100644
--- a/tests/kernel/gen_isr_table/src/main.c
+++ b/tests/kernel/gen_isr_table/src/main.c
@@ -11,7 +11,7 @@
 
 extern u32_t _irq_vector_table[];
 
-#if defined(_ARCH_IRQ_DIRECT_CONNECT) && defined(CONFIG_GEN_IRQ_VECTOR_TABLE)
+#if defined(Z_ARCH_IRQ_DIRECT_CONNECT) && defined(CONFIG_GEN_IRQ_VECTOR_TABLE)
 #define HAS_DIRECT_IRQS
 #endif
 
@@ -68,7 +68,7 @@
 #elif defined(CONFIG_CPU_ARCV2)
 void trigger_irq(int irq)
 {
-	_arc_v2_aux_reg_write(_ARC_V2_AUX_IRQ_HINT, irq);
+	z_arc_v2_aux_reg_write(_ARC_V2_AUX_IRQ_HINT, irq);
 }
 #else
 /* So far, Nios II does not support this */
diff --git a/tests/kernel/interrupt/src/interrupt.h b/tests/kernel/interrupt/src/interrupt.h
index 38b66d7..26718a6 100644
--- a/tests/kernel/interrupt/src/interrupt.h
+++ b/tests/kernel/interrupt/src/interrupt.h
@@ -37,7 +37,7 @@
 static void trigger_irq(int irq)
 {
 	printk("Triggering irq : %d\n", irq);
-	_arc_v2_aux_reg_write(_ARC_V2_AUX_IRQ_HINT, irq);
+	z_arc_v2_aux_reg_write(_ARC_V2_AUX_IRQ_HINT, irq);
 }
 #else
 /* for not supported architecture */
diff --git a/tests/kernel/interrupt/src/nested_irq.c b/tests/kernel/interrupt/src/nested_irq.c
index 827e266..dcbc915 100644
--- a/tests/kernel/interrupt/src/nested_irq.c
+++ b/tests/kernel/interrupt/src/nested_irq.c
@@ -103,7 +103,7 @@
 {
 	ARG_UNUSED(param);
 
-	zassert_true(_is_in_isr(), "Not in IRQ context!");
+	zassert_true(z_is_in_isr(), "Not in IRQ context!");
 	k_timer_init(&timer, timer_handler, NULL);
 	k_busy_wait(MS_TO_US(1));
 	k_timer_start(&timer, DURATION, 0);
diff --git a/tests/kernel/mem_protect/mem_protect/src/common.c b/tests/kernel/mem_protect/mem_protect/src/common.c
index d4bbcea..e026362 100644
--- a/tests/kernel/mem_protect/mem_protect/src/common.c
+++ b/tests/kernel/mem_protect/mem_protect/src/common.c
@@ -19,7 +19,7 @@
 
 ZTEST_BMEM bool valid_fault;
 
-void _SysFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
+void z_SysFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
 {
 	printk("Caught system error -- reason %d %d\n", reason, valid_fault);
 	if (valid_fault) {
diff --git a/tests/kernel/mem_protect/mem_protect/src/kobject.c b/tests/kernel/mem_protect/mem_protect/src/kobject.c
index 3f74c96..e7997aa 100644
--- a/tests/kernel/mem_protect/mem_protect/src/kobject.c
+++ b/tests/kernel/mem_protect/mem_protect/src/kobject.c
@@ -56,7 +56,7 @@
 void test_kobject_access_grant(void *p1, void *p2, void *p3)
 {
 
-	_k_object_init(random_sem_type);
+	z_object_init(random_sem_type);
 	k_thread_access_grant(k_current_get(),
 			      &kobject_sem,
 			      &kobject_mutex,
@@ -425,7 +425,7 @@
  * @ingroup kernel_memprotect_tests
  *
  * @see k_object_access_grant(), k_object_access_revoke(),
- * _k_object_find()
+ * z_object_find()
  */
 #define ERROR_STR_TEST_10 "Access granted/revoked to invalid thread k_object"
 void test_kobject_access_grant_to_invalid_thread(void *p1, void *p2, void *p3)
diff --git a/tests/kernel/mem_protect/mem_protect/src/mem_domain.c b/tests/kernel/mem_protect/mem_protect/src/mem_domain.c
index e5f1805..933f623 100644
--- a/tests/kernel/mem_protect/mem_protect/src/mem_domain.c
+++ b/tests/kernel/mem_protect/mem_protect/src/mem_domain.c
@@ -356,7 +356,7 @@
 	/* Subtract one since the domain is initialized with one partition
 	 * already present.
 	 */
-	u8_t max_partitions = (u8_t)_arch_mem_domain_max_partitions_get() - 1;
+	u8_t max_partitions = (u8_t)z_arch_mem_domain_max_partitions_get() - 1;
 	u8_t index;
 
 	k_mem_domain_remove_thread(k_current_get());
@@ -420,7 +420,7 @@
 void test_mem_domain_add_partitions_simple(void *p1, void *p2, void *p3)
 {
 
-	u8_t max_partitions = (u8_t)_arch_mem_domain_max_partitions_get();
+	u8_t max_partitions = (u8_t)z_arch_mem_domain_max_partitions_get();
 	u8_t index;
 
 	k_mem_domain_init(&mem_domain_tc3_mem_domain,
diff --git a/tests/kernel/mem_protect/obj_validation/src/main.c b/tests/kernel/mem_protect/obj_validation/src/main.c
index 39b0d51..b06144e 100644
--- a/tests/kernel/mem_protect/obj_validation/src/main.c
+++ b/tests/kernel/mem_protect/obj_validation/src/main.c
@@ -28,12 +28,12 @@
 	int ret;
 
 	if (retval) {
-		/* Expected to fail; bypass _obj_validation_check() so we don't
+		/* Expected to fail; bypass z_obj_validation_check() so we don't
 		 * fill the logs with spam
 		 */
-		ret = _k_object_validate(_k_object_find(sem), K_OBJ_SEM, 0);
+		ret = z_object_validate(z_object_find(sem), K_OBJ_SEM, 0);
 	} else {
-		ret = _obj_validation_check(_k_object_find(sem), sem,
+		ret = z_obj_validation_check(z_object_find(sem), sem,
 					    K_OBJ_SEM, 0);
 	}
 
diff --git a/tests/kernel/mem_protect/protection/src/main.c b/tests/kernel/mem_protect/protection/src/main.c
index b3f21f2..401d4c2 100644
--- a/tests/kernel/mem_protect/protection/src/main.c
+++ b/tests/kernel/mem_protect/protection/src/main.c
@@ -18,14 +18,14 @@
 #define INFO(fmt, ...) printk(fmt, ##__VA_ARGS__)
 
 /* ARM is a special case, in that k_thread_abort() does indeed return
- * instead of calling _Swap() directly. The PendSV exception is queued
+ * instead of calling z_swap() directly. The PendSV exception is queued
  * and immediately fires upon completing the exception path; the faulting
  * thread is never run again.
  */
 #if !(defined(CONFIG_ARM) || defined(CONFIG_ARC))
 FUNC_NORETURN
 #endif
-void _SysFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
+void z_SysFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
 {
 	INFO("Caught system error -- reason %d\n", reason);
 	ztest_test_pass();
diff --git a/tests/kernel/mem_protect/syscalls/src/main.c b/tests/kernel/mem_protect/syscalls/src/main.c
index 32d1736..ddf6a38 100644
--- a/tests/kernel/mem_protect/syscalls/src/main.c
+++ b/tests/kernel/mem_protect/syscalls/src/main.c
@@ -15,7 +15,7 @@
 char kernel_buf[BUF_SIZE];
 ZTEST_BMEM char user_string[BUF_SIZE];
 
-size_t _impl_string_nlen(char *src, size_t maxlen, int *err)
+size_t z_impl_string_nlen(char *src, size_t maxlen, int *err)
 {
 	return z_user_string_nlen(src, maxlen, err);
 }
@@ -25,7 +25,7 @@
 	int err_copy;
 	size_t ret;
 
-	ret = _impl_string_nlen((char *)src, maxlen, &err_copy);
+	ret = z_impl_string_nlen((char *)src, maxlen, &err_copy);
 	if (!err_copy && Z_SYSCALL_MEMORY_READ(src, ret + 1)) {
 		err_copy = -1;
 	}
@@ -35,7 +35,7 @@
 	return ret;
 }
 
-int _impl_string_alloc_copy(char *src)
+int z_impl_string_alloc_copy(char *src)
 {
 	if (!strcmp(src, kernel_string)) {
 		return 0;
@@ -54,13 +54,13 @@
 		return -1;
 	}
 
-	ret = _impl_string_alloc_copy(src_copy);
+	ret = z_impl_string_alloc_copy(src_copy);
 	k_free(src_copy);
 
 	return ret;
 }
 
-int _impl_string_copy(char *src)
+int z_impl_string_copy(char *src)
 {
 	if (!strcmp(src, kernel_string)) {
 		return 0;
@@ -77,13 +77,13 @@
 		return ret;
 	}
 
-	return _impl_string_copy(kernel_buf);
+	return z_impl_string_copy(kernel_buf);
 }
 
 /* Not actually used, but will copy wrong string if called by mistake instead
  * of the handler
  */
-int _impl_to_copy(char *dest)
+int z_impl_to_copy(char *dest)
 {
 	memcpy(dest, kernel_string, BUF_SIZE);
 	return 0;
@@ -110,7 +110,7 @@
 	size_t ret;
 
 	ret = string_nlen(kernel_string, BUF_SIZE, &err);
-	if (_arch_is_user_context()) {
+	if (z_arch_is_user_context()) {
 		zassert_equal(err, -1,
 			      "kernel string did not fault on user access");
 	} else {
diff --git a/tests/kernel/mem_protect/userspace/src/main.c b/tests/kernel/mem_protect/userspace/src/main.c
index a21f0fb..c228145 100644
--- a/tests/kernel/mem_protect/userspace/src/main.c
+++ b/tests/kernel/mem_protect/userspace/src/main.c
@@ -76,14 +76,14 @@
 #define BARRIER() k_sem_give(&expect_fault_sem)
 
 /* ARM is a special case, in that k_thread_abort() does indeed return
- * instead of calling _Swap() directly. The PendSV exception is queued
+ * instead of calling z_swap() directly. The PendSV exception is queued
  * and immediately fires upon completing the exception path; the faulting
  * thread is never run again.
  */
 #if !(defined(CONFIG_ARM) || defined(CONFIG_ARC))
 FUNC_NORETURN
 #endif
-void _SysFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
+void z_SysFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
 {
 	INFO("Caught system error -- reason %d\n", reason);
 	/*
@@ -271,7 +271,7 @@
 	expect_fault = true;
 	expected_reason = REASON_HW_EXCEPTION;
 	BARRIER();
-	memset(&_is_thread_essential, 0, 4);
+	memset(&z_is_thread_essential, 0, 4);
 	zassert_unreachable("Write to kernel text did not fault");
 }
 
diff --git a/tests/kernel/mem_protect/x86_mmu_api/src/userbuffer_validate.c b/tests/kernel/mem_protect/x86_mmu_api/src/userbuffer_validate.c
index 54d862e..e732a2c 100644
--- a/tests/kernel/mem_protect/x86_mmu_api/src/userbuffer_validate.c
+++ b/tests/kernel/mem_protect/x86_mmu_api/src/userbuffer_validate.c
@@ -22,7 +22,7 @@
 #define BUFF_WRITEABLE ((u32_t) 0x1)
 #define BUFF_USER ((u32_t) 0x2)
 
-int _arch_buffer_validate(void *addr, size_t size, int write);
+int z_arch_buffer_validate(void *addr, size_t size, int write);
 void reset_flag(void);
 void reset_multi_pte_page_flag(void);
 void reset_multi_pde_flag(void);
@@ -38,12 +38,12 @@
 static void set_flags(void *ptr, size_t size, x86_page_entry_data_t flags,
 		      x86_page_entry_data_t mask)
 {
-	_x86_mmu_set_flags(PDPT, ptr, size, flags, mask);
+	z_x86_mmu_set_flags(PDPT, ptr, size, flags, mask);
 }
 
 
 /* if Failure occurs
- * _arch_buffer_validate return -EPERM
+ * z_arch_buffer_validate return -EPERM
  * else return 0.
  * Below conditions will be tested accordingly
  *
@@ -59,7 +59,7 @@
 			   MMU_ENTRY_READ,
 			   MMU_PDE_RW_MASK);
 
-	status = _arch_buffer_validate(ADDR_PAGE_1,
+	status = z_arch_buffer_validate(ADDR_PAGE_1,
 				       BUFF_SIZE,
 				       BUFF_WRITEABLE);
 
@@ -80,7 +80,7 @@
 			   MMU_ENTRY_WRITE,
 			   MMU_PDE_RW_MASK);
 
-	status = _arch_buffer_validate(ADDR_PAGE_1,
+	status = z_arch_buffer_validate(ADDR_PAGE_1,
 				       BUFF_SIZE,
 				       BUFF_WRITEABLE);
 	if (status != 0) {
@@ -100,7 +100,7 @@
 			   MMU_ENTRY_READ,
 			   MMU_PDE_RW_MASK);
 
-	status = _arch_buffer_validate(ADDR_PAGE_1,
+	status = z_arch_buffer_validate(ADDR_PAGE_1,
 				       BUFF_SIZE,
 				       BUFF_READABLE);
 	if (status != 0) {
@@ -120,7 +120,7 @@
 			   MMU_ENTRY_WRITE,
 			   MMU_PDE_RW_MASK);
 
-	status = _arch_buffer_validate(ADDR_PAGE_1,
+	status = z_arch_buffer_validate(ADDR_PAGE_1,
 				       BUFF_SIZE,
 				       BUFF_READABLE);
 
@@ -142,7 +142,7 @@
 			   MMU_ENTRY_WRITE | MMU_ENTRY_SUPERVISOR,
 			   MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
 
-	status = _arch_buffer_validate(ADDR_PAGE_1,
+	status = z_arch_buffer_validate(ADDR_PAGE_1,
 				       BUFF_SIZE,
 				       BUFF_READABLE | BUFF_USER);
 
@@ -163,7 +163,7 @@
 			   MMU_ENTRY_WRITE | MMU_ENTRY_SUPERVISOR,
 			   MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
 
-	status = _arch_buffer_validate(ADDR_PAGE_1,
+	status = z_arch_buffer_validate(ADDR_PAGE_1,
 				       BUFF_SIZE,
 				       BUFF_WRITEABLE);
 
@@ -183,7 +183,7 @@
 			   MMU_PAGE_SIZE,
 			   MMU_ENTRY_WRITE | MMU_ENTRY_USER,
 			   MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
-	status = _arch_buffer_validate(ADDR_PAGE_1,
+	status = z_arch_buffer_validate(ADDR_PAGE_1,
 				       BUFF_SIZE,
 				       BUFF_WRITEABLE | BUFF_USER);
 	if (status != 0) {
@@ -202,7 +202,7 @@
 			   MMU_PAGE_SIZE,
 			   MMU_ENTRY_WRITE | MMU_ENTRY_SUPERVISOR,
 			   MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
-	status = _arch_buffer_validate(ADDR_PAGE_1,
+	status = z_arch_buffer_validate(ADDR_PAGE_1,
 				       BUFF_SIZE,
 				       BUFF_WRITEABLE | BUFF_USER);
 	if (status != -EPERM) {
@@ -229,7 +229,7 @@
 			   MMU_ENTRY_WRITE | MMU_ENTRY_SUPERVISOR,
 			   MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
 
-	status = _arch_buffer_validate(ADDR_PAGE_1,
+	status = z_arch_buffer_validate(ADDR_PAGE_1,
 				       2 * MMU_PAGE_SIZE,
 				       BUFF_WRITEABLE | BUFF_USER);
 	if (status != -EPERM) {
@@ -255,7 +255,7 @@
 			   MMU_ENTRY_WRITE | MMU_ENTRY_SUPERVISOR,
 			   MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
 
-	status = _arch_buffer_validate(ADDR_PAGE_1,
+	status = z_arch_buffer_validate(ADDR_PAGE_1,
 				       2 * MMU_PAGE_SIZE,
 				       BUFF_WRITEABLE);
 	if (status != -EPERM) {
@@ -281,7 +281,7 @@
 			   MMU_ENTRY_READ | MMU_ENTRY_SUPERVISOR,
 			   MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
 
-	status = _arch_buffer_validate(ADDR_PAGE_1,
+	status = z_arch_buffer_validate(ADDR_PAGE_1,
 				       2 * MMU_PAGE_SIZE,
 				       BUFF_READABLE | BUFF_USER);
 	if (status != -EPERM) {
@@ -307,7 +307,7 @@
 			   MMU_ENTRY_READ | MMU_ENTRY_SUPERVISOR,
 			   MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
 
-	status = _arch_buffer_validate(ADDR_PAGE_1,
+	status = z_arch_buffer_validate(ADDR_PAGE_1,
 				       2 * MMU_PAGE_SIZE,
 				       BUFF_WRITEABLE);
 	if (status != -EPERM) {
@@ -333,7 +333,7 @@
 			   MMU_ENTRY_READ,
 			   MMU_PDE_RW_MASK);
 
-	status = _arch_buffer_validate(ADDR_PAGE_1,
+	status = z_arch_buffer_validate(ADDR_PAGE_1,
 				       2 * MMU_PAGE_SIZE,
 				       BUFF_WRITEABLE);
 
@@ -360,7 +360,7 @@
 			   MMU_ENTRY_WRITE,
 			   MMU_PDE_RW_MASK);
 
-	status = _arch_buffer_validate(ADDR_PAGE_1,
+	status = z_arch_buffer_validate(ADDR_PAGE_1,
 				       2 * MMU_PAGE_SIZE,
 				       BUFF_WRITEABLE);
 	if (status != 0) {
@@ -386,7 +386,7 @@
 			   MMU_ENTRY_READ,
 			   MMU_PDE_RW_MASK);
 
-	status = _arch_buffer_validate(ADDR_PAGE_1,
+	status = z_arch_buffer_validate(ADDR_PAGE_1,
 				       2 * MMU_PAGE_SIZE,
 				       BUFF_READABLE);
 	if (status != 0) {
@@ -412,7 +412,7 @@
 			   MMU_ENTRY_WRITE,
 			   MMU_PDE_RW_MASK);
 
-	status = _arch_buffer_validate(ADDR_PAGE_1,
+	status = z_arch_buffer_validate(ADDR_PAGE_1,
 				       2 * MMU_PAGE_SIZE,
 				       BUFF_READABLE);
 
@@ -463,7 +463,7 @@
  *
  * @ingroup kernel_memprotect_tests
  *
- * @see _arch_buffer_validate(), _x86_mmu_set_flags()
+ * @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
  */
 void test_multi_pde_buffer_readable_write(void)
 {
@@ -475,7 +475,7 @@
  *
  * @ingroup kernel_memprotect_tests
  *
- * @see  _arch_buffer_validate(), _x86_mmu_set_flags()
+ * @see  z_arch_buffer_validate(), z_x86_mmu_set_flags()
  */
 void test_multi_pde_buffer_readable_read(void)
 {
@@ -487,7 +487,7 @@
  *
  * @ingroup kernel_memprotect_tests
  *
- * @see  _arch_buffer_validate(), _x86_mmu_set_flags()
+ * @see  z_arch_buffer_validate(), z_x86_mmu_set_flags()
  */
 void test_multi_pde_buffer_writeable_write(void)
 {
@@ -499,7 +499,7 @@
  *
  * @ingroup kernel_memprotect_tests
  *
- * @see  _arch_buffer_validate(), _x86_mmu_set_flags()
+ * @see  z_arch_buffer_validate(), z_x86_mmu_set_flags()
  */
 void test_multi_pde_buffer_rw(void)
 {
@@ -511,7 +511,7 @@
  *
  * @ingroup kernel_memprotect_tests
  *
- * @see _arch_buffer_validate(), _x86_mmu_set_flags()
+ * @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
  */
 void test_buffer_rw_read(void)
 {
@@ -523,7 +523,7 @@
  *
  * @ingroup kernel_memprotect_tests
  *
- * @see _arch_buffer_validate(), _x86_mmu_set_flags()
+ * @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
  */
 void test_buffer_writeable_write(void)
 {
@@ -535,7 +535,7 @@
  *
  * @ingroup kernel_memprotect_tests
  *
- * @see _arch_buffer_validate(), _x86_mmu_set_flags()
+ * @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
  */
 void test_buffer_readable_read(void)
 {
@@ -547,7 +547,7 @@
  *
  * @ingroup kernel_memprotect_tests
  *
- * @see _arch_buffer_validate(), _x86_mmu_set_flags()
+ * @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
  */
 void test_buffer_readable_write(void)
 {
@@ -559,7 +559,7 @@
  *
  * @ingroup kernel_memprotect_tests
  *
- * @see _arch_buffer_validate(), _x86_mmu_set_flags()
+ * @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
  */
 void test_buffer_supervisor_rw(void)
 {
@@ -571,7 +571,7 @@
  *
  * @ingroup kernel_memprotect_tests
  *
- * @see _arch_buffer_validate(), _x86_mmu_set_flags()
+ * @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
  */
 void test_buffer_supervisor_w(void)
 {
@@ -583,7 +583,7 @@
  *
  * @ingroup kernel_memprotect_tests
  *
- * @see _arch_buffer_validate(), _x86_mmu_set_flags()
+ * @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
  */
 void test_buffer_user_rw_user(void)
 {
@@ -595,7 +595,7 @@
  *
  * @ingroup kernel_memprotect_tests
  *
- * @see _arch_buffer_validate(), _x86_mmu_set_flags()
+ * @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
  */
 void test_buffer_user_rw_supervisor(void)
 {
@@ -607,7 +607,7 @@
  *
  * @ingroup kernel_memprotect_tests
  *
- * @see _arch_buffer_validate(), _x86_mmu_set_flags()
+ * @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
  */
 void test_multi_page_buffer_user(void)
 {
@@ -619,7 +619,7 @@
  *
  * @ingroup kernel_memprotect_tests
  *
- * @see _arch_buffer_validate(), _x86_mmu_set_flags()
+ * @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
  */
 void test_multi_page_buffer_write_user(void)
 {
@@ -631,7 +631,7 @@
  *
  * @ingroup kernel_memprotect_tests
  *
- * @see _arch_buffer_validate(), _x86_mmu_set_flags()
+ * @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
  */
 void test_multi_page_buffer_read_user(void)
 {
@@ -643,7 +643,7 @@
  *
  * @ingroup kernel_memprotect_tests
  *
- * @see _arch_buffer_validate(), _x86_mmu_set_flags()
+ * @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
  */
 void test_multi_page_buffer_read(void)
 {
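
All of the hunks above exercise one idiom: z_x86_mmu_set_flags() rewrites the page permissions, then z_arch_buffer_validate() is asked whether the current thread may touch the buffer, returning 0 when the access is allowed. A minimal sketch of that idiom, assuming only the calls visible in the hunks (validate_or_fail() itself is a hypothetical helper):

/* Hypothetical helper condensing the pattern repeated above: status 0
 * from z_arch_buffer_validate() means the buffer is accessible with
 * the requested permission.
 */
static void validate_or_fail(void *addr, size_t size, int rw, bool expect_ok)
{
	int status = z_arch_buffer_validate(addr, size, rw);

	if ((status == 0) != expect_ok) {
		ztest_test_fail();
	}
}

A write-protected case would then read validate_or_fail(ADDR_PAGE_1, 2 * MMU_PAGE_SIZE, BUFF_WRITEABLE, false).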
diff --git a/tests/kernel/mp/src/main.c b/tests/kernel/mp/src/main.c
index 1208857..d95cec9 100644
--- a/tests/kernel/mp/src/main.c
+++ b/tests/kernel/mp/src/main.c
@@ -43,13 +43,13 @@
  *
  * @ingroup kernel_mp_tests
  *
- * @see _arch_start_cpu()
+ * @see z_arch_start_cpu()
  */
 void test_mp_start(void)
 {
 	cpu_arg = 12345;
 
-	_arch_start_cpu(1, cpu1_stack, CPU1_STACK_SIZE, cpu1_fn, &cpu_arg);
+	z_arch_start_cpu(1, cpu1_stack, CPU1_STACK_SIZE, cpu1_fn, &cpu_arg);
 
 	while (!cpu_running) {
 	}
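
The loop above spins until the secondary core reports in; in outline, the entry function handed to z_arch_start_cpu() looks like this (a sketch only: the real cpu1_fn() sits outside this hunk, and its (int key, void *arg) signature is an assumption about this kernel revision):

/* Sketch of the secondary-core entry point.  Only z_arch_start_cpu(),
 * cpu_arg and cpu_running are taken from the hunk above; everything
 * else is assumed.
 */
static void cpu1_fn(int key, void *arg)
{
	ARG_UNUSED(key);

	if (arg == &cpu_arg && *(int *)arg == 12345) {
		cpu_running = 1;	/* releases the spin-wait in test_mp_start() */
	}
}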
diff --git a/tests/kernel/pipe/pipe/src/test_pipe.c b/tests/kernel/pipe/pipe/src/test_pipe.c
index 7855d03..15498cc 100644
--- a/tests/kernel/pipe/pipe/src/test_pipe.c
+++ b/tests/kernel/pipe/pipe/src/test_pipe.c
@@ -675,7 +675,7 @@
 
 /******************************************************************************/
 ZTEST_BMEM bool valid_fault;
-void _SysFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
+void z_SysFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
 {
 	printk("Caught system error -- reason %d\n", reason);
 	if (valid_fault) {
diff --git a/tests/kernel/sched/schedule_api/src/test_sched_timeslice_reset.c b/tests/kernel/sched/schedule_api/src/test_sched_timeslice_reset.c
index e8ebc32..78cabfb 100644
--- a/tests/kernel/sched/schedule_api/src/test_sched_timeslice_reset.c
+++ b/tests/kernel/sched/schedule_api/src/test_sched_timeslice_reset.c
@@ -34,8 +34,8 @@
 		expected_slice_max = HALF_SLICE_SIZE;
 	} else {
 		/* other threads are sliced with tick granularity */
-		expected_slice_min = __ticks_to_ms(_ms_to_ticks(SLICE_SIZE));
-		expected_slice_max = __ticks_to_ms(_ms_to_ticks(SLICE_SIZE)+1);
+		expected_slice_min = __ticks_to_ms(z_ms_to_ticks(SLICE_SIZE));
+		expected_slice_max = __ticks_to_ms(z_ms_to_ticks(SLICE_SIZE)+1);
 	}
 
 	#ifdef CONFIG_DEBUG
diff --git a/tests/kernel/sched/schedule_api/src/test_slice_scheduling.c b/tests/kernel/sched/schedule_api/src/test_slice_scheduling.c
index abc9b333..1350cab 100644
--- a/tests/kernel/sched/schedule_api/src/test_slice_scheduling.c
+++ b/tests/kernel/sched/schedule_api/src/test_slice_scheduling.c
@@ -37,8 +37,8 @@
 	int thread_parameter = ((int)p1 == (NUM_THREAD - 1)) ? '\n' :
 			       ((int)p1 + 'A');
 
-	s64_t expected_slice_min = __ticks_to_ms(_ms_to_ticks(SLICE_SIZE));
-	s64_t expected_slice_max = __ticks_to_ms(_ms_to_ticks(SLICE_SIZE) + 1);
+	s64_t expected_slice_min = __ticks_to_ms(z_ms_to_ticks(SLICE_SIZE));
+	s64_t expected_slice_max = __ticks_to_ms(z_ms_to_ticks(SLICE_SIZE) + 1);
 
 	while (1) {
 		s64_t tdelta = k_uptime_delta(&elapsed_slice);
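
In both slice tests the expected bounds come from a round trip through tick units: z_ms_to_ticks() rounds the requested slice up to whole ticks and __ticks_to_ms() converts back, so the window is aligned to tick granularity. With a hypothetical 100 Hz tick (10 ms per tick) and a 25 ms SLICE_SIZE, z_ms_to_ticks(25) gives 3 ticks, so expected_slice_min comes out to 30 ms and expected_slice_max to 40 ms. The sleep and work-queue hunks below apply the same idiom with one extra _TICK_ALIGN tick of slack.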
diff --git a/tests/kernel/sleep/src/main.c b/tests/kernel/sleep/src/main.c
index 19814af..3f9606f 100644
--- a/tests/kernel/sleep/src/main.c
+++ b/tests/kernel/sleep/src/main.c
@@ -22,7 +22,7 @@
 
 #define ONE_SECOND		(MSEC_PER_SEC)
 #define ONE_SECOND_ALIGNED	\
-	(u32_t)(__ticks_to_ms(_ms_to_ticks(ONE_SECOND) + _TICK_ALIGN))
+	(u32_t)(__ticks_to_ms(z_ms_to_ticks(ONE_SECOND) + _TICK_ALIGN))
 
 static struct k_sem test_thread_sem;
 static struct k_sem helper_thread_sem;
diff --git a/tests/kernel/smp/src/main.c b/tests/kernel/smp/src/main.c
index bdebcea..171a697 100644
--- a/tests/kernel/smp/src/main.c
+++ b/tests/kernel/smp/src/main.c
@@ -121,7 +121,7 @@
 	ARG_UNUSED(p3);
 	int parent_cpu_id = (int)p1;
 
-	zassert_true(parent_cpu_id != _arch_curr_cpu()->id,
+	zassert_true(parent_cpu_id != z_arch_curr_cpu()->id,
 		     "Parent isn't on other core");
 
 	sync_count++;
@@ -141,7 +141,7 @@
 	/* Make sure idle thread runs on each core */
 	k_sleep(1000);
 
-	int parent_cpu_id = _arch_curr_cpu()->id;
+	int parent_cpu_id = z_arch_curr_cpu()->id;
 
 	k_tid_t tid = k_thread_create(&t2, t2_stack, T2_STACK_SIZE,
 				      child_fn, (void *)parent_cpu_id, NULL,
@@ -162,7 +162,7 @@
 	int count = 0;
 
 	tinfo[thread_num].executed  = 1;
-	tinfo[thread_num].cpu_id = _arch_curr_cpu()->id;
+	tinfo[thread_num].cpu_id = z_arch_curr_cpu()->id;
 
 	while (count++ < 5) {
 		k_busy_wait(DELAY_US);
diff --git a/tests/kernel/spinlock/src/main.c b/tests/kernel/spinlock/src/main.c
index 7efc363..f7f77bf 100644
--- a/tests/kernel/spinlock/src/main.c
+++ b/tests/kernel/spinlock/src/main.c
@@ -106,13 +106,13 @@
  *
  * @ingroup kernel_spinlock_tests
  *
- * @see _arch_start_cpu()
+ * @see z_arch_start_cpu()
  */
 void test_spinlock_bounce(void)
 {
 	int i;
 
-	_arch_start_cpu(1, cpu1_stack, CPU1_STACK_SIZE, cpu1_fn, 0);
+	z_arch_start_cpu(1, cpu1_stack, CPU1_STACK_SIZE, cpu1_fn, 0);
 
 	k_busy_wait(10);
 
diff --git a/tests/kernel/threads/thread_apis/src/main.c b/tests/kernel/threads/thread_apis/src/main.c
index 3c1108e..6314fe9 100644
--- a/tests/kernel/threads/thread_apis/src/main.c
+++ b/tests/kernel/threads/thread_apis/src/main.c
@@ -168,7 +168,7 @@
 	ARG_UNUSED(p2);
 	ARG_UNUSED(p3);
 
-	if (!_is_thread_essential() &&
+	if (!z_is_thread_essential() &&
 	    (k_current_get() == (k_tid_t)thread_id)) {
 		ztest_test_pass();
 	} else {
@@ -185,9 +185,9 @@
  */
 void test_user_mode(void)
 {
-	_thread_essential_set();
+	z_thread_essential_set();
 
-	zassert_true(_is_thread_essential(), "Thread isn't set"
+	zassert_true(z_is_thread_essential(), "Thread isn't set"
 		     " as essential\n");
 
 	k_thread_user_mode_enter((k_thread_entry_t)umode_entry,
diff --git a/tests/kernel/threads/thread_apis/src/test_essential_thread.c b/tests/kernel/threads/thread_apis/src/test_essential_thread.c
index a66f6c8..d5f1e79 100644
--- a/tests/kernel/threads/thread_apis/src/test_essential_thread.c
+++ b/tests/kernel/threads/thread_apis/src/test_essential_thread.c
@@ -16,16 +16,16 @@
 
 static void thread_entry(void *p1, void *p2, void *p3)
 {
-	_thread_essential_set();
+	z_thread_essential_set();
 
-	if (_is_thread_essential()) {
+	if (z_is_thread_essential()) {
 		k_busy_wait(100);
 	} else {
 		zassert_unreachable("The thread is not set as essential");
 	}
 
-	_thread_essential_clear();
-	zassert_false(_is_thread_essential(),
+	z_thread_essential_clear();
+	zassert_false(z_is_thread_essential(),
 		      "Essential flag of the thread is not cleared");
 
 	k_sem_give(&sync_sem);
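
Taken together, the two thread_apis files above cover the whole renamed essential-thread API: z_thread_essential_set() marks the current thread as essential, z_is_thread_essential() queries the flag, and z_thread_essential_clear() removes it. A condensed sketch of the lifecycle they exercise (only the three renamed calls are assumed):

/* Sketch: the essential-flag lifecycle tested above. */
z_thread_essential_set();	/* terminating the thread is now a fatal error */
__ASSERT(z_is_thread_essential(), "flag not set");
z_thread_essential_clear();	/* the thread may exit normally again */
__ASSERT(!z_is_thread_essential(), "flag not cleared");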
diff --git a/tests/kernel/tickless/tickless/src/main.c b/tests/kernel/tickless/tickless/src/main.c
index 0e59ccf..2d1e356 100644
--- a/tests/kernel/tickless/tickless/src/main.c
+++ b/tests/kernel/tickless/tickless/src/main.c
@@ -56,7 +56,7 @@
 #if defined(CONFIG_ARCH_POSIX)
 #define _TIMESTAMP_READ()       (posix_get_hw_cycle())
 #else
-#define _TIMESTAMP_READ()       (_tsc_read())
+#define _TIMESTAMP_READ()       (z_tsc_read())
 #endif
 #define _TIMESTAMP_CLOSE()
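
z_tsc_read() is the renamed x86 timestamp-counter reader; under CONFIG_ARCH_POSIX the macro substitutes posix_get_hw_cycle() instead. Either way the test only assumes a monotonically increasing cycle count, roughly:

/* Sketch: both back-ends feed the same delta computation. */
u64_t start = _TIMESTAMP_READ();
/* ... run the tickless interval under test ... */
u64_t cycles = _TIMESTAMP_READ() - start;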
 
diff --git a/tests/kernel/workq/work_queue/src/main.c b/tests/kernel/workq/work_queue/src/main.c
index 0a86c49..b355a29 100644
--- a/tests/kernel/workq/work_queue/src/main.c
+++ b/tests/kernel/workq/work_queue/src/main.c
@@ -17,7 +17,7 @@
 
 /* In fact, each work item could take up to this value */
 #define WORK_ITEM_WAIT_ALIGNED	\
-	__ticks_to_ms(_ms_to_ticks(WORK_ITEM_WAIT) + _TICK_ALIGN)
+	__ticks_to_ms(z_ms_to_ticks(WORK_ITEM_WAIT) + _TICK_ALIGN)
 
 /*
  * Wait 50ms between work submissions, to ensure co-op and preempt
diff --git a/tests/lib/rbtree/src/main.c b/tests/lib/rbtree/src/main.c
index 833c2c1..b3256e4 100644
--- a/tests/lib/rbtree/src/main.c
+++ b/tests/lib/rbtree/src/main.c
@@ -91,10 +91,10 @@
 
 void check_rbnode(struct rbnode *node, int blacks_above)
 {
-	int side, bheight = blacks_above + _rb_is_black(node);
+	int side, bheight = blacks_above + z_rb_is_black(node);
 
 	for (side = 0; side < 2; side++) {
-		struct rbnode *ch = _rb_child(node, side);
+		struct rbnode *ch = z_rb_child(node, side);
 
 		if (ch) {
 			/* Basic tree requirement */
@@ -105,7 +105,7 @@
 			}
 
 			/* Can't have adjacent red nodes */
-			CHECK(_rb_is_black(node) || _rb_is_black(ch));
+			CHECK(z_rb_is_black(node) || z_rb_is_black(ch));
 
 			/* Recurse */
 			check_rbnode(ch, bheight);
@@ -124,7 +124,7 @@
 	last_black_height = 0;
 
 	CHECK(tree.root);
-	CHECK(_rb_is_black(tree.root));
+	CHECK(z_rb_is_black(tree.root));
 
 	check_rbnode(tree.root, 0);
 }
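
For reference, the two CHECK()s above encode the classic red-black invariants that z_rb_is_black() and z_rb_child() expose: every root-to-leaf path crosses the same number of black nodes (accumulated in bheight), and a red node never has a red child. Together with the black root asserted in the second hunk, these bound the height of an n-node tree by 2*log2(n + 1), which keeps the recursion in check_rbnode() shallow.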