kernel: renamespace z_is_in_isr()

This is part of the core kernel -> architecture interface
and is appropriately renamed to z_arch_is_in_isr().

References from test cases were changed to use k_is_in_isr().

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
diff --git a/arch/arc/include/kernel_arch_func.h b/arch/arc/include/kernel_arch_func.h
index 30648bd..d6433be 100644
--- a/arch/arc/include/kernel_arch_func.h
+++ b/arch/arc/include/kernel_arch_func.h
@@ -66,7 +66,7 @@
 	return irq_num;
 }
 
-#define z_is_in_isr	z_arc_v2_irq_unit_is_in_isr
+#define z_arch_is_in_isr	z_arc_v2_irq_unit_is_in_isr
 
 extern void z_thread_entry_wrapper(void);
 extern void z_user_thread_entry_wrapper(void);
diff --git a/arch/arm/core/thread.c b/arch/arm/core/thread.c
index 7c958ae..132f5a4 100644
--- a/arch/arm/core/thread.c
+++ b/arch/arm/core/thread.c
@@ -352,7 +352,7 @@
 		return -EINVAL;
 	}
 
-	if (z_is_in_isr()) {
+	if (z_arch_is_in_isr()) {
 		return -EINVAL;
 	}
 
diff --git a/arch/arm/include/kernel_arch_func.h b/arch/arm/include/kernel_arch_func.h
index eaea891..034c608 100644
--- a/arch/arm/include/kernel_arch_func.h
+++ b/arch/arm/include/kernel_arch_func.h
@@ -143,7 +143,7 @@
 
 extern void k_cpu_atomic_idle(unsigned int key);
 
-#define z_is_in_isr() z_IsInIsr()
+#define z_arch_is_in_isr() z_IsInIsr()
 
 extern FUNC_NORETURN void z_arm_userspace_enter(k_thread_entry_t user_entry,
 					       void *p1, void *p2, void *p3,
diff --git a/arch/nios2/include/kernel_arch_func.h b/arch/nios2/include/kernel_arch_func.h
index b063386..693d0fb 100644
--- a/arch/nios2/include/kernel_arch_func.h
+++ b/arch/nios2/include/kernel_arch_func.h
@@ -44,7 +44,7 @@
 FUNC_NORETURN void z_nios2_fatal_error(unsigned int reason,
 				       const z_arch_esf_t *esf);
 
-#define z_is_in_isr() (_kernel.nested != 0U)
+#define z_arch_is_in_isr() (_kernel.nested != 0U)
 
 #ifdef CONFIG_IRQ_OFFLOAD
 void z_irq_do_offload(void);
diff --git a/arch/posix/include/kernel_arch_func.h b/arch/posix/include/kernel_arch_func.h
index 224f9cf..6a55719 100644
--- a/arch/posix/include/kernel_arch_func.h
+++ b/arch/posix/include/kernel_arch_func.h
@@ -53,7 +53,7 @@
 }
 #endif
 
-#define z_is_in_isr() (_kernel.nested != 0U)
+#define z_arch_is_in_isr() (_kernel.nested != 0U)
 
 #endif /* _ASMLANGUAGE */
 
diff --git a/arch/riscv/include/kernel_arch_func.h b/arch/riscv/include/kernel_arch_func.h
index 4827c6c..f626441 100644
--- a/arch/riscv/include/kernel_arch_func.h
+++ b/arch/riscv/include/kernel_arch_func.h
@@ -40,7 +40,7 @@
 FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason,
 				       const z_arch_esf_t *esf);
 
-#define z_is_in_isr() (_kernel.nested != 0U)
+#define z_arch_is_in_isr() (_kernel.nested != 0U)
 
 #ifdef CONFIG_IRQ_OFFLOAD
 int z_irq_do_offload(void);
diff --git a/arch/x86/core/ia32/fatal.c b/arch/x86/core/ia32/fatal.c
index b703d91..33c72ce 100644
--- a/arch/x86/core/ia32/fatal.c
+++ b/arch/x86/core/ia32/fatal.c
@@ -41,7 +41,7 @@
 {
 	u32_t start, end;
 
-	if (z_is_in_isr()) {
+	if (z_arch_is_in_isr()) {
 		/* We were servicing an interrupt */
 		start = (u32_t)Z_ARCH_THREAD_STACK_BUFFER(_interrupt_stack);
 		end = start + CONFIG_ISR_STACK_SIZE;
diff --git a/arch/x86/core/ia32/irq_manage.c b/arch/x86/core/ia32/irq_manage.c
index 84b5064..ceab880 100644
--- a/arch/x86/core/ia32/irq_manage.c
+++ b/arch/x86/core/ia32/irq_manage.c
@@ -64,7 +64,7 @@
 	sys_trace_isr_enter();
 
 	/* We're not going to unlock IRQs, but we still need to increment this
-	 * so that z_is_in_isr() works
+	 * so that z_arch_is_in_isr() works
 	 */
 	++_kernel.nested;
 }
diff --git a/arch/x86/include/kernel_arch_func.h b/arch/x86/include/kernel_arch_func.h
index 9fbaa8d..53ef1f9 100644
--- a/arch/x86/include/kernel_arch_func.h
+++ b/arch/x86/include/kernel_arch_func.h
@@ -12,7 +12,7 @@
 #include <ia32/kernel_arch_func.h>
 #endif
 
-#define z_is_in_isr() (_kernel.nested != 0U)
+#define z_arch_is_in_isr() (_kernel.nested != 0U)
 
 #ifndef _ASMLANGUAGE
 
diff --git a/arch/x86_64/include/kernel_arch_func.h b/arch/x86_64/include/kernel_arch_func.h
index 6ec8905..8293627 100644
--- a/arch/x86_64/include/kernel_arch_func.h
+++ b/arch/x86_64/include/kernel_arch_func.h
@@ -78,7 +78,7 @@
 #endif
 }
 
-#define z_is_in_isr() (z_arch_curr_cpu()->nested != 0)
+#define z_arch_is_in_isr() (z_arch_curr_cpu()->nested != 0)
 
 static inline void z_arch_switch(void *switch_to, void **switched_from)
 {
diff --git a/arch/xtensa/include/kernel_arch_func.h b/arch/xtensa/include/kernel_arch_func.h
index 713a09f..91e21db 100644
--- a/arch/xtensa/include/kernel_arch_func.h
+++ b/arch/xtensa/include/kernel_arch_func.h
@@ -89,7 +89,7 @@
 }
 #endif
 
-#define z_is_in_isr() (z_arch_curr_cpu()->nested != 0U)
+#define z_arch_is_in_isr() (z_arch_curr_cpu()->nested != 0U)
 
 #endif /* _ASMLANGUAGE */
 
diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index 237cfc9..1461b0e 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -255,7 +255,7 @@
 static inline void z_sched_lock(void)
 {
 #ifdef CONFIG_PREEMPT_ENABLED
-	__ASSERT(!z_is_in_isr(), "");
+	__ASSERT(!z_arch_is_in_isr(), "");
 	__ASSERT(_current->base.sched_locked != 1, "");
 
 	--_current->base.sched_locked;
@@ -270,7 +270,7 @@
 static ALWAYS_INLINE void z_sched_unlock_no_reschedule(void)
 {
 #ifdef CONFIG_PREEMPT_ENABLED
-	__ASSERT(!z_is_in_isr(), "");
+	__ASSERT(!z_arch_is_in_isr(), "");
 	__ASSERT(_current->base.sched_locked != 0, "");
 
 	compiler_barrier();
diff --git a/kernel/mempool.c b/kernel/mempool.c
index ecc59a8..a5ede59 100644
--- a/kernel/mempool.c
+++ b/kernel/mempool.c
@@ -52,7 +52,7 @@
 	int ret;
 	s64_t end = 0;
 
-	__ASSERT(!(z_is_in_isr() && timeout != K_NO_WAIT), "");
+	__ASSERT(!(z_arch_is_in_isr() && timeout != K_NO_WAIT), "");
 
 	if (timeout > 0) {
 		end = k_uptime_get() + timeout;
diff --git a/kernel/msg_q.c b/kernel/msg_q.c
index 069c95e..fb07392 100644
--- a/kernel/msg_q.c
+++ b/kernel/msg_q.c
@@ -110,7 +110,7 @@
 
 int z_impl_k_msgq_put(struct k_msgq *msgq, void *data, s32_t timeout)
 {
-	__ASSERT(!z_is_in_isr() || timeout == K_NO_WAIT, "");
+	__ASSERT(!z_arch_is_in_isr() || timeout == K_NO_WAIT, "");
 
 	struct k_thread *pending_thread;
 	k_spinlock_key_t key;
@@ -185,7 +185,7 @@
 
 int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, s32_t timeout)
 {
-	__ASSERT(!z_is_in_isr() || timeout == K_NO_WAIT, "");
+	__ASSERT(!z_arch_is_in_isr() || timeout == K_NO_WAIT, "");
 
 	k_spinlock_key_t key;
 	struct k_thread *pending_thread;
diff --git a/kernel/poll.c b/kernel/poll.c
index 2441a77..8be123b 100644
--- a/kernel/poll.c
+++ b/kernel/poll.c
@@ -190,7 +190,7 @@
 
 int z_impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 {
-	__ASSERT(!z_is_in_isr(), "");
+	__ASSERT(!z_arch_is_in_isr(), "");
 	__ASSERT(events != NULL, "NULL events\n");
 	__ASSERT(num_events > 0, "zero events\n");
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 5bb6e99..0cd5831 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -531,7 +531,7 @@
 	_current_cpu->swap_ok = 0;
 #endif
 
-	return z_arch_irq_unlocked(key) && !z_is_in_isr();
+	return z_arch_irq_unlocked(key) && !z_arch_is_in_isr();
 }
 
 void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
@@ -563,7 +563,7 @@
 {
 #ifdef CONFIG_PREEMPT_ENABLED
 	__ASSERT(_current->base.sched_locked != 0, "");
-	__ASSERT(!z_is_in_isr(), "");
+	__ASSERT(!z_arch_is_in_isr(), "");
 
 	LOCKED(&sched_spinlock) {
 		++_current->base.sched_locked;
@@ -855,7 +855,7 @@
 	 * keep track of it) and idle cannot change its priority.
 	 */
 	Z_ASSERT_VALID_PRIO(prio, NULL);
-	__ASSERT(!z_is_in_isr(), "");
+	__ASSERT(!z_arch_is_in_isr(), "");
 
 	struct k_thread *thread = (struct k_thread *)tid;
 
@@ -909,7 +909,7 @@
 
 void z_impl_k_yield(void)
 {
-	__ASSERT(!z_is_in_isr(), "");
+	__ASSERT(!z_arch_is_in_isr(), "");
 
 	if (!is_idle(_current)) {
 		LOCKED(&sched_spinlock) {
@@ -939,7 +939,7 @@
 #ifdef CONFIG_MULTITHREADING
 	u32_t expected_wakeup_time;
 
-	__ASSERT(!z_is_in_isr(), "");
+	__ASSERT(!z_arch_is_in_isr(), "");
 
 	K_DEBUG("thread %p for %d ticks\n", _current, ticks);
 
@@ -1026,7 +1026,7 @@
 	z_mark_thread_as_not_suspended(thread);
 	z_ready_thread(thread);
 
-	if (!z_is_in_isr()) {
+	if (!z_arch_is_in_isr()) {
 		z_reschedule_unlocked();
 	}
 
@@ -1113,7 +1113,7 @@
 
 int z_impl_k_is_preempt_thread(void)
 {
-	return !z_is_in_isr() && is_preempt(_current);
+	return !z_arch_is_in_isr() && is_preempt(_current);
 }
 
 #ifdef CONFIG_USERSPACE
diff --git a/kernel/sem.c b/kernel/sem.c
index 0d8ce51..18d5c7c 100644
--- a/kernel/sem.c
+++ b/kernel/sem.c
@@ -138,7 +138,7 @@
 
 int z_impl_k_sem_take(struct k_sem *sem, s32_t timeout)
 {
-	__ASSERT(((z_is_in_isr() == false) || (timeout == K_NO_WAIT)), "");
+	__ASSERT(((z_arch_is_in_isr() == false) || (timeout == K_NO_WAIT)), "");
 
 	sys_trace_void(SYS_TRACE_ID_SEMA_TAKE);
 	k_spinlock_key_t key = k_spin_lock(&lock);
diff --git a/kernel/thread.c b/kernel/thread.c
index 4fb9ae2..6dd34f4 100644
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -61,7 +61,7 @@
 
 bool k_is_in_isr(void)
 {
-	return z_is_in_isr();
+	return z_arch_is_in_isr();
 }
 
 /*
@@ -531,7 +531,7 @@
 			      void *p1, void *p2, void *p3,
 			      int prio, u32_t options, s32_t delay)
 {
-	__ASSERT(!z_is_in_isr(), "Threads may not be created in ISRs");
+	__ASSERT(!z_arch_is_in_isr(), "Threads may not be created in ISRs");
 
 	/* Special case, only for unit tests */
 #if defined(CONFIG_TEST) && defined(CONFIG_ARCH_HAS_USERSPACE) && !defined(CONFIG_USERSPACE)
diff --git a/kernel/thread_abort.c b/kernel/thread_abort.c
index 7de1444..d675dd2 100644
--- a/kernel/thread_abort.c
+++ b/kernel/thread_abort.c
@@ -43,7 +43,7 @@
 	z_thread_single_abort(thread);
 	z_thread_monitor_exit(thread);
 
-	if (thread == _current && !z_is_in_isr()) {
+	if (thread == _current && !z_arch_is_in_isr()) {
 		z_swap(&lock, key);
 	} else {
 		/* Really, there's no good reason for this to be a
diff --git a/kernel/timer.c b/kernel/timer.c
index a574b49..0bf7aff 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -184,7 +184,7 @@
 
 u32_t z_impl_k_timer_status_sync(struct k_timer *timer)
 {
-	__ASSERT(!z_is_in_isr(), "");
+	__ASSERT(!z_arch_is_in_isr(), "");
 
 	k_spinlock_key_t key = k_spin_lock(&lock);
 	u32_t result = timer->status;
diff --git a/tests/kernel/common/src/irq_offload.c b/tests/kernel/common/src/irq_offload.c
index 12dda4f..32eb038 100644
--- a/tests/kernel/common/src/irq_offload.c
+++ b/tests/kernel/common/src/irq_offload.c
@@ -25,7 +25,7 @@
 	u32_t x = POINTER_TO_INT(param);
 
 	/* Make sure we're in IRQ context */
-	zassert_true(z_is_in_isr(), "Not in IRQ context!");
+	zassert_true(k_is_in_isr(), "Not in IRQ context!");
 
 	sentinel = x;
 }
diff --git a/tests/kernel/interrupt/src/nested_irq.c b/tests/kernel/interrupt/src/nested_irq.c
index 32a82e7..3ef6e99 100644
--- a/tests/kernel/interrupt/src/nested_irq.c
+++ b/tests/kernel/interrupt/src/nested_irq.c
@@ -128,7 +128,7 @@
 {
 	ARG_UNUSED(param);
 
-	zassert_true(z_is_in_isr(), "Not in IRQ context!");
+	zassert_true(k_is_in_isr(), "Not in IRQ context!");
 	k_timer_init(&timer, timer_handler, NULL);
 	k_busy_wait(MS_TO_US(1));
 	k_timer_start(&timer, DURATION, 0);