clock: rename z_tick_get_32 -> sys_clock_tick_get_32

Do not use the z_ prefix for internal APIs; z_ is reserved for
private APIs within a single subsystem only.
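
For illustration, a minimal sketch of a caller after the rename. Only
sys_clock_tick_get_32() and its declaration in sys_clock.h come from
this patch; the helper function, its name, and the include are
hypothetical:

    #include <sys_clock.h>

    /* Ticks elapsed since 'start'. 32-bit tick counts wrap, but
     * unsigned subtraction stays correct across a single wrap,
     * matching the pattern used in kernel/sched.c below.
     */
    static uint32_t elapsed_ticks(uint32_t start)
    {
            return sys_clock_tick_get_32() - start;
    }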

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
diff --git a/include/sys_clock.h b/include/sys_clock.h
index c6c0f4b..e4d901f 100644
--- a/include/sys_clock.h
+++ b/include/sys_clock.h
@@ -175,7 +175,7 @@
  * @return the current system tick count
  *
  */
-uint32_t z_tick_get_32(void);
+uint32_t sys_clock_tick_get_32(void);
 
 /**
  *
@@ -188,7 +188,7 @@
 
 #ifndef CONFIG_SYS_CLOCK_EXISTS
 #define z_tick_get() (0)
-#define z_tick_get_32() (0)
+#define sys_clock_tick_get_32() (0)
 #endif
 
 uint64_t z_timeout_end_calc(k_timeout_t timeout);
diff --git a/kernel/sched.c b/kernel/sched.c
index a361ea1..99d7688 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1233,7 +1233,7 @@
 
 	k_timeout_t timeout = Z_TIMEOUT_TICKS(ticks);
 
-	expected_wakeup_ticks = ticks + z_tick_get_32();
+	expected_wakeup_ticks = ticks + sys_clock_tick_get_32();
 
 	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
 
@@ -1248,7 +1248,7 @@
 
 	__ASSERT(!z_is_thread_state_set(_current, _THREAD_SUSPENDED), "");
 
-	ticks = (k_ticks_t)expected_wakeup_ticks - z_tick_get_32();
+	ticks = (k_ticks_t)expected_wakeup_ticks - sys_clock_tick_get_32();
 	if (ticks > 0) {
 		return ticks;
 	}
diff --git a/kernel/timeout.c b/kernel/timeout.c
index 79262db..2e0179b 100644
--- a/kernel/timeout.c
+++ b/kernel/timeout.c
@@ -275,7 +275,7 @@
 	return t;
 }
 
-uint32_t z_tick_get_32(void)
+uint32_t sys_clock_tick_get_32(void)
 {
 #ifdef CONFIG_TICKLESS_KERNEL
 	return (uint32_t)z_tick_get();
diff --git a/lib/cmsis_rtos_v2/kernel.c b/lib/cmsis_rtos_v2/kernel.c
index ed6d5fb..2dbd034 100644
--- a/lib/cmsis_rtos_v2/kernel.c
+++ b/lib/cmsis_rtos_v2/kernel.c
@@ -9,7 +9,7 @@
 #include <kernel.h>
 #include <cmsis_os2.h>
 
-extern uint32_t z_tick_get_32(void);
+extern uint32_t sys_clock_tick_get_32(void);
 
 /**
  * @brief Get RTOS Kernel Information.
@@ -89,7 +89,7 @@
  */
 uint32_t osKernelGetTickCount(void)
 {
-	return z_tick_get_32();
+	return sys_clock_tick_get_32();
 }
 
 /**
diff --git a/tests/kernel/context/src/main.c b/tests/kernel/context/src/main.c
index 114e599..725d07a 100644
--- a/tests/kernel/context/src/main.c
+++ b/tests/kernel/context/src/main.c
@@ -442,15 +442,15 @@
 	int imask;
 
 	/* Align to a "tick boundary" */
-	tick = z_tick_get_32();
-	while (z_tick_get_32() == tick) {
+	tick = sys_clock_tick_get_32();
+	while (sys_clock_tick_get_32() == tick) {
 #if defined(CONFIG_ARCH_POSIX)
 		k_busy_wait(1000);
 #endif
 	}
 
 	tick++;
-	while (z_tick_get_32() == tick) {
+	while (sys_clock_tick_get_32() == tick) {
 #if defined(CONFIG_ARCH_POSIX)
 		k_busy_wait(1000);
 #endif
@@ -467,15 +467,15 @@
 	count <<= 4;
 
 	imask = disable_int(irq);
-	tick = z_tick_get_32();
+	tick = sys_clock_tick_get_32();
 	for (i = 0; i < count; i++) {
-		z_tick_get_32();
+		sys_clock_tick_get_32();
 #if defined(CONFIG_ARCH_POSIX)
 		k_busy_wait(1000);
 #endif
 	}
 
-	tick2 = z_tick_get_32();
+	tick2 = sys_clock_tick_get_32();
 
 	/*
 	 * Re-enable interrupts before returning (for both success and failure
@@ -493,13 +493,13 @@
 
 	/* Now repeat with interrupts unlocked. */
 	for (i = 0; i < count; i++) {
-		z_tick_get_32();
+		sys_clock_tick_get_32();
 #if defined(CONFIG_ARCH_POSIX)
 		k_busy_wait(1000);
 #endif
 	}
 
-	tick2 = z_tick_get_32();
+	tick2 = sys_clock_tick_get_32();
 	zassert_not_equal(tick, tick2,
 			  "tick didn't advance as expected");
 }
@@ -530,13 +530,13 @@
  * -# Do action to align to a tick boundary.
  * -# Left shift 4 bits for the value of counts.
  * -# Call irq_lock() and restore its return value to imask.
- * -# Call z_tick_get_32() and store its return value to tick.
- * -# Repeat counts of calling z_tick_get_32().
- * -# Call z_tick_get_32() and store its return value to tick2.
+ * -# Call sys_clock_tick_get_32() and store its return value to tick.
+ * -# Repeat counts of calling sys_clock_tick_get_32().
+ * -# Call sys_clock_tick_get_32() and store its return value to tick2.
  * -# Call irq_unlock() with parameter imask.
  * -# Check if tick is equal to tick2.
- * -# Repeat counts of calling z_tick_get_32().
- * -# Call z_tick_get_32() and store its return value to tick2.
+ * -# Repeat counts of calling sys_clock_tick_get_32().
+ * -# Call sys_clock_tick_get_32() and store its return value to tick2.
  * -# Check if tick is NOT equal to tick2.
  *
  * Expected Test Result:
@@ -588,13 +588,13 @@
  * -# Do action to align to a tick boundary.
  * -# Left shift 4 bit for the value of counts.
  * -# Call irq_disable() and restore its return value to imask.
- * -# Call z_tick_get_32() and store its return value to tick.
- * -# Repeat counts of calling z_tick_get_32().
- * -# Call z_tick_get_32() and store its return value to tick2.
+ * -# Call sys_clock_tick_get_32() and store its return value to tick.
+ * -# Repeat counts of calling sys_clock_tick_get_32().
+ * -# Call sys_clock_tick_get_32() and store its return value to tick2.
  * -# Call irq_enable() with parameter imask.
  * -# Check if tick is equal to tick2.
- * -# Repeat counts of calling z_tick_get_32().
- * -# Call z_tick_get_32() and store its return value to tick2.
+ * -# Repeat counts of calling sys_clock_tick_get_32().
+ * -# Call sys_clock_tick_get_32() and store its return value to tick2.
  * -# Check if tick is NOT equal to tick2.
  *
  * Expected Test Result:
diff --git a/tests/kernel/fpu_sharing/generic/src/load_store.c b/tests/kernel/fpu_sharing/generic/src/load_store.c
index b870919..1d2f95a 100644
--- a/tests/kernel/fpu_sharing/generic/src/load_store.c
+++ b/tests/kernel/fpu_sharing/generic/src/load_store.c
@@ -136,10 +136,10 @@
 		 * thread an opportunity to run when the low priority thread is
 		 * using the floating point registers.
 		 *
-		 * IMPORTANT: This logic requires that z_tick_get_32() not
+		 * IMPORTANT: This logic requires that sys_clock_tick_get_32() not
 		 * perform any floating point operations!
 		 */
-		while ((z_tick_get_32() % 5) != 0) {
+		while ((sys_clock_tick_get_32() % 5) != 0) {
 			/*
 			 * Use a volatile variable to prevent compiler
 			 * optimizing out the spin loop.