x86: remove unused and x86 only latency benchmark

We now have a multi-architecture latency benchmark; this one was x86
only, was never used or compiled in, and is outdated.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
diff --git a/arch/x86/core/cpuhalt.c b/arch/x86/core/cpuhalt.c
index 9361a58..36282bf 100644
--- a/arch/x86/core/cpuhalt.c
+++ b/arch/x86/core/cpuhalt.c
@@ -44,7 +44,6 @@
  */
 void k_cpu_idle(void)
 {
-	z_int_latency_stop();
 	z_sys_trace_idle();
 #if defined(CONFIG_BOOT_TIME_MEASUREMENT)
 	__idle_time_stamp = (u64_t)k_cycle_get_32();
@@ -75,7 +74,6 @@
 
 void k_cpu_atomic_idle(unsigned int key)
 {
-	z_int_latency_stop();
 	z_sys_trace_idle();
 
 	__asm__ volatile (
@@ -96,7 +94,6 @@
 
 	/* restore interrupt lockout state before returning to caller */
 	if ((key & 0x200U) == 0U) {
-		z_int_latency_start();
 		__asm__ volatile("cli");
 	}
 }
diff --git a/arch/x86/core/intstub.S b/arch/x86/core/intstub.S
index f8e5959..41fd968 100644
--- a/arch/x86/core/intstub.S
+++ b/arch/x86/core/intstub.S
@@ -37,10 +37,6 @@
 #endif
 
 
-#ifdef CONFIG_INT_LATENCY_BENCHMARK
-	GTEXT(z_int_latency_start)
-	GTEXT(z_int_latency_stop)
-#endif
 /**
  *
  * @brief Inform the kernel of an interrupt
@@ -135,24 +131,12 @@
 	pushl	%edi
 
 
-#if defined(CONFIG_INT_LATENCY_BENCHMARK) || \
-		defined(CONFIG_TRACING)
+#if defined(CONFIG_TRACING)
 
 	/* Save these as we are using to keep track of isr and isr_param */
 	pushl	%eax
 	pushl	%edx
 
-#ifdef CONFIG_INT_LATENCY_BENCHMARK
-	/*
-	 * Volatile registers are now saved it is safe to start measuring
-	 * how long interrupt are disabled.
-	 * The interrupt gate created by IRQ_CONNECT disables the
-	 * interrupt.
-	 */
-
-	call	z_int_latency_start
-#endif
-
 	call	z_sys_trace_isr_enter
 
 	popl	%edx
@@ -193,13 +177,6 @@
 	/* fall through to nested case */
 
 alreadyOnIntStack:
-#ifdef CONFIG_INT_LATENCY_BENCHMARK
-	pushl	%eax
-	pushl	%edx
-	call	z_int_latency_stop
-	popl	%edx
-	popl	%eax
-#endif
 
 #ifndef CONFIG_X86_IAMCU
 	/* EAX has the interrupt handler argument, needs to go on
@@ -236,10 +213,6 @@
 	/* irq_controller.h interface */
 	_irq_controller_eoi_macro
 
-#ifdef CONFIG_INT_LATENCY_BENCHMARK
-	call	z_int_latency_start
-#endif
-
 	/* determine whether exiting from a nested interrupt */
 	movl	$_kernel, %ecx
 	decl	_kernel_offset_to_nested(%ecx)	/* dec interrupt nest count */
@@ -308,9 +281,6 @@
 #endif /* CONFIG_LAZY_FP_SHARING */
 
 	/* Restore volatile registers and return to the interrupted thread */
-#ifdef CONFIG_INT_LATENCY_BENCHMARK
-	call	z_int_latency_stop
-#endif
 	popl	%edi
 	popl	%ecx
 	popl	%edx
@@ -344,10 +314,6 @@
 	 */
 
 nestedInterrupt:
-#ifdef CONFIG_INT_LATENCY_BENCHMARK
-	call	z_int_latency_stop
-#endif
-
 	popl	%edi
 	popl	%ecx		/* pop volatile registers in reverse order */
 	popl	%edx
diff --git a/arch/x86/core/irq_manage.c b/arch/x86/core/irq_manage.c
index 5def4f7..a7d4786 100644
--- a/arch/x86/core/irq_manage.c
+++ b/arch/x86/core/irq_manage.c
@@ -61,7 +61,6 @@
 
 void z_arch_isr_direct_header(void)
 {
-	z_int_latency_start();
 	z_sys_trace_isr_enter();
 
 	/* We're not going to unlock IRQs, but we still need to increment this
@@ -73,7 +72,6 @@
 void z_arch_isr_direct_footer(int swap)
 {
 	z_irq_controller_eoi();
-	z_int_latency_stop();
 	sys_trace_isr_exit();
 	--_kernel.nested;
 
diff --git a/arch/x86/core/swap.S b/arch/x86/core/swap.S
index 60ad362..dbad84d 100644
--- a/arch/x86/core/swap.S
+++ b/arch/x86/core/swap.S
@@ -382,19 +382,6 @@
 	/* Utilize the 'eflags' parameter to __swap() */
 
 	pushl	4(%esp)
-#ifdef CONFIG_INT_LATENCY_BENCHMARK
-	testl	$0x200, (%esp)
-	jz	skipIntLatencyStop
-
-	/* save %eax since it used as the return value for __swap */
-	pushl	%eax
-	/* interrupts are being reenabled, stop accumulating time */
-	call	z_int_latency_stop
-	/* restore __swap's %eax */
-	popl	%eax
-
-skipIntLatencyStop:
-#endif
 	popfl
 #if CONFIG_X86_IAMCU
 	/* Remember that eflags we stuck into the stack before the return
diff --git a/boards/posix/native_posix/irq_handler.c b/boards/posix/native_posix/irq_handler.c
index 31225c6..3e6911e 100644
--- a/boards/posix/native_posix/irq_handler.c
+++ b/boards/posix/native_posix/irq_handler.c
@@ -30,12 +30,6 @@
 
 static inline void vector_to_irq(int irq_nbr, int *may_swap)
 {
-	/*
-	 * As in this architecture an irq (code) executes in 0 time,
-	 * it is a bit senseless to call z_int_latency_start/stop()
-	 */
-	/* z_int_latency_start(); */
-
 	sys_trace_isr_enter();
 
 	if (irq_vector_table[irq_nbr].func == NULL) { /* LCOV_EXCL_BR_LINE */
@@ -59,7 +53,6 @@
 	}
 
 	sys_trace_isr_exit();
-	/* z_int_latency_stop(); */
 }
 
 /**
diff --git a/boards/posix/nrf52_bsim/irq_handler.c b/boards/posix/nrf52_bsim/irq_handler.c
index f11cdf0..e892e48 100644
--- a/boards/posix/nrf52_bsim/irq_handler.c
+++ b/boards/posix/nrf52_bsim/irq_handler.c
@@ -85,11 +85,6 @@
 	bs_trace_raw_time(6, "Vectoring to irq %i (%s)\n", irq_nbr,
 			  irqnames[irq_nbr]);
 
-	/*
-	 * As in this architecture an irq (code) executes in 0 time,
-	 * it is a bit senseless to call z_int_latency_start/stop()
-	 */
-	/* z_int_latency_start(); */
 	sys_trace_isr_enter();
 
 	if (irq_vector_table[irq_nbr].func == NULL) { /* LCOV_EXCL_BR_LINE */
@@ -113,7 +108,6 @@
 	}
 
 	sys_trace_isr_exit();
-	/* z_int_latency_stop(); */
 
 	bs_trace_raw_time(7, "Irq %i (%s) ended\n", irq_nbr, irqnames[irq_nbr]);
 }
diff --git a/include/arch/x86/arch.h b/include/arch/x86/arch.h
index 3eabc3e..acd3b62 100644
--- a/include/arch/x86/arch.h
+++ b/include/arch/x86/arch.h
@@ -51,14 +51,6 @@
 
 #ifndef _ASMLANGUAGE
 
-#ifdef CONFIG_INT_LATENCY_BENCHMARK
-void z_int_latency_start(void);
-void z_int_latency_stop(void);
-#else
-#define z_int_latency_start()  do { } while (false)
-#define z_int_latency_stop()   do { } while (false)
-#endif
-
 /* interrupt/exception/error related definitions */
 
 
@@ -422,8 +414,6 @@
 {
 	unsigned int key = _do_irq_lock();
 
-	z_int_latency_start();
-
 	return key;
 }
 
@@ -448,8 +438,6 @@
 		return;
 	}
 
-	z_int_latency_stop();
-
 	z_do_irq_unlock();
 }
 
diff --git a/kernel/CMakeLists.txt b/kernel/CMakeLists.txt
index 1f04d1d..37a27ba 100644
--- a/kernel/CMakeLists.txt
+++ b/kernel/CMakeLists.txt
@@ -34,7 +34,6 @@
   __ZEPHYR_SUPERVISOR__
   )
 
-target_sources_ifdef(CONFIG_INT_LATENCY_BENCHMARK kernel PRIVATE int_latency_bench.c)
 target_sources_ifdef(CONFIG_STACK_CANARIES        kernel PRIVATE compiler_stack_protect.c)
 target_sources_ifdef(CONFIG_SYS_CLOCK_EXISTS      kernel PRIVATE timeout.c timer.c)
 target_sources_ifdef(CONFIG_ATOMIC_OPERATIONS_C   kernel PRIVATE atomic_c.c)
diff --git a/kernel/Kconfig b/kernel/Kconfig
index 034e572..3f729ec 100644
--- a/kernel/Kconfig
+++ b/kernel/Kconfig
@@ -332,16 +332,6 @@
 	  achieved by waiting for DCD on the serial port--however, not
 	  all serial ports have DCD.
 
-config INT_LATENCY_BENCHMARK
-	bool "Interrupt latency metrics [EXPERIMENTAL]"
-	depends on ARCH="x86"
-	help
-	  This option enables the tracking of interrupt latency metrics;
-	  the exact set of metrics being tracked is board-dependent.
-	  Tracking begins when int_latency_init() is invoked by an application.
-	  The metrics are displayed (and a new sampling interval is started)
-	  each time int_latency_show() is called thereafter.
-
 config EXECUTION_BENCHMARKING
 	bool "Timing metrics"
 	help
diff --git a/kernel/int_latency_bench.c b/kernel/int_latency_bench.c
deleted file mode 100644
index c2a2a9b..0000000
--- a/kernel/int_latency_bench.c
+++ /dev/null
@@ -1,225 +0,0 @@
-/* int_latency_bench.c - interrupt latency benchmark support */
-
-/*
- * Copyright (c) 2012-2015 Wind River Systems, Inc.
- *
- * SPDX-License-Identifier: Apache-2.0
- */
-
-#include "toolchain.h"
-#include "sections.h"
-#include <zephyr/types.h>	    /* u32_t */
-#include <limits.h>	    /* ULONG_MAX */
-#include <misc/printk.h> /* printk */
-#include <sys_clock.h>
-#include <drivers/system_timer.h>
-
-#define NB_CACHE_WARMING_DRY_RUN 7
-
-/*
- * Timestamp corresponding to when interrupt were turned off.
- * A value of zero indicated interrupt are not currently locked.
- */
-static u32_t int_locked_timestamp;
-
-/* stats tracking the minimum and maximum time when interrupts were locked */
-static u32_t int_locked_latency_min = ULONG_MAX;
-static u32_t int_locked_latency_max;
-
-/* overhead added to intLock/intUnlock by this latency benchmark */
-static u32_t initial_start_delay;
-static u32_t nesting_delay;
-static u32_t stop_delay;
-
-/* counter tracking intLock/intUnlock calls once interrupt are locked */
-static u32_t int_lock_unlock_nest;
-
-/* indicate if the interrupt latency benchamrk is ready to be used */
-static u32_t int_latency_bench_ready;
-
-/* min amount of time it takes from HW interrupt generation to 'C' handler */
-u32_t _hw_irq_to_c_handler_latency = ULONG_MAX;
-
-/**
- *
- * @brief Start tracking time spent with interrupts locked
- *
- * calls to lock interrupt can nest, so this routine can be called numerous
- * times before interrupt are unlocked
- *
- * @return N/A
- *
- */
-void z_int_latency_start(void)
-{
-	/* when interrupts are not already locked, take time stamp */
-	if (!int_locked_timestamp && int_latency_bench_ready) {
-		int_locked_timestamp = k_cycle_get_32();
-		int_lock_unlock_nest = 0U;
-	}
-	int_lock_unlock_nest++;
-}
-
-/**
- *
- * @brief Stop accumulating time spent for when interrupts are locked
- *
- * This is only call once when the interrupt are being reenabled
- *
- * @return N/A
- *
- */
-void z_int_latency_stop(void)
-{
-	u32_t delta;
-	u32_t delayOverhead;
-	u32_t currentTime = k_cycle_get_32();
-
-	/* ensured intLatencyStart() was invoked first */
-	if (int_locked_timestamp) {
-		/*
-		 * time spent with interrupt lock is:
-		 * (current time - time when interrupt got disabled first) -
-		 * (delay when invoking start + number nested calls to intLock *
-		 * time it takes to call intLatencyStart + intLatencyStop)
-		 */
-		delta = (currentTime - int_locked_timestamp);
-
-		/*
-		 * Substract overhead introduce by the int latency benchmark
-		 * only if
-		 * it is bigger than delta.  It can be possible sometimes for
-		 * delta to
-		 * be smaller than the estimated overhead.
-		 */
-		delayOverhead =
-			(initial_start_delay +
-			 ((int_lock_unlock_nest - 1) * nesting_delay) + stop_delay);
-		if (delta >= delayOverhead)
-			delta -= delayOverhead;
-
-		/* update max */
-		if (delta > int_locked_latency_max)
-			int_locked_latency_max = delta;
-
-		/* update min */
-		if (delta < int_locked_latency_min)
-			int_locked_latency_min = delta;
-
-		/* interrupts are now enabled, get ready for next interrupt lock
-		 */
-		int_locked_timestamp = 0U;
-	}
-}
-
-/**
- *
- * @brief Initialize interrupt latency benchmark
- *
- * @return N/A
- *
- */
-void int_latency_init(void)
-{
-	u32_t timeToReadTime;
-	u32_t cacheWarming = NB_CACHE_WARMING_DRY_RUN;
-
-	int_latency_bench_ready = 1U;
-
-	/*
-	 * measuring delay introduced by the interrupt latency benchmark few
-	 * times to ensure we get the best possible values. The overhead of
-	 * invoking the latency can changes runtime (i.e. cache hit or miss)
-	 * but an estimated overhead is used to adjust Max interrupt latency.
-	 * The overhead introduced by benchmark is composed of three values:
-	 * initial_start_delay, nesting_delay, stop_delay.
-	 */
-	while (cacheWarming) {
-		/* measure how much time it takes to read time */
-		timeToReadTime = k_cycle_get_32();
-		timeToReadTime = k_cycle_get_32() - timeToReadTime;
-
-		/* measure time to call intLatencyStart() and intLatencyStop
-		 * takes
-		 */
-		initial_start_delay = k_cycle_get_32();
-		z_int_latency_start();
-		initial_start_delay =
-			k_cycle_get_32() - initial_start_delay - timeToReadTime;
-
-		nesting_delay = k_cycle_get_32();
-		z_int_latency_start();
-		nesting_delay = k_cycle_get_32() - nesting_delay - timeToReadTime;
-
-		stop_delay = k_cycle_get_32();
-		z_int_latency_stop();
-		stop_delay = k_cycle_get_32() - stop_delay - timeToReadTime;
-
-		/* re-initialize globals to default values */
-		int_locked_latency_min = ULONG_MAX;
-		int_locked_latency_max = 0U;
-
-		cacheWarming--;
-	}
-}
-
-/**
- *
- * @brief Dumps interrupt latency values
- *
- * The interrupt latency value measures
- *
- * @return N/A
- *
- */
-void int_latency_show(void)
-{
-	u32_t intHandlerLatency = 0U;
-
-	if (int_latency_bench_ready == 0U) {
-		printk("error: int_latency_init() has not been invoked\n");
-		return;
-	}
-
-	if (int_locked_latency_min != ULONG_MAX) {
-		if (_hw_irq_to_c_handler_latency == ULONG_MAX) {
-			intHandlerLatency = 0U;
-			printk(" Min latency from hw interrupt up to 'C' int. "
-			       "handler: "
-			       "not measured\n");
-		} else {
-			intHandlerLatency = _hw_irq_to_c_handler_latency;
-			printk(" Min latency from hw interrupt up to 'C' int. "
-			       "handler:"
-			       " %d tcs = %d nsec\n",
-			       intHandlerLatency,
-			       SYS_CLOCK_HW_CYCLES_TO_NS(intHandlerLatency));
-		}
-
-		printk(" Max interrupt latency (includes hw int. to 'C' "
-		       "handler):"
-		       " %d tcs = %d nsec\n",
-		       int_locked_latency_max + intHandlerLatency,
-		       SYS_CLOCK_HW_CYCLES_TO_NS(int_locked_latency_max + intHandlerLatency));
-
-		printk(" Overhead substracted from Max int. latency:\n"
-		       "  for int. lock           : %d tcs = %d nsec\n"
-		       "  each time int. lock nest: %d tcs = %d nsec\n"
-		       "  for int. unlocked       : %d tcs = %d nsec\n",
-		       initial_start_delay,
-		       SYS_CLOCK_HW_CYCLES_TO_NS(initial_start_delay),
-		       nesting_delay,
-		       SYS_CLOCK_HW_CYCLES_TO_NS(nesting_delay),
-		       stop_delay,
-		       SYS_CLOCK_HW_CYCLES_TO_NS(stop_delay));
-	} else {
-		printk("interrupts were not locked and unlocked yet\n");
-	}
-	/*
-	 * Lets start with new values so that one extra long path executed
-	 * with interrupt disabled hide smaller paths with interrupt
-	 * disabled.
-	 */
-	int_locked_latency_min = ULONG_MAX;
-	int_locked_latency_max = 0U;
-}