soc: xtensa: esp32: Add CONFIG_SMP protection

Add #ifdef CONFIG_SMP guards around the code that only needs to exist
when SMP is enabled: the CPU start record and state, the APPCPU entry
path, and the cross-core IPI handler.

Also include ksched.h to get the declaration of z_sched_ipi() rather
than keeping a local extern.

Signed-off-by: Kumar Gala <kumar.gala@intel.com>
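---
A condensed view of how the top of esp32-mp.c reads once the patch is
applied may help when scanning the hunks below (reconstructed from the
diff; declarations not shown in the hunks are elided):

	#ifdef CONFIG_SMP
	struct cpustart_rec {
		int cpu;
		arch_cpustart_t fn;
		/* ... */
	};

	volatile struct cpustart_rec *start_rec;
	static void *appcpu_top;
	static bool cpus_active[CONFIG_MP_MAX_NUM_CPUS];
	#endif
	static struct k_spinlock loglock;

Note that loglock deliberately stays outside the guard: smp_log() is
compiled on all builds, and per the comment in the file its logging is
required for reliable operation.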
diff --git a/soc/xtensa/esp32/esp32-mp.c b/soc/xtensa/esp32/esp32-mp.c
index ece3196..ba194b1 100644
--- a/soc/xtensa/esp32/esp32-mp.c
+++ b/soc/xtensa/esp32/esp32-mp.c
@@ -11,6 +11,7 @@
 
 #include <zephyr/drivers/interrupt_controller/intc_esp32.h>
 #include <soc.h>
+#include <ksched.h>
 #include <zephyr/device.h>
 #include <zephyr/kernel.h>
 #include <zephyr/spinlock.h>
@@ -27,6 +28,7 @@
 #define DPORT_APPCPU_CTRL_B    Z_REG(DPORT_BASE, 0x030)
 #define DPORT_APPCPU_CTRL_C    Z_REG(DPORT_BASE, 0x034)
 
+#ifdef CONFIG_SMP
 struct cpustart_rec {
 	int cpu;
 	arch_cpustart_t fn;
@@ -39,9 +41,8 @@
 volatile struct cpustart_rec *start_rec;
 static void *appcpu_top;
 static bool cpus_active[CONFIG_MP_MAX_NUM_CPUS];
+#endif
 static struct k_spinlock loglock;
 
-extern void z_sched_ipi(void);
-
 /* Note that the logging done here is ACTUALLY REQUIRED FOR RELIABLE
  * OPERATION!  At least one particular board will experience spurious
@@ -73,6 +74,7 @@
 #endif
 }
 
+#ifdef CONFIG_SMP
 static void appcpu_entry2(void)
 {
 	volatile int ps, ie;
@@ -165,6 +167,7 @@
 {
 	z_appcpu_stack_switch(appcpu_top, appcpu_entry2);
 }
+#endif
 
 /* The calls and sequencing here were extracted from the ESP-32
  * FreeRTOS integration with just a tiny bit of cleanup.  None of the
@@ -222,14 +225,13 @@
 	smp_log("ESP32: APPCPU start sequence complete");
 }
 
+#ifdef CONFIG_SMP
 IRAM_ATTR static void esp_crosscore_isr(void *arg)
 {
 	ARG_UNUSED(arg);
 
-#ifdef CONFIG_SMP
 	/* Right now this interrupt is only used for IPIs */
 	z_sched_ipi();
-#endif
 
 	const int core_id = esp_core_id();
 
@@ -302,3 +304,4 @@
 {
 	return cpus_active[cpu_num];
 }
+#endif /* CONFIG_SMP */
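
The IPI handler hunk is the one place where a guard moves rather than
being added; flattened out, the function reads roughly like this after
the patch (reconstructed from the hunks above; the remainder of the
body is elided):

	#ifdef CONFIG_SMP
	IRAM_ATTR static void esp_crosscore_isr(void *arg)
	{
		ARG_UNUSED(arg);

		/* Right now this interrupt is only used for IPIs */
		z_sched_ipi();

		const int core_id = esp_core_id();
		/* ... remainder elided ... */
	}
	#endif /* CONFIG_SMP */

Since everything from the handler down to the end of the file sits
inside this guard, uniprocessor builds no longer compile an IPI path
they cannot use, and the z_sched_ipi() prototype pulled in from
ksched.h is only referenced when CONFIG_SMP is set.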