kernel: Make irq_{un}lock() APIs into a global spinlock in SMP mode

In SMP mode, the idea of a single "IRQ lock" goes away.  Long term,
all usage needs to migrate to spinlocks (which become simple IRQ locks
in the uniprocessor case).  For the near term, we can ease the
migration (at the expense of performance) by providing a compatibility
implementation around a single global lock.

Note that one complication is that the older lock was recursive, while
spinlocks will deadlock if you try to lock them twice.  So we
implement a simple "count" semantic to handle multiple locks.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
diff --git a/include/irq.h b/include/irq.h
index 29da432..3591394 100644
--- a/include/irq.h
+++ b/include/irq.h
@@ -190,7 +190,12 @@
  *
  * @return Lock-out key.
  */
+#ifdef CONFIG_SMP
+unsigned int _smp_global_lock(void);
+#define irq_lock() _smp_global_lock()
+#else
 #define irq_lock() _arch_irq_lock()
+#endif
 
 /**
  * @brief Unlock interrupts.
@@ -206,7 +211,12 @@
  *
  * @return N/A
  */
+#ifdef CONFIG_SMP
+void _smp_global_unlock(unsigned int key);
+#define irq_unlock(key) _smp_global_unlock(key)
+#else
 #define irq_unlock(key) _arch_irq_unlock(key)
+#endif
 
 /**
  * @brief Enable an IRQ.
diff --git a/kernel/CMakeLists.txt b/kernel/CMakeLists.txt
index 07ae850..7aa9d15 100644
--- a/kernel/CMakeLists.txt
+++ b/kernel/CMakeLists.txt
@@ -22,6 +22,7 @@
   thread_abort.c
   version.c
   work_q.c
+  smp.c
 )
 
 target_include_directories(kernel PRIVATE ${PROJECT_SOURCE_DIR}/include/posix)
diff --git a/kernel/smp.c b/kernel/smp.c
new file mode 100644
index 0000000..1a1b668
--- /dev/null
+++ b/kernel/smp.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018 Intel corporation
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include <kernel.h>
+#include <kernel_structs.h>
+#include <spinlock.h>
+
+static struct k_spinlock global_spinlock;	/* single lock backing legacy irq_lock() under SMP */
+
+static volatile int recursive_count;	/* nesting depth; non-zero while the lock is held */
+
+/* FIXME: this value of key works on all known architectures as an
+ * "invalid state" that will never be legitimately returned from
+ * _arch_irq_lock().  But we should force the architecture code to
+ * define something for us.
+ */
+#define KEY_RECURSIVE 0xffffffff
+
+unsigned int _smp_global_lock(void)
+{
+	/* Read outside the lock: non-zero means the lock is already
+	 * held, so only the nesting depth changes (see NOTE below).
+	 */
+	if (recursive_count) {	/* NOTE(review): assumes the holder is this CPU -- confirm no cross-CPU race */
+		recursive_count++;
+
+		return KEY_RECURSIVE;	/* sentinel: tells unlock not to release */
+	}
+
+	unsigned int k = k_spin_lock(&global_spinlock).key;	/* k = arch IRQ state to restore later */
+
+	recursive_count = 1;	/* outermost acquisition now held */
+	return k;	/* caller must pass k back to _smp_global_unlock() */
+}
+
+void _smp_global_unlock(unsigned int key)
+{
+	if (key == KEY_RECURSIVE) {	/* nested unlock: drop depth, keep spinlock held */
+		recursive_count--;
+		return;	/* outer lock still owns the spinlock */
+	}
+
+	k_spinlock_key_t sk = { .key = key };	/* rebuild key struct from the saved IRQ state */
+
+	recursive_count = 0;	/* mark unowned before releasing */
+	k_spin_unlock(&global_spinlock, sk);	/* releases lock and restores IRQ state */
+}