kernel/sched: Remove remaining irq_lock use

The k_sleep() locking was actually there to protect the _current state
from preemption before the context switch, so document that and replace
it with a spinlock.  This should probably be unified with the rather
cleaner logic in pend_curr(), but right now "sleeping" and "pended" are
needlessly distinct states.
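
A function-local spinlock that no other CPU can ever observe reduces to
a plain local IRQ mask, which is all the sleep path needs while _current
is mid-transition.  A minimal sketch of the idiom (the wrapper name and
include are illustrative assumptions, not part of this patch):

    #include <kernel.h>  /* assumed to provide the k_spinlock API here */

    static void sleepy_state_change(void)
    {
            /* Never shared, so contention is impossible; taking the
             * lock only masks interrupts on this CPU.
             */
            struct k_spinlock local_lock = {};
            k_spinlock_key_t key = k_spin_lock(&local_lock);

            /* ... modify _current's ready/timeout state here ... */

            /* k_sleep() instead hands the lock and key to _Swap(),
             * which releases them as part of the context switch.
             */
            k_spin_unlock(&local_lock, key);
    }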

We can also remove the locking entirely from k_wakeup().  There's no
reason for any of that to need to be synchronized.  Even if we're
racing with other thread modifications, the state on exit will be a
runnable thread without a timeout, or whatever timeout/pend state the
other side was requesting (i.e. it's a bug, but not one solved by
synchronization).

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
diff --git a/kernel/sched.c b/kernel/sched.c
index 31ccc56..f0663c8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -871,7 +871,6 @@
 #ifdef CONFIG_MULTITHREADING
 	u32_t expected_wakeup_time;
 	s32_t ticks;
-	unsigned int key;
 
 	__ASSERT(!_is_in_isr(), "");
 	__ASSERT(duration != K_FOREVER, "");
@@ -886,12 +885,18 @@
 
 	ticks = _TICK_ALIGN + _ms_to_ticks(duration);
 	expected_wakeup_time = ticks + z_tick_get_32();
-	key = irq_lock();
+
+	/* Spinlock purely for local interrupt locking to prevent us
+	 * from being interrupted while _current is in an intermediate
+	 * state.  Should unify this implementation with pend().
+	 */
+	struct k_spinlock local_lock = {};
+	k_spinlock_key_t key = k_spin_lock(&local_lock);
 
 	_remove_thread_from_ready_q(_current);
 	_add_thread_timeout(_current, ticks);
 
-	(void)_Swap_irqlock(key);
+	(void)_Swap(&local_lock, key);
 
 	ticks = expected_wakeup_time - z_tick_get_32();
 	if (ticks > 0) {
@@ -917,25 +922,18 @@
 
 void _impl_k_wakeup(k_tid_t thread)
 {
-	unsigned int key = irq_lock();
-
-	/* verify first if thread is not waiting on an object */
 	if (_is_thread_pending(thread)) {
-		irq_unlock(key);
 		return;
 	}
 
 	if (_abort_thread_timeout(thread) < 0) {
-		irq_unlock(key);
 		return;
 	}
 
 	_ready_thread(thread);
 
-	if (_is_in_isr()) {
-		irq_unlock(key);
-	} else {
-		_reschedule_irqlock(key);
+	if (!_is_in_isr()) {
+		_reschedule_unlocked();
 	}
 }