kernel/mutex: Fix locking to be SMP-safe

The mutex locking was written to use k_sched_lock(), which doesn't
work as a synchronization primitive when another CPU is running: it
only prevents the current CPU from preempting the current thread and
says nothing about what the other CPUs are doing.

Use the pre-existing spinlock for all synchronization.  One wrinkle is
that the priority inheritance code needed to call
z_thread_priority_set(), which is a rescheduling call and cannot be
made with a spinlock held.  That path is now split out into a
low-level utility, z_set_prio(), which updates the scheduler state but
lets the caller defer the actual yield until later.
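
Roughly, the caller-side pattern now looks like this (simplified,
illustrative sketch of the hunks below, not literal code from the
patch):

    k_spinlock_key_t key = k_spin_lock(&lock);

    /* z_set_prio() updates the scheduler state and reports whether a
     * reschedule is needed, but does not yield by itself.
     */
    bool resched = z_set_prio(mutex->owner, new_prio);

    /* ... more bookkeeping under the same lock ... */

    if (resched) {
        z_reschedule(&lock, key);       /* drops the lock, may yield */
    } else {
        k_spin_unlock(&lock, key);
    }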

Fixes #17584

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 724c58e..5590e8f 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -99,7 +99,7 @@
 	return new_prio;
 }
 
-static void adjust_owner_prio(struct k_mutex *mutex, s32_t new_prio)
+static bool adjust_owner_prio(struct k_mutex *mutex, s32_t new_prio)
 {
 	if (mutex->owner->base.prio != new_prio) {
 
@@ -108,17 +108,19 @@
 			'y' : 'n',
 			new_prio, mutex->owner->base.prio);
 
-		z_thread_priority_set(mutex->owner, new_prio);
+		return z_set_prio(mutex->owner, new_prio);
 	}
+	return false;
 }
 
 int z_impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
 {
 	int new_prio;
 	k_spinlock_key_t key;
+	bool resched = false;
 
 	sys_trace_void(SYS_TRACE_ID_MUTEX_LOCK);
-	z_sched_lock();
+	key = k_spin_lock(&lock);
 
 	if (likely((mutex->lock_count == 0U) || (mutex->owner == _current))) {
 
@@ -133,14 +135,14 @@
 			_current, mutex, mutex->lock_count,
 			mutex->owner_orig_prio);
 
-		k_sched_unlock();
+		k_spin_unlock(&lock, key);
 		sys_trace_end_call(SYS_TRACE_ID_MUTEX_LOCK);
 
 		return 0;
 	}
 
 	if (unlikely(timeout == (s32_t)K_NO_WAIT)) {
-		k_sched_unlock();
+		k_spin_unlock(&lock, key);
 		sys_trace_end_call(SYS_TRACE_ID_MUTEX_LOCK);
 		return -EBUSY;
 	}
@@ -148,12 +150,10 @@
 	new_prio = new_prio_for_inheritance(_current->base.prio,
 					    mutex->owner->base.prio);
 
-	key = k_spin_lock(&lock);
-
 	K_DEBUG("adjusting prio up on mutex %p\n", mutex);
 
 	if (z_is_prio_higher(new_prio, mutex->owner->base.prio)) {
-		adjust_owner_prio(mutex, new_prio);
+		resched = adjust_owner_prio(mutex, new_prio);
 	}
 
 	int got_mutex = z_pend_curr(&lock, key, &mutex->wait_q, timeout);
@@ -164,7 +164,6 @@
 		got_mutex ? 'y' : 'n');
 
 	if (got_mutex == 0) {
-		k_sched_unlock();
 		sys_trace_end_call(SYS_TRACE_ID_MUTEX_LOCK);
 		return 0;
 	}
@@ -173,6 +172,8 @@
 
 	K_DEBUG("%p timeout on mutex %p\n", _current, mutex);
 
+	key = k_spin_lock(&lock);
+
 	struct k_thread *waiter = z_waitq_head(&mutex->wait_q);
 
 	new_prio = mutex->owner_orig_prio;
@@ -182,11 +183,13 @@
 
 	K_DEBUG("adjusting prio down on mutex %p\n", mutex);
 
-	key = k_spin_lock(&lock);
-	adjust_owner_prio(mutex, new_prio);
-	k_spin_unlock(&lock, key);
+	resched = adjust_owner_prio(mutex, new_prio) || resched;
 
-	k_sched_unlock();
+	if (resched) {
+		z_reschedule(&lock, key);
+	} else {
+		k_spin_unlock(&lock, key);
+	}
 
 	sys_trace_end_call(SYS_TRACE_ID_MUTEX_LOCK);
 	return -EAGAIN;