kernel: Cleanup, unify _add_thread_to_ready_q() and _ready_thread()

The scheduler exposed two APIs to do the same thing:
_add_thread_to_ready_q() was a low level primitive that in most cases
was wrapped by _ready_thread(), which also (1) checks
_is_thread_ready() and returns early if the thread is not, (2) flags
the thread as "started" to handle the case of a thread running for the
first time out of a waitq timeout, and (3) signals a logger event.
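
For reference, the pre-patch shape of the wrapper was roughly the
following (a sketch paraphrased from the ksched.h hunk below; the
case #3 logger call is not visible in this diff and is elided):

    static inline void _ready_thread(struct k_thread *thread)
    {
            /* priority bound __ASSERT()s lived here (see below) */

            /* needed to handle the start-with-delay case (#2) */
            _mark_thread_as_started(thread);

            if (_is_thread_ready(thread)) {  /* the case #1 check */
                    _add_thread_to_ready_q(thread);
            }
    }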

As it turns out, all existing usage was already performing the case #1
check itself.  Case #2 is better handled in the timeout resume path
instead of on every call.  And case #3 probably should never have been
skippable in the first place: the raw calls left paths that could make
a thread runnable without logging the event.
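
Concretely, the expiry path now marks the thread as started itself,
just before readying it (excerpted from the timeout_q.h hunk below):

    _unpend_thread_timing_out(thread, timeout);
    _mark_thread_as_started(thread);  /* start-with-delay case (#2) */
    _ready_thread(thread);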

Now _add_thread_to_ready_q() is an internal scheduler API, as it
probably always should have been.
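
External code keeps calling the _ready_thread() wrapper, which now
reduces to the bare readiness check (excerpted from the ksched.h hunk
below):

    static inline void _ready_thread(struct k_thread *thread)
    {
            if (_is_thread_ready(thread)) {
                    _add_thread_to_ready_q(thread);
            }
    }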

This also moves some asserts from the inline _ready_thread() wrapper
into the underlying out-of-line function for code size reasons;
otherwise the extra uses of the inline added by this patch blow past
the code size limits on Quark D2000.
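
The relocated asserts now run once, at the top of the single
out-of-line _add_thread_to_ready_q() body in kernel/sched.c (see that
hunk below), so their format strings are emitted one time instead of
at every inline expansion.  A sketch of the resulting shape:

    void _add_thread_to_ready_q(struct k_thread *thread)
    {
            /* prio lower/upper bound __ASSERT()s moved here */

            /* queue insertion follows, unchanged */
    }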

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index 7f76f52..8385ec0 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -418,21 +418,6 @@
  */
 static inline void _ready_thread(struct k_thread *thread)
 {
-	__ASSERT(_is_prio_higher(thread->base.prio, K_LOWEST_THREAD_PRIO) ||
-		 ((thread->base.prio == K_LOWEST_THREAD_PRIO) &&
-		  (thread == _idle_thread)),
-		 "thread %p prio too low (is %d, cannot be lower than %d)",
-		 thread, thread->base.prio,
-		 thread == _idle_thread ? K_LOWEST_THREAD_PRIO :
-					  K_LOWEST_APPLICATION_THREAD_PRIO);
-
-	__ASSERT(!_is_prio_higher(thread->base.prio, K_HIGHEST_THREAD_PRIO),
-		 "thread %p prio too high (id %d, cannot be higher than %d)",
-		 thread, thread->base.prio, K_HIGHEST_THREAD_PRIO);
-
-	/* needed to handle the start-with-delay case */
-	_mark_thread_as_started(thread);
-
 	if (_is_thread_ready(thread)) {
 		_add_thread_to_ready_q(thread);
 	}
diff --git a/kernel/include/timeout_q.h b/kernel/include/timeout_q.h
index f612760..dc79bb3 100644
--- a/kernel/include/timeout_q.h
+++ b/kernel/include/timeout_q.h
@@ -90,6 +90,7 @@
 	K_DEBUG("timeout %p\n", timeout);
 	if (thread) {
 		_unpend_thread_timing_out(thread, timeout);
+		_mark_thread_as_started(thread);
 		_ready_thread(thread);
 		irq_unlock(key);
 	} else {
diff --git a/kernel/init.c b/kernel/init.c
index dd2ace9..4c63c48 100644
--- a/kernel/init.c
+++ b/kernel/init.c
@@ -274,7 +274,7 @@
 			  IDLE_STACK_SIZE, idle, NULL, NULL, NULL,
 			  K_LOWEST_THREAD_PRIO, K_ESSENTIAL);
 	_mark_thread_as_started(thr);
-	_add_thread_to_ready_q(thr);
+	_ready_thread(thr);
 }
 #endif
 
@@ -352,7 +352,7 @@
 			  NULL, NULL, NULL,
 			  CONFIG_MAIN_THREAD_PRIORITY, K_ESSENTIAL);
 	_mark_thread_as_started(_main_thread);
-	_add_thread_to_ready_q(_main_thread);
+	_ready_thread(_main_thread);
 
 #ifdef CONFIG_MULTITHREADING
 	init_idle_thread(_idle_thread, _idle_stack);
diff --git a/kernel/poll.c b/kernel/poll.c
index 04e5fb0..66e5940 100644
--- a/kernel/poll.c
+++ b/kernel/poll.c
@@ -299,7 +299,7 @@
 		goto ready_event;
 	}
 
-	_add_thread_to_ready_q(thread);
+	_ready_thread(thread);
 	*must_reschedule = !_is_in_isr() && _must_switch_threads();
 
 ready_event:
diff --git a/kernel/sched.c b/kernel/sched.c
index a1f4010..4a3248d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -70,6 +70,18 @@
 
 void _add_thread_to_ready_q(struct k_thread *thread)
 {
+	__ASSERT(_is_prio_higher(thread->base.prio, K_LOWEST_THREAD_PRIO) ||
+		 ((thread->base.prio == K_LOWEST_THREAD_PRIO) &&
+		  (thread == _idle_thread)),
+		 "thread %p prio too low (is %d, cannot be lower than %d)",
+		 thread, thread->base.prio,
+		 thread == _idle_thread ? K_LOWEST_THREAD_PRIO :
+					  K_LOWEST_APPLICATION_THREAD_PRIO);
+
+	__ASSERT(!_is_prio_higher(thread->base.prio, K_HIGHEST_THREAD_PRIO),
+		 "thread %p prio too high (is %d, cannot be higher than %d)",
+		 thread, thread->base.prio, K_HIGHEST_THREAD_PRIO);
+
 #ifdef CONFIG_MULTITHREADING
 	int q_index = _get_ready_q_q_index(thread->base.prio);
 	sys_dlist_t *q = &_ready_q.q[q_index];
diff --git a/kernel/thread.c b/kernel/thread.c
index c24eb45..5516e6d 100644
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -227,16 +227,8 @@
 	}
 
 	_mark_thread_as_started(thread);
-
-	if (_is_thread_ready(thread)) {
-		_add_thread_to_ready_q(thread);
-		if (_must_switch_threads()) {
-			_Swap(key);
-			return;
-		}
-	}
-
-	irq_unlock(key);
+	_ready_thread(thread);
+	_reschedule_threads(key);
 }
 
 #ifdef CONFIG_USERSPACE
@@ -462,10 +454,7 @@
 void _k_thread_single_resume(struct k_thread *thread)
 {
 	_mark_thread_as_not_suspended(thread);
-
-	if (_is_thread_ready(thread)) {
-		_add_thread_to_ready_q(thread);
-	}
+	_ready_thread(thread);
 }
 
 void _impl_k_thread_resume(struct k_thread *thread)