unified: Ensure delays do not time out prematurely

Ensures that all APIs which accept a timeout value wait for at least
the specified amount of time, and do not time out prematurely.

* The kernel now waits for the next system clock tick to occur before
  the timeout interval is considered to have started. (That is, the only
  way to ensure a delay of N tick intervals is to wait for N+1 ticks
  to occur.)

* Gets rid of ticks -> milliseconds -> ticks conversion in task_sleep()
  and fiber_sleep() legacy APIs, since this introduces rounding that
  -- coupled with the previous change -- can alter the number of ticks
  being requested during the sleep operation.

* Corrects work queue API documentation and parameter naming that
  incorrectly described the delay as being measured in ticks, rather
  than milliseconds.

Change-Id: I8b04467237b24fb0364c8f344d872457418c18da
Signed-off-by: Allan Stephens <allan.stephens@windriver.com>
diff --git a/include/kernel.h b/include/kernel.h
index 2cc3cfe..bb88526 100644
--- a/include/kernel.h
+++ b/include/kernel.h
@@ -273,6 +273,9 @@
 
 /* private internal time manipulation (users should never play with ticks) */
 
+/* added tick needed to account for tick in progress */
+#define _TICK_ALIGN 1
+
 static int64_t __ticks_to_ms(int64_t ticks)
 {
 #if CONFIG_SYS_CLOCK_EXISTS
@@ -694,15 +697,15 @@
  * mutual exclusion mechanism. Such usage is not recommended and if necessary,
  * it should be explicitly done between the submitter and the handler.
  *
- * @param work_q to schedule the work item
+ * @param work_q Workqueue to schedule the work item
  * @param work Delayed work item
- * @param ticks Ticks to wait before scheduling the work item
+ * @param delay Delay before scheduling the work item (in milliseconds)
  *
  * @return 0 in case of success or negative value in case of error.
  */
 extern int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
 					  struct k_delayed_work *work,
-					  int32_t ticks);
+					  int32_t delay);
 
 /**
  * @brief Cancel a delayed work item
@@ -749,9 +752,9 @@
  * unexpected behavior.
  */
 static inline int k_delayed_work_submit(struct k_delayed_work *work,
-					   int ticks)
+					   int32_t delay)
 {
-	return k_delayed_work_submit_to_queue(&k_sys_work_q, work, ticks);
+	return k_delayed_work_submit_to_queue(&k_sys_work_q, work, delay);
 }
 
 #endif /* CONFIG_SYS_CLOCK_EXISTS */
diff --git a/include/legacy.h b/include/legacy.h
index 2d1d482..4ae1469 100644
--- a/include/legacy.h
+++ b/include/legacy.h
@@ -138,17 +138,16 @@
 
 #define fiber_yield k_yield
 #define fiber_abort() k_thread_abort(k_current_get())
-static inline void fiber_sleep(int32_t timeout)
-{
-	k_sleep(_ticks_to_ms(timeout));
-}
+
+extern void _legacy_sleep(int32_t ticks);
+#define fiber_sleep _legacy_sleep
+#define task_sleep _legacy_sleep
 
 #define fiber_wakeup k_wakeup
 #define isr_fiber_wakeup k_wakeup
 #define fiber_fiber_wakeup k_wakeup
 #define task_fiber_wakeup k_wakeup
 
-#define task_sleep fiber_sleep
 #define task_yield k_yield
 #define task_priority_set(task, prio) k_thread_priority_set(task, (int)prio)
 #define task_entry_set(task, entry) \
diff --git a/kernel/unified/legacy/timer_legacy.c b/kernel/unified/legacy/timer_legacy.c
index b70edfd..1e61f93 100644
--- a/kernel/unified/legacy/timer_legacy.c
+++ b/kernel/unified/legacy/timer_legacy.c
@@ -17,8 +17,29 @@
 #include <kernel.h>
 #include <init.h>
 #include <ksched.h>
+#include <wait_q.h>
+#include <misc/__assert.h>
 #include <misc/util.h>
 
+void _legacy_sleep(int32_t ticks)
+{
+	__ASSERT(!_is_in_isr(), "");
+	__ASSERT(ticks != TICKS_UNLIMITED, "");
+
+	if (ticks <= 0) {
+		k_yield();
+		return;
+	}
+
+	int key = irq_lock();
+
+	_mark_thread_as_timing(_current);
+	_remove_thread_from_ready_q(_current);
+	_add_thread_timeout(_current, NULL, ticks);
+
+	_Swap(key);
+}
+
 #if (CONFIG_NUM_DYNAMIC_TIMERS > 0)
 
 static struct k_timer dynamic_timers[CONFIG_NUM_DYNAMIC_TIMERS];
diff --git a/kernel/unified/sched.c b/kernel/unified/sched.c
index a03b138..44c5a2b 100644
--- a/kernel/unified/sched.c
+++ b/kernel/unified/sched.c
@@ -152,7 +152,8 @@
 
 	if (timeout != K_FOREVER) {
 		_mark_thread_as_timing(thread);
-		_add_thread_timeout(thread, wait_q, _ms_to_ticks(timeout));
+		_add_thread_timeout(thread, wait_q,
+					_TICK_ALIGN + _ms_to_ticks(timeout));
 	}
 }
 
@@ -296,7 +297,8 @@
 
 	_mark_thread_as_timing(_current);
 	_remove_thread_from_ready_q(_current);
-	_add_thread_timeout(_current, NULL, _ms_to_ticks(duration));
+	_add_thread_timeout(_current, NULL,
+				_TICK_ALIGN + _ms_to_ticks(duration));
 
 	_Swap(key);
 }
diff --git a/kernel/unified/thread.c b/kernel/unified/thread.c
index 06e247a..ed2c7a2 100644
--- a/kernel/unified/thread.c
+++ b/kernel/unified/thread.c
@@ -271,7 +271,8 @@
 		start_thread(thread);
 	} else {
 		_mark_thread_as_timing(thread);
-		_add_thread_timeout(thread, NULL, _ms_to_ticks(delay));
+		_add_thread_timeout(thread, NULL,
+					_TICK_ALIGN + _ms_to_ticks(delay));
 	}
 #else
 	ARG_UNUSED(delay);
diff --git a/kernel/unified/timer.c b/kernel/unified/timer.c
index 7766901..4485311 100644
--- a/kernel/unified/timer.c
+++ b/kernel/unified/timer.c
@@ -31,7 +31,10 @@
 	struct k_timer *timer = CONTAINER_OF(t, struct k_timer, timeout);
 	struct k_thread *pending_thread;
 
-	/* if the time is periodic, start it again */
+	/*
+	 * if the timer is periodic, start it again; don't add _TICK_ALIGN
+	 * since we're already aligned to a tick boundary
+	 */
 	if (timer->period > 0) {
 		_add_timeout(NULL, &timer->timeout, &timer->wait_q,
 				timer->period);
@@ -87,7 +90,7 @@
 
 	timer->period = _ms_to_ticks(period);
 	_add_timeout(NULL, &timer->timeout, &timer->wait_q,
-			_ms_to_ticks(duration));
+			_TICK_ALIGN + _ms_to_ticks(duration));
 	timer->status = 0;
 	irq_unlock(key);
 }
diff --git a/kernel/unified/work_q.c b/kernel/unified/work_q.c
index 6bd3c3e..4c3a2be 100644
--- a/kernel/unified/work_q.c
+++ b/kernel/unified/work_q.c
@@ -81,7 +81,7 @@
 
 int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
 				   struct k_delayed_work *work,
-				   int32_t timeout)
+				   int32_t delay)
 {
 	int key = irq_lock();
 	int err;
@@ -103,12 +103,13 @@
 	/* Attach workqueue so the timeout callback can submit it */
 	work->work_q = work_q;
 
-	if (!timeout) {
+	if (!delay) {
 		/* Submit work if no ticks is 0 */
 		k_work_submit_to_queue(work_q, &work->work);
 	} else {
 		/* Add timeout */
-		_add_timeout(NULL, &work->timeout, NULL, _ms_to_ticks(timeout));
+		_add_timeout(NULL, &work->timeout, NULL,
+				_TICK_ALIGN + _ms_to_ticks(delay));
 	}
 
 	err = 0;