kernel: extend CPU runtime stats
Extends the CPU usage runtime stats to track current, total, peak
and average usage (as bounded by the scheduling of the idle thread).
This permits a developer to obtain additional system information, if
desired, to help tune the system.
Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
diff --git a/kernel/usage.c b/kernel/usage.c
index f1d6e81..ee0ad86 100644
--- a/kernel/usage.c
+++ b/kernel/usage.c
@@ -34,13 +34,20 @@
/**
* Update the usage statistics for the specified CPU and thread
*/
-static void sched_update_usage(struct k_thread *thread, uint32_t cycles)
+static void sched_update_usage(struct _cpu *cpu, struct k_thread *thread,
+ uint32_t cycles)
{
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
- if (z_is_idle_thread_object(thread)) {
- _kernel.idle_thread_usage += cycles;
- } else {
- _kernel.all_thread_usage += cycles;
+ if (!z_is_idle_thread_object(thread)) {
+#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
+ cpu->usage.current += cycles;
+
+ if (cpu->usage.longest < cpu->usage.current) {
+ cpu->usage.longest = cpu->usage.current;
+ }
+#endif
+
+ cpu->usage.total += cycles;
}
#endif
@@ -87,13 +94,59 @@
if (u0 != 0) {
uint32_t dt = usage_now() - u0;
- sched_update_usage(cpu->current, dt);
+ sched_update_usage(cpu, cpu->current, dt);
}
cpu->usage0 = 0;
k_spin_unlock(&usage_lock, k);
}
+#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
+void z_sched_cpu_usage(uint8_t core_id, struct k_thread_runtime_stats *stats)
+{
+	k_spinlock_key_t key;
+	struct _cpu *cpu;
+	uint32_t now;
+	uint32_t u0;
+
+	cpu = &_kernel.cpus[core_id];
+	key = k_spin_lock(&usage_lock);
+
+	u0 = cpu->usage0;
+	now = usage_now();
+
+	if ((u0 != 0) && (cpu == _current_cpu)) {
+		uint32_t dt = now - u0;
+
+		/* Only the current CPU's in-flight window may be folded in */
+
+		sched_update_usage(cpu, cpu->current, dt);
+
+		cpu->usage0 = now;
+	}
+
+	stats->total_cycles = cpu->usage.total;
+#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
+	stats->current_cycles = cpu->usage.current;
+	stats->peak_cycles = cpu->usage.longest;
+
+	if (cpu->usage.num_windows == 0) {
+		stats->average_cycles = 0;
+	} else {
+		stats->average_cycles = stats->total_cycles /
+					cpu->usage.num_windows;
+	}
+#endif
+
+	stats->idle_cycles =
+		cpu->idle_thread->base.usage.total;
+
+	stats->execution_cycles = stats->total_cycles + stats->idle_cycles;
+
+	k_spin_unlock(&usage_lock, key);
+}
+#endif
+
void z_sched_thread_usage(struct k_thread *thread,
struct k_thread_runtime_stats *stats)
{
@@ -116,19 +169,19 @@
* running on the current core.
*/
- sched_update_usage(thread, dt);
+ sched_update_usage(cpu, thread, dt);
cpu->usage0 = now;
}
stats->execution_cycles = thread->base.usage.total;
+ stats->total_cycles = thread->base.usage.total;
/* Copy-out the thread's usage stats */
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
stats->current_cycles = thread->base.usage.current;
stats->peak_cycles = thread->base.usage.longest;
- stats->total_cycles = thread->base.usage.total;
if (thread->base.usage.num_windows == 0) {
stats->average_cycles = 0;
@@ -136,7 +189,9 @@
stats->average_cycles = stats->total_cycles /
thread->base.usage.num_windows;
}
+#endif
+#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
stats->idle_cycles = 0;
#endif
stats->execution_cycles = thread->base.usage.total;