kernel: extend CPU runtime stats

Extends the CPU usage runtime stats to track current, total, peak
and average usage (as bounded by the scheduling of the idle thread).
This permits a developer to obtain more system information if desired
to tune the system.

Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h index 71faef7..0ad7290 100644 --- a/kernel/include/ksched.h +++ b/kernel/include/ksched.h
@@ -386,6 +386,11 @@ void z_sched_usage_start(struct k_thread *thread); /** + * @brief Retrieves CPU cycle usage data for specified core + */ +void z_sched_cpu_usage(uint8_t core_id, struct k_thread_runtime_stats *stats); + +/** * @brief Retrieves thread cycle usage data for specified thread */ void z_sched_thread_usage(struct k_thread *thread,
diff --git a/kernel/thread.c b/kernel/thread.c index 526eb60..280c532 100644 --- a/kernel/thread.c +++ b/kernel/thread.c
@@ -1037,6 +1037,10 @@ int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats) { +#ifdef CONFIG_SCHED_THREAD_USAGE_ALL + k_thread_runtime_stats_t tmp_stats; +#endif + if (stats == NULL) { return -EINVAL; } @@ -1044,8 +1048,19 @@ *stats = (k_thread_runtime_stats_t) {}; #ifdef CONFIG_SCHED_THREAD_USAGE_ALL - stats->execution_cycles = (_kernel.all_thread_usage - + _kernel.idle_thread_usage); + /* Retrieve the usage stats for each core and amalgamate them. */ + + for (uint8_t i = 0; i < CONFIG_MP_NUM_CPUS; i++) { + z_sched_cpu_usage(i, &tmp_stats); + + stats->execution_cycles += tmp_stats.execution_cycles; + stats->total_cycles += tmp_stats.total_cycles; +#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS + stats->peak_cycles += tmp_stats.peak_cycles; + stats->average_cycles += tmp_stats.average_cycles; +#endif + stats->idle_cycles += tmp_stats.idle_cycles; + } #endif return 0;
diff --git a/kernel/usage.c b/kernel/usage.c index f1d6e81..ee0ad86 100644 --- a/kernel/usage.c +++ b/kernel/usage.c
@@ -34,13 +34,20 @@ /** * Update the usage statistics for the specified CPU and thread */ -static void sched_update_usage(struct k_thread *thread, uint32_t cycles) +static void sched_update_usage(struct _cpu *cpu, struct k_thread *thread, + uint32_t cycles) { #ifdef CONFIG_SCHED_THREAD_USAGE_ALL - if (z_is_idle_thread_object(thread)) { - _kernel.idle_thread_usage += cycles; - } else { - _kernel.all_thread_usage += cycles; + if (!z_is_idle_thread_object(thread)) { +#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS + cpu->usage.current += cycles; + + if (cpu->usage.longest < cpu->usage.current) { + cpu->usage.longest = cpu->usage.current; + } +#endif + + cpu->usage.total += cycles; } #endif @@ -87,13 +94,59 @@ if (u0 != 0) { uint32_t dt = usage_now() - u0; - sched_update_usage(cpu->current, dt); + sched_update_usage(cpu, cpu->current, dt); } cpu->usage0 = 0; k_spin_unlock(&usage_lock, k); } +#ifdef CONFIG_SCHED_THREAD_USAGE_ALL +void z_sched_cpu_usage(uint8_t core_id, struct k_thread_runtime_stats *stats) +{ + k_spinlock_key_t key; + struct _cpu *cpu; + uint32_t now; + uint32_t u0; + + cpu = _current_cpu; + key = k_spin_lock(&usage_lock); + + u0 = cpu->usage0; + now = usage_now(); + + if ((u0 != 0) && (&_kernel.cpus[core_id] == cpu)) { + uint32_t dt = now - u0; + + /* It is safe to update the CPU's usage stats */ + + sched_update_usage(cpu, cpu->current, dt); + + cpu->usage0 = now; + } + + stats->total_cycles = cpu->usage.total; +#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS + stats->current_cycles = cpu->usage.current; + stats->peak_cycles = cpu->usage.longest; + + if (cpu->usage.num_windows == 0) { + stats->average_cycles = 0; + } else { + stats->average_cycles = stats->total_cycles / + cpu->usage.num_windows; + } +#endif + + stats->idle_cycles = + _kernel.cpus[core_id].idle_thread->base.usage.total; + + stats->execution_cycles = stats->total_cycles + stats->idle_cycles; + + k_spin_unlock(&usage_lock, key); +} +#endif + void z_sched_thread_usage(struct k_thread *thread, struct k_thread_runtime_stats *stats) { @@ -116,19 +169,19 @@ * running on the current core. */ - sched_update_usage(thread, dt); + sched_update_usage(cpu, thread, dt); cpu->usage0 = now; } stats->execution_cycles = thread->base.usage.total; + stats->total_cycles = thread->base.usage.total; /* Copy-out the thread's usage stats */ #ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS stats->current_cycles = thread->base.usage.current; stats->peak_cycles = thread->base.usage.longest; - stats->total_cycles = thread->base.usage.total; if (thread->base.usage.num_windows == 0) { stats->average_cycles = 0; @@ -136,7 +189,9 @@ stats->average_cycles = stats->total_cycles / thread->base.usage.num_windows; } +#endif +#ifdef CONFIG_SCHED_THREAD_USAGE_ALL stats->idle_cycles = 0; #endif stats->execution_cycles = thread->base.usage.total;