kernel: Refactor CPU usage

Refactors CPU usage (thread runtime stats) so that the per-CPU
k_cycle_stats records live in a single kernel-wide array, with each
CPU holding a pointer to its own entry. This makes the data easier to
integrate with the object core statistics framework.
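
For illustration, a minimal standalone sketch of the layout change
(simplified stand-in types, not Zephyr source; CONFIG_MP_MAX_NUM_CPUS
is assumed to be the configured CPU count):

    #include <stdbool.h>
    #include <stdint.h>

    #define CONFIG_MP_MAX_NUM_CPUS 2

    /* Stand-in for the kernel's cycle statistics record */
    struct k_cycle_stats {
            uint64_t total;       /* non-idle cycles accumulated */
            bool     track_usage; /* true if gathering usage stats */
    };

    /* Before: each CPU embedded its own record */
    struct cpu_before {
            struct k_cycle_stats usage;
    };

    /* After: the records live in one kernel-owned array and each CPU
     * points at its slot, so external code can reference the whole
     * set without walking the per-CPU structures.
     */
    struct cpu_after {
            struct k_cycle_stats *usage;
    };

    struct kernel_after {
            struct cpu_after     cpus[CONFIG_MP_MAX_NUM_CPUS];
            struct k_cycle_stats usage[CONFIG_MP_MAX_NUM_CPUS];
    };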

Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
diff --git a/include/zephyr/kernel_structs.h b/include/zephyr/kernel_structs.h
index 7289866..4ead724 100644
--- a/include/zephyr/kernel_structs.h
+++ b/include/zephyr/kernel_structs.h
@@ -141,7 +141,7 @@
 	uint32_t usage0;
 
 #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
-	struct k_cycle_stats usage;
+	struct k_cycle_stats *usage;
 #endif
 #endif
 
@@ -183,6 +183,9 @@
 #if defined(CONFIG_THREAD_MONITOR)
 	struct k_thread *threads; /* singly linked list of ALL threads */
 #endif
+#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
+	struct k_cycle_stats usage[CONFIG_MP_MAX_NUM_CPUS];
+#endif
 
 #if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
 	/* Need to signal an IPI at the next scheduling point */
diff --git a/kernel/init.c b/kernel/init.c
index 3d5991f..dccbad6 100644
--- a/kernel/init.c
+++ b/kernel/init.c
@@ -399,7 +399,8 @@
 		(Z_KERNEL_STACK_BUFFER(z_interrupt_stacks[id]) +
 		 K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[id]));
 #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
-	_kernel.cpus[id].usage.track_usage =
+	_kernel.cpus[id].usage = &_kernel.usage[id];
+	_kernel.cpus[id].usage->track_usage =
 		CONFIG_SCHED_THREAD_USAGE_AUTO_ENABLE;
 #endif
 
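
(Aside: the hunk above establishes that each CPU's usage pointer
aliases its slot in the kernel-wide array. A hypothetical mirror of
that wiring, reusing the stand-in types from the sketch in the commit
message:)

    static void wire_usage(struct kernel_after *k)
    {
            for (int id = 0; id < CONFIG_MP_MAX_NUM_CPUS; id++) {
                    /* mirrors _kernel.cpus[id].usage = &_kernel.usage[id] */
                    k->cpus[id].usage = &k->usage[id];
            }
    }
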
diff --git a/kernel/usage.c b/kernel/usage.c
index a44c755..57a55a7 100644
--- a/kernel/usage.c
+++ b/kernel/usage.c
@@ -35,22 +35,22 @@
 #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
 static void sched_cpu_update_usage(struct _cpu *cpu, uint32_t cycles)
 {
-	if (!cpu->usage.track_usage) {
+	if (!cpu->usage->track_usage) {
 		return;
 	}
 
 	if (cpu->current != cpu->idle_thread) {
-		cpu->usage.total += cycles;
+		cpu->usage->total += cycles;
 
 #ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
-		cpu->usage.current += cycles;
+		cpu->usage->current += cycles;
 
-		if (cpu->usage.longest < cpu->usage.current) {
-			cpu->usage.longest = cpu->usage.current;
+		if (cpu->usage->longest < cpu->usage->current) {
+			cpu->usage->longest = cpu->usage->current;
 		}
 	} else {
-		cpu->usage.current = 0;
-		cpu->usage.num_windows++;
+		cpu->usage->current = 0;
+		cpu->usage->num_windows++;
 #endif
 	}
 }
@@ -148,16 +148,16 @@
 		cpu->usage0 = now;
 	}
 
-	stats->total_cycles     = cpu->usage.total;
+	stats->total_cycles     = cpu->usage->total;
 #ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
-	stats->current_cycles   = cpu->usage.current;
-	stats->peak_cycles      = cpu->usage.longest;
+	stats->current_cycles   = cpu->usage->current;
+	stats->peak_cycles      = cpu->usage->longest;
 
-	if (cpu->usage.num_windows == 0) {
+	if (cpu->usage->num_windows == 0) {
 		stats->average_cycles = 0;
 	} else {
 		stats->average_cycles = stats->total_cycles /
-					cpu->usage.num_windows;
+					cpu->usage->num_windows;
 	}
 #endif
 
@@ -282,7 +282,7 @@
 
 	key = k_spin_lock(&usage_lock);
 
-	if (_current_cpu->usage.track_usage) {
+	if (_current_cpu->usage->track_usage) {
 
 		/*
 		 * Usage tracking is already enabled on the current CPU
@@ -299,10 +299,10 @@
 	unsigned int num_cpus = arch_num_cpus();
 
 	for (uint8_t i = 0; i < num_cpus; i++) {
-		_kernel.cpus[i].usage.track_usage = true;
+		_kernel.cpus[i].usage->track_usage = true;
 #ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
-		_kernel.cpus[i].usage.num_windows++;
-		_kernel.cpus[i].usage.current = 0;
+		_kernel.cpus[i].usage->num_windows++;
+		_kernel.cpus[i].usage->current = 0;
 #endif
 	}
 
@@ -316,7 +316,7 @@
 
 	key = k_spin_lock(&usage_lock);
 
-	if (!_current_cpu->usage.track_usage) {
+	if (!_current_cpu->usage->track_usage) {
 
 		/*
 		 * Usage tracking is already disabled on the current CPU
@@ -337,7 +337,7 @@
 		if (cpu->usage0 != 0) {
 			sched_cpu_update_usage(cpu, now - cpu->usage0);
 		}
-		cpu->usage.track_usage = false;
+		cpu->usage->track_usage = false;
 	}
 
 	k_spin_unlock(&usage_lock, key);
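
(Editorial note, hedged: the object core hookup itself is not part of
this patch. One property the indirection buys, sketched with the
stand-in types from the commit message, is that a statistics consumer
can walk the single kernel-owned array directly; the helper below is
hypothetical, not a Zephyr API.)

    /* Hypothetical consumer: sum non-idle cycles across all CPUs */
    static uint64_t total_non_idle_cycles(const struct kernel_after *k)
    {
            uint64_t sum = 0;

            for (int id = 0; id < CONFIG_MP_MAX_NUM_CPUS; id++) {
                    sum += k->usage[id].total;
            }
            return sum;
    }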