/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>

#include <zephyr/timing/timing.h>
#include <ksched.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys/check.h>

/* Need one of these for this to work */
#if !defined(CONFIG_USE_SWITCH) && !defined(CONFIG_INSTRUMENT_THREAD_SWITCHING)
#error "No data backend configured for CONFIG_SCHED_THREAD_USAGE"
#endif /* !CONFIG_USE_SWITCH && !CONFIG_INSTRUMENT_THREAD_SWITCHING */

static struct k_spinlock usage_lock;

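/*
 * Read the cycle counter used for usage accounting. Zero is reserved
 * as a "not measuring" marker, so a genuine reading of zero is
 * reported as one.
 */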
static uint32_t usage_now(void)
{
        uint32_t now;

#ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
        now = (uint32_t)timing_counter_get();
#else
        now = k_cycle_get_32();
#endif /* CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS */

        /* Edge case: we use a zero as a null ("stop() already called") */
        return (now == 0) ? 1 : now;
}

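/*
 * Add 'cycles' to the per-CPU usage counters. Cycles spent in the
 * idle thread are not charged to the CPU's total; with
 * CONFIG_SCHED_THREAD_USAGE_ANALYSIS, entering idle instead closes
 * the current measurement window.
 */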
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
static void sched_cpu_update_usage(struct _cpu *cpu, uint32_t cycles)
{
        if (!cpu->usage->track_usage) {
                return;
        }

        if (cpu->current != cpu->idle_thread) {
                cpu->usage->total += cycles;

#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
                cpu->usage->current += cycles;

                if (cpu->usage->longest < cpu->usage->current) {
                        cpu->usage->longest = cpu->usage->current;
                }
        } else {
                cpu->usage->current = 0;
                cpu->usage->num_windows++;
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
        }
}
#else
#define sched_cpu_update_usage(cpu, cycles) do { } while (0)
#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */

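/*
 * Add 'cycles' to a thread's usage counters and, when analysis is
 * enabled, track the longest single execution window seen so far.
 */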
static void sched_thread_update_usage(struct k_thread *thread, uint32_t cycles)
{
        thread->base.usage.total += cycles;

#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
        thread->base.usage.current += cycles;

        if (thread->base.usage.longest < thread->base.usage.current) {
                thread->base.usage.longest = thread->base.usage.current;
        }
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
}

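/*
 * Open a new execution window for 'thread' on the current CPU;
 * invoked from the scheduler when the thread is switched in.
 */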
void z_sched_usage_start(struct k_thread *thread)
{
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
        k_spinlock_key_t key;

        key = k_spin_lock(&usage_lock);

        _current_cpu->usage0 = usage_now(); /* Always update */

        if (thread->base.usage.track_usage) {
                thread->base.usage.num_windows++;
                thread->base.usage.current = 0;
        }

        k_spin_unlock(&usage_lock, key);
#else
        /* One write through a volatile pointer doesn't require
         * synchronization as long as _usage() treats it as volatile
         * (we can't race with _stop() by design).
         */

        _current_cpu->usage0 = usage_now();
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
}

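/*
 * Close the current execution window: charge the elapsed cycles to
 * the outgoing thread and to the CPU, then zero usage0 so a repeated
 * stop without an intervening start is a no-op.
 */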
void z_sched_usage_stop(void)
{
        k_spinlock_key_t k = k_spin_lock(&usage_lock);

        struct _cpu *cpu = _current_cpu;

        uint32_t u0 = cpu->usage0;

        if (u0 != 0) {
                uint32_t cycles = usage_now() - u0;

                if (cpu->current->base.usage.track_usage) {
                        sched_thread_update_usage(cpu->current, cycles);
                }

                sched_cpu_update_usage(cpu, cycles);
        }

        cpu->usage0 = 0;
        k_spin_unlock(&usage_lock, k);
}

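/*
 * Report runtime stats for the CPU identified by 'cpu_id'. When that
 * CPU is the caller's own, the open execution window is first folded
 * into the thread and CPU counters so the snapshot is up-to-date.
 */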
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
void z_sched_cpu_usage(uint8_t cpu_id, struct k_thread_runtime_stats *stats)
{
        k_spinlock_key_t key;
        struct _cpu *cpu;

        key = k_spin_lock(&usage_lock);
        cpu = _current_cpu;

        if (&_kernel.cpus[cpu_id] == cpu) {
                uint32_t now = usage_now();
                uint32_t cycles = now - cpu->usage0;

                /*
                 * Getting stats for the current CPU. Update both its
                 * current thread stats and the CPU stats as the CPU's
                 * [usage0] field will also get updated. This keeps all
                 * that information up-to-date.
                 */

                if (cpu->current->base.usage.track_usage) {
                        sched_thread_update_usage(cpu->current, cycles);
                }

                sched_cpu_update_usage(cpu, cycles);

                cpu->usage0 = now;
        }

        stats->total_cycles = cpu->usage->total;
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
        stats->current_cycles = cpu->usage->current;
        stats->peak_cycles = cpu->usage->longest;

        if (cpu->usage->num_windows == 0) {
                stats->average_cycles = 0;
        } else {
                stats->average_cycles = stats->total_cycles /
                                        cpu->usage->num_windows;
        }
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */

        stats->idle_cycles =
                _kernel.cpus[cpu_id].idle_thread->base.usage.total;

        stats->execution_cycles = stats->total_cycles + stats->idle_cycles;

        k_spin_unlock(&usage_lock, key);
}
#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */

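/*
 * Report runtime stats for 'thread'. When the thread is the one
 * currently running on the caller's CPU, the open execution window
 * is first folded into its counters.
 */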
void z_sched_thread_usage(struct k_thread *thread,
                          struct k_thread_runtime_stats *stats)
{
        struct _cpu *cpu;
        k_spinlock_key_t key;

        key = k_spin_lock(&usage_lock);
        cpu = _current_cpu;

        if (thread == cpu->current) {
                uint32_t now = usage_now();
                uint32_t cycles = now - cpu->usage0;

                /*
                 * Getting stats for the current thread. Update both the
                 * current thread stats and its CPU stats as the CPU's
                 * [usage0] field will also get updated. This keeps all
                 * that information up-to-date.
                 */

                if (thread->base.usage.track_usage) {
                        sched_thread_update_usage(thread, cycles);
                }

                sched_cpu_update_usage(cpu, cycles);

                cpu->usage0 = now;
        }

        stats->execution_cycles = thread->base.usage.total;
        stats->total_cycles = thread->base.usage.total;

        /* Copy-out the thread's usage stats */

#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
        stats->current_cycles = thread->base.usage.current;
        stats->peak_cycles = thread->base.usage.longest;

        if (thread->base.usage.num_windows == 0) {
                stats->average_cycles = 0;
        } else {
                stats->average_cycles = stats->total_cycles /
                                        thread->base.usage.num_windows;
        }
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
        stats->idle_cycles = 0;
#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */

        k_spin_unlock(&usage_lock, key);
}

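/*
 * Per-thread enable/disable of usage tracking. Enabling opens a
 * fresh measurement window; disabling first folds any open window on
 * the local CPU into the thread's counters.
 *
 * A minimal usage sketch, assuming the public wrappers declared in
 * <zephyr/kernel.h> (e.g. k_thread_runtime_stats_get()):
 *
 *      k_thread_runtime_stats_t stats;
 *
 *      k_thread_runtime_stats_enable(tid);
 *      ...                          (let the thread run for a while)
 *      k_thread_runtime_stats_get(tid, &stats);
 *      printk("ran for %llu cycles\n", stats.execution_cycles);
 *      k_thread_runtime_stats_disable(tid);
 */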
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
int k_thread_runtime_stats_enable(k_tid_t thread)
{
        k_spinlock_key_t key;

        CHECKIF(thread == NULL) {
                return -EINVAL;
        }

        key = k_spin_lock(&usage_lock);

        if (!thread->base.usage.track_usage) {
                thread->base.usage.track_usage = true;
                thread->base.usage.num_windows++;
                thread->base.usage.current = 0;
        }

        k_spin_unlock(&usage_lock, key);

        return 0;
}

int k_thread_runtime_stats_disable(k_tid_t thread)
{
        k_spinlock_key_t key;

        CHECKIF(thread == NULL) {
                return -EINVAL;
        }

        key = k_spin_lock(&usage_lock);
        struct _cpu *cpu = _current_cpu;

        if (thread->base.usage.track_usage) {
                thread->base.usage.track_usage = false;

                if (thread == cpu->current) {
                        uint32_t cycles = usage_now() - cpu->usage0;

                        sched_thread_update_usage(thread, cycles);
                        sched_cpu_update_usage(cpu, cycles);
                }
        }

        k_spin_unlock(&usage_lock, key);

        return 0;
}
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */

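/*
 * System-wide enable/disable of usage tracking. The track_usage flag
 * is kept in the same state on every CPU, so the current CPU's flag
 * is a reliable proxy for all of them.
 */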
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
void k_sys_runtime_stats_enable(void)
{
        k_spinlock_key_t key;

        key = k_spin_lock(&usage_lock);

        if (_current_cpu->usage->track_usage) {

                /*
                 * Usage tracking is already enabled on the current CPU
                 * and thus on all other CPUs (if applicable). There is
                 * nothing left to do.
                 */

                k_spin_unlock(&usage_lock, key);
                return;
        }

        /* Enable gathering of runtime stats on each CPU */

        unsigned int num_cpus = arch_num_cpus();

        for (uint8_t i = 0; i < num_cpus; i++) {
                _kernel.cpus[i].usage->track_usage = true;
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
                _kernel.cpus[i].usage->num_windows++;
                _kernel.cpus[i].usage->current = 0;
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
        }

        k_spin_unlock(&usage_lock, key);
}

void k_sys_runtime_stats_disable(void)
{
        struct _cpu *cpu;
        k_spinlock_key_t key;

        key = k_spin_lock(&usage_lock);

        if (!_current_cpu->usage->track_usage) {

                /*
                 * Usage tracking is already disabled on the current CPU
                 * and thus on all other CPUs (if applicable). There is
                 * nothing left to do.
                 */

                k_spin_unlock(&usage_lock, key);
                return;
        }

        uint32_t now = usage_now();

        unsigned int num_cpus = arch_num_cpus();

        for (uint8_t i = 0; i < num_cpus; i++) {
                cpu = &_kernel.cpus[i];
                if (cpu->usage0 != 0) {
                        sched_cpu_update_usage(cpu, now - cpu->usage0);
                }
                cpu->usage->track_usage = false;
        }

        k_spin_unlock(&usage_lock, key);
}
#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */

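/*
 * Object core statistics hooks for threads: "raw" copies the
 * accumulating k_cycle_stats counters verbatim, "query" returns the
 * processed runtime stats, and the remaining hooks map onto the
 * per-thread reset/enable/disable operations above.
 */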
#ifdef CONFIG_OBJ_CORE_STATS_THREAD
int z_thread_stats_raw(struct k_obj_core *obj_core, void *stats)
{
        k_spinlock_key_t key;

        key = k_spin_lock(&usage_lock);
        memcpy(stats, obj_core->stats, sizeof(struct k_cycle_stats));
        k_spin_unlock(&usage_lock, key);

        return 0;
}

int z_thread_stats_query(struct k_obj_core *obj_core, void *stats)
{
        struct k_thread *thread;

        thread = CONTAINER_OF(obj_core, struct k_thread, obj_core);

        z_sched_thread_usage(thread, stats);

        return 0;
}

int z_thread_stats_reset(struct k_obj_core *obj_core)
{
        k_spinlock_key_t key;
        struct k_cycle_stats *stats;
        struct k_thread *thread;

        thread = CONTAINER_OF(obj_core, struct k_thread, obj_core);
        key = k_spin_lock(&usage_lock);
        stats = obj_core->stats;

        stats->total = 0ULL;
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
        stats->current = 0ULL;
        stats->longest = 0ULL;
        stats->num_windows = (thread->base.usage.track_usage) ? 1U : 0U;
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */

        if (thread != _current_cpu->current) {

                /*
                 * If the thread is not running, there is nothing else to do.
                 * If the thread is running on another core, then it is not
                 * safe to do anything else but unlock and return (and pretend
                 * that its stats were reset at the start of its execution
                 * window).
                 */

                k_spin_unlock(&usage_lock, key);

                return 0;
        }

        /* Update the current CPU stats. */

        uint32_t now = usage_now();
        uint32_t cycles = now - _current_cpu->usage0;

        sched_cpu_update_usage(_current_cpu, cycles);

        _current_cpu->usage0 = now;

        k_spin_unlock(&usage_lock, key);

        return 0;
}

int z_thread_stats_disable(struct k_obj_core *obj_core)
{
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
        struct k_thread *thread;

        thread = CONTAINER_OF(obj_core, struct k_thread, obj_core);

        return k_thread_runtime_stats_disable(thread);
#else
        return -ENOTSUP;
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
}

int z_thread_stats_enable(struct k_obj_core *obj_core)
{
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
        struct k_thread *thread;

        thread = CONTAINER_OF(obj_core, struct k_thread, obj_core);

        return k_thread_runtime_stats_enable(thread);
#else
        return -ENOTSUP;
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
}
#endif /* CONFIG_OBJ_CORE_STATS_THREAD */

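/*
 * Object core statistics hooks for the per-CPU and kernel-wide
 * system objects; the "query" variants reuse the scheduler-level
 * accessors above.
 */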
#ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
int z_cpu_stats_raw(struct k_obj_core *obj_core, void *stats)
{
        k_spinlock_key_t key;

        key = k_spin_lock(&usage_lock);
        memcpy(stats, obj_core->stats, sizeof(struct k_cycle_stats));
        k_spin_unlock(&usage_lock, key);

        return 0;
}

int z_cpu_stats_query(struct k_obj_core *obj_core, void *stats)
{
        struct _cpu *cpu;

        cpu = CONTAINER_OF(obj_core, struct _cpu, obj_core);

        z_sched_cpu_usage(cpu->id, stats);

        return 0;
}
#endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */

#ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
int z_kernel_stats_raw(struct k_obj_core *obj_core, void *stats)
{
        k_spinlock_key_t key;

        key = k_spin_lock(&usage_lock);
        memcpy(stats, obj_core->stats,
               CONFIG_MP_MAX_NUM_CPUS * sizeof(struct k_cycle_stats));
        k_spin_unlock(&usage_lock, key);

        return 0;
}

int z_kernel_stats_query(struct k_obj_core *obj_core, void *stats)
{
        ARG_UNUSED(obj_core);

        return k_thread_runtime_stats_all_get(stats);
}
#endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */