/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <drivers/timer/system_timer.h>
#include <sys_clock.h>
#include <spinlock.h>

#define TIMER_IRQ UTIL_CAT(XCHAL_TIMER,			\
			   UTIL_CAT(CONFIG_XTENSA_TIMER_ID, _INTERRUPT))

#define CYC_PER_TICK (sys_clock_hw_cycles_per_sec()	\
		      / CONFIG_SYS_CLOCK_TICKS_PER_SEC)
#define MAX_CYC 0xffffffffu
#define MAX_TICKS ((MAX_CYC - CYC_PER_TICK) / CYC_PER_TICK)
#define MIN_DELAY 1000

static struct k_spinlock lock;
static unsigned int last_count;
/*
 * Write @a val to the CCOMPARE special register selected by
 * CONFIG_XTENSA_TIMER_ID.  The timer interrupt fires when the
 * free-running CCOUNT counter reaches this value.
 */
static void set_ccompare(uint32_t val)
{
	__asm__ volatile ("wsr.CCOMPARE" STRINGIFY(CONFIG_XTENSA_TIMER_ID) " %0"
			  :: "r"(val));
}
/* Read the Xtensa CCOUNT cycle counter (free-running, wraps at 32 bits). */
static uint32_t ccount(void)
{
	uint32_t val;

	__asm__ volatile ("rsr.CCOUNT %0" : "=r"(val));
	return val;
}
Tomasz Bursztyka4dcfb552020-06-17 14:58:56 +020036static void ccompare_isr(const void *arg)
Youvedeep Singh833025d2017-10-27 21:38:42 +053037{
Andy Ross39b2a092018-10-23 09:08:58 -070038 ARG_UNUSED(arg);
Youvedeep Singh833025d2017-10-27 21:38:42 +053039
Andy Ross39b2a092018-10-23 09:08:58 -070040 k_spinlock_key_t key = k_spin_lock(&lock);
Kumar Galaa1b77fd2020-05-27 11:26:57 -050041 uint32_t curr = ccount();
42 uint32_t dticks = (curr - last_count) / CYC_PER_TICK;
Youvedeep Singh833025d2017-10-27 21:38:42 +053043
Andy Ross39b2a092018-10-23 09:08:58 -070044 last_count += dticks * CYC_PER_TICK;
Youvedeep Singh833025d2017-10-27 21:38:42 +053045
Wentong Wu72227572020-05-12 10:32:40 +080046 if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
Kumar Galaa1b77fd2020-05-27 11:26:57 -050047 uint32_t next = last_count + CYC_PER_TICK;
Youvedeep Singh833025d2017-10-27 21:38:42 +053048
Kumar Galaa1b77fd2020-05-27 11:26:57 -050049 if ((int32_t)(next - curr) < MIN_DELAY) {
Andy Ross39b2a092018-10-23 09:08:58 -070050 next += CYC_PER_TICK;
Youvedeep Singh833025d2017-10-27 21:38:42 +053051 }
Andy Ross39b2a092018-10-23 09:08:58 -070052 set_ccompare(next);
Youvedeep Singh833025d2017-10-27 21:38:42 +053053 }
Mazen NEIFER8f92bc22017-01-26 15:51:33 +010054
Andy Ross39b2a092018-10-23 09:08:58 -070055 k_spin_unlock(&lock, key);
Anas Nashif9c1efe62021-02-25 15:33:15 -050056 sys_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ? dticks : 1);
Mazen NEIFER8f92bc22017-01-26 15:51:33 +010057}
Anas Nashif5d6c2192021-03-22 10:28:25 -040059int sys_clock_driver_init(const struct device *dev)
Mazen NEIFER8f92bc22017-01-26 15:51:33 +010060{
Anas Nashif5d6c2192021-03-22 10:28:25 -040061 ARG_UNUSED(dev);
Flavio Ceoline1e4a402019-07-17 21:32:19 -070062
Andy Ross39b2a092018-10-23 09:08:58 -070063 IRQ_CONNECT(TIMER_IRQ, 0, ccompare_isr, 0, 0);
64 set_ccompare(ccount() + CYC_PER_TICK);
65 irq_enable(TIMER_IRQ);
Mazen NEIFER8f92bc22017-01-26 15:51:33 +010066 return 0;
67}
/*
 * Program the next timer interrupt @a ticks ticks in the future.  Only
 * meaningful in tickless mode; in ticking mode the ISR rearms itself
 * and this is a no-op.  @a idle is unused on this hardware.
 */
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
	ARG_UNUSED(idle);

#if defined(CONFIG_TICKLESS_KERNEL)
	if (ticks == K_TICKS_FOREVER) {
		ticks = MAX_TICKS;
	}
	ticks = CLAMP(ticks - 1, 0, (int32_t)MAX_TICKS);

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t now = ccount();
	uint32_t target, roundup;

	/* Convert to cycles and round UP to the next tick boundary
	 * relative to last_count, saturating at MAX_CYC if the
	 * adjustment would overflow 32 bits.
	 */
	target = ticks * CYC_PER_TICK;
	roundup = (now - last_count) + (CYC_PER_TICK - 1);
	if (target <= MAX_CYC - roundup) {
		target += roundup;
	} else {
		target = MAX_CYC;
	}
	target = (target / CYC_PER_TICK) * CYC_PER_TICK;
	target += last_count;

	/* Keep the compare value far enough ahead of CCOUNT that the
	 * hardware cannot race past it before the write lands.
	 */
	if ((target - now) < MIN_DELAY) {
		target += CYC_PER_TICK;
	}

	set_ccompare(target);
	k_spin_unlock(&lock, key);
#endif
}
Anas Nashif9c1efe62021-02-25 15:33:15 -0500100uint32_t sys_clock_elapsed(void)
Andy Ross39b2a092018-10-23 09:08:58 -0700101{
102 if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
103 return 0;
104 }
105
106 k_spinlock_key_t key = k_spin_lock(&lock);
Kumar Galaa1b77fd2020-05-27 11:26:57 -0500107 uint32_t ret = (ccount() - last_count) / CYC_PER_TICK;
Andy Ross39b2a092018-10-23 09:08:58 -0700108
109 k_spin_unlock(&lock, key);
110 return ret;
111}
/* Raw 32-bit hardware cycle counter, backing k_cycle_get_32(). */
uint32_t sys_clock_cycle_get_32(void)
{
	return ccount();
}
#ifdef CONFIG_SMP
/*
 * Timer bring-up for secondary CPUs: arm this core's CCOMPARE one tick
 * ahead of its current CCOUNT and unmask the timer IRQ.  The handler
 * itself is connected once, in sys_clock_driver_init().
 */
void smp_timer_init(void)
{
	set_ccompare(ccount() + CYC_PER_TICK);
	irq_enable(TIMER_IRQ);
}
#endif