/*
 * Copyright (c) 2018 Linaro Limited.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/sys/printk.h>
#include <zephyr/console/tty.h>

static int tty_irq_input_hook(struct tty_serial *tty, uint8_t c);
static int tty_putchar(struct tty_serial *tty, uint8_t c);

static void tty_uart_isr(const struct device *dev, void *user_data)
{
	struct tty_serial *tty = user_data;

	uart_irq_update(dev);

	if (uart_irq_rx_ready(dev)) {
		uint8_t c;

		while (1) {
			if (uart_fifo_read(dev, &c, 1) == 0) {
				break;
			}
			tty_irq_input_hook(tty, c);
		}
	}

	if (uart_irq_tx_ready(dev)) {
		if (tty->tx_get == tty->tx_put) {
			/* Output buffer empty, don't bother
			 * us with tx interrupts
			 */
			uart_irq_tx_disable(dev);
		} else {
			uart_fifo_fill(dev, &tty->tx_ringbuf[tty->tx_get++], 1);
			if (tty->tx_get >= tty->tx_ringbuf_sz) {
				tty->tx_get = 0U;
			}
			k_sem_give(&tty->tx_sem);
		}
	}
}

static int tty_irq_input_hook(struct tty_serial *tty, uint8_t c)
{
	int rx_next = tty->rx_put + 1;

	if (rx_next >= tty->rx_ringbuf_sz) {
		rx_next = 0;
	}

	if (rx_next == tty->rx_get) {
		/* Try to give a clue to the user that some input was lost */
		tty_putchar(tty, '~');
		return 1;
	}

	tty->rx_ringbuf[tty->rx_put] = c;
	tty->rx_put = rx_next;
	k_sem_give(&tty->rx_sem);

	return 1;
}

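/*
 * Note on the ring-buffer convention used above and in tty_putchar()
 * below: "get == put" means the buffer is empty, and advancing "put" onto
 * "get" would mean full, so one slot is always left unused (which is also
 * why tty_set_tx_buf() below initializes tx_sem to size - 1). A
 * hypothetical helper pair, purely illustrative and not part of this file,
 * showing the same arithmetic:
 *
 *	static inline bool ringbuf_empty(size_t get, size_t put)
 *	{
 *		return get == put;
 *	}
 *
 *	static inline bool ringbuf_full(size_t get, size_t put, size_t sz)
 *	{
 *		return ((put + 1U) % sz) == get;
 *	}
 */
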
static int tty_putchar(struct tty_serial *tty, uint8_t c)
{
	unsigned int key;
	int tx_next;
	int res;

	res = k_sem_take(&tty->tx_sem,
			 k_is_in_isr() ? K_NO_WAIT :
			 SYS_TIMEOUT_MS(tty->tx_timeout));
	if (res < 0) {
		return res;
	}

	key = irq_lock();
	tx_next = tty->tx_put + 1;
	if (tx_next >= tty->tx_ringbuf_sz) {
		tx_next = 0;
	}
	if (tx_next == tty->tx_get) {
		irq_unlock(key);
		return -ENOSPC;
	}

	tty->tx_ringbuf[tty->tx_put] = c;
	tty->tx_put = tx_next;

	irq_unlock(key);
	uart_irq_tx_enable(tty->uart_dev);
	return 0;
}

ssize_t tty_write(struct tty_serial *tty, const void *buf, size_t size)
{
	const uint8_t *p = buf;
	size_t out_size = 0;
	int res = 0;

	if (tty->tx_ringbuf_sz == 0U) {
		/* Unbuffered operation, implicitly blocking. */
		out_size = size;

		while (size--) {
			uart_poll_out(tty->uart_dev, *p++);
		}

		return out_size;
	}

	while (size--) {
		res = tty_putchar(tty, *p++);
		if (res < 0) {
			/* If we didn't transmit anything, return the error. */
			if (out_size == 0) {
				errno = -res;
				return res;
			}

			/*
			 * Otherwise, return how much we transmitted. If the
			 * error was transient (like EAGAIN), the user might
			 * not even see it on the next call; if it's
			 * non-transient, they'll get it on the next call.
			 */
			return out_size;
		}

		out_size++;
	}

	return out_size;
}

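/*
 * Usage sketch for tty_write(); the helper below is illustrative and not
 * part of this file. In buffered mode a short count is returned as soon as
 * a character cannot be queued (e.g. the tx timeout expires), so a caller
 * that must push out a whole message can loop, treating a negative return
 * (with errno set) as the only hard stop:
 *
 *	static void send_all(struct tty_serial *tty, const char *s, size_t len)
 *	{
 *		while (len > 0) {
 *			ssize_t n = tty_write(tty, s, len);
 *
 *			if (n < 0) {
 *				break;
 *			}
 *			s += n;
 *			len -= n;
 *		}
 *	}
 */
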
static int tty_getchar(struct tty_serial *tty)
{
	unsigned int key;
	uint8_t c;
	int res;

	res = k_sem_take(&tty->rx_sem, SYS_TIMEOUT_MS(tty->rx_timeout));
	if (res < 0) {
		return res;
	}

	key = irq_lock();
	c = tty->rx_ringbuf[tty->rx_get++];
	if (tty->rx_get >= tty->rx_ringbuf_sz) {
		tty->rx_get = 0U;
	}
	irq_unlock(key);

	return c;
}

static ssize_t tty_read_unbuf(struct tty_serial *tty, void *buf, size_t size)
{
	uint8_t *p = buf;
	size_t out_size = 0;
	int res = 0;
	uint32_t timeout = tty->rx_timeout;

	while (size) {
		uint8_t c;
		res = uart_poll_in(tty->uart_dev, &c);
		if (res <= -2) {
			/* An error occurred; the best we can do is return the
			 * accumulated data without an error, or return the
			 * error directly if there is none.
			 */
			if (out_size == 0) {
				errno = res;
				return -1;
			}
			break;
		}

		if (res == 0) {
			*p++ = c;
			out_size++;
			size--;
		}

		if (size == 0 ||
		    ((timeout != SYS_FOREVER_MS) && timeout-- == 0U)) {
			break;
		}

		/* Avoid 100% busy-polling, and yet try to process bursts
		 * of data without extra delays.
		 */
		if (res == -1) {
			k_sleep(K_MSEC(1));
		}
	}

	return out_size;
}

ssize_t tty_read(struct tty_serial *tty, void *buf, size_t size)
{
	uint8_t *p = buf;
	size_t out_size = 0;
	int res = 0;

	if (tty->rx_ringbuf_sz == 0U) {
		return tty_read_unbuf(tty, buf, size);
	}

	while (size--) {
		res = tty_getchar(tty);
		if (res < 0) {
			/* If we didn't read anything, return the error. */
			if (out_size == 0) {
				errno = -res;
				return res;
			}

			/*
			 * Otherwise, return how much we read. If the error
			 * was transient (like EAGAIN), the user might not
			 * even see it on the next call; if it's
			 * non-transient, they'll get it on the next call.
			 */
			return out_size;
		}

		*p++ = (uint8_t)res;
		out_size++;
	}

	return out_size;
}

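/*
 * Usage sketch for tty_read(), illustrative only; the buffer handling and
 * the tty_set_rx_timeout() accessor (declared in <zephyr/console/tty.h>)
 * are assumptions of this example. With a finite rx timeout, a short or
 * negative return simply means no further character arrived in time:
 *
 *	static size_t read_some(struct tty_serial *tty, uint8_t *buf,
 *				size_t len)
 *	{
 *		ssize_t n;
 *
 *		tty_set_rx_timeout(tty, 100);
 *		n = tty_read(tty, buf, len);
 *
 *		return (n > 0) ? (size_t)n : 0;
 *	}
 */
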
int tty_init(struct tty_serial *tty, const struct device *uart_dev)
{
	if (!uart_dev) {
		return -ENODEV;
	}

	tty->uart_dev = uart_dev;

	/* We start in unbuffered mode. */
	tty->rx_ringbuf = NULL;
	tty->rx_ringbuf_sz = 0U;
	tty->tx_ringbuf = NULL;
	tty->tx_ringbuf_sz = 0U;

	tty->rx_get = tty->rx_put = tty->tx_get = tty->tx_put = 0U;

	tty->rx_timeout = SYS_FOREVER_MS;
	tty->tx_timeout = SYS_FOREVER_MS;

	uart_irq_callback_user_data_set(uart_dev, tty_uart_isr, tty);

	return 0;
}

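/*
 * Typical setup sketch (illustrative; the console device choice, buffer
 * sizes and names are assumptions, not part of this file): initialize the
 * tty on a UART, then switch it to buffered mode with tty_set_rx_buf() /
 * tty_set_tx_buf() below.
 *
 *	static struct tty_serial console_tty;
 *	static uint8_t rx_ringbuf[64];
 *	static uint8_t tx_ringbuf[64];
 *
 *	static int console_setup(void)
 *	{
 *		const struct device *uart =
 *			DEVICE_DT_GET(DT_CHOSEN(zephyr_console));
 *		int ret = tty_init(&console_tty, uart);
 *
 *		if (ret < 0) {
 *			return ret;
 *		}
 *		tty_set_rx_buf(&console_tty, rx_ringbuf, sizeof(rx_ringbuf));
 *		tty_set_tx_buf(&console_tty, tx_ringbuf, sizeof(tx_ringbuf));
 *		return 0;
 *	}
 */
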
int tty_set_rx_buf(struct tty_serial *tty, void *buf, size_t size)
{
	uart_irq_rx_disable(tty->uart_dev);

	tty->rx_ringbuf = buf;
	tty->rx_ringbuf_sz = size;

	if (size > 0) {
		k_sem_init(&tty->rx_sem, 0, K_SEM_MAX_LIMIT);
		uart_irq_rx_enable(tty->uart_dev);
	}

	return 0;
}

int tty_set_tx_buf(struct tty_serial *tty, void *buf, size_t size)
{
	uart_irq_tx_disable(tty->uart_dev);

	tty->tx_ringbuf = buf;
	tty->tx_ringbuf_sz = size;

	k_sem_init(&tty->tx_sem, size - 1, K_SEM_MAX_LIMIT);

	/* The new buffer is initially empty; there is no need to re-enable
	 * interrupts, that will be done when needed (on the first output
	 * char).
	 */

	return 0;
}
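
/*
 * Passing a NULL buffer with size 0 to tty_set_rx_buf() returns the tty
 * to unbuffered (polled) input, since tty_read() falls back to
 * tty_read_unbuf() whenever rx_ringbuf_sz is 0. A minimal sketch, assuming
 * a previously configured console_tty:
 *
 *	tty_set_rx_buf(&console_tty, NULL, 0);
 */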