/*
 * Copyright (c) 2018 Linaro Limited
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief File descriptor table
 *
 * This file provides a generic file descriptor table implementation, suitable
 * for any I/O object implementing POSIX I/O semantics (i.e. read/write +
 * aux operations).
 */

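/*
 * Usage sketch (illustrative only; "my_obj", "my_read_vmeth", etc. are
 * hypothetical names, not part of this file): an I/O object is exposed as a
 * file descriptor by pairing it with a struct fd_op_vtable, e.g.
 *
 *	static const struct fd_op_vtable my_fd_op_vtable = {
 *		.read = my_read_vmeth,
 *		.write = my_write_vmeth,
 *		.close = my_close_vmeth,
 *		.ioctl = my_ioctl_vmeth,
 *	};
 *
 *	int fd = z_alloc_fd(&my_obj, &my_fd_op_vtable);
 *
 * The POSIX wrappers defined below (read(), write(), close(), ...) then look
 * up the table entry by fd and dispatch to the registered vtable operations.
 */
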
#include <errno.h>
#include <string.h>

#include <zephyr/posix/fcntl.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/fdtable.h>
#include <zephyr/sys/speculation.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/sys/atomic.h>

struct fd_entry {
	void *obj;
	const struct fd_op_vtable *vtable;
	atomic_t refcount;
	struct k_mutex lock;
	struct k_condvar cond;
};

#ifdef CONFIG_POSIX_API
static const struct fd_op_vtable stdinout_fd_op_vtable;
#endif

static struct fd_entry fdtable[CONFIG_POSIX_MAX_FDS] = {
#ifdef CONFIG_POSIX_API
	/*
	 * Predefine entries for stdin/stdout/stderr.
	 */
	{
		/* STDIN */
		.vtable = &stdinout_fd_op_vtable,
		.refcount = ATOMIC_INIT(1),
		.lock = Z_MUTEX_INITIALIZER(fdtable[0].lock),
		.cond = Z_CONDVAR_INITIALIZER(fdtable[0].cond),
	},
	{
		/* STDOUT */
		.vtable = &stdinout_fd_op_vtable,
		.refcount = ATOMIC_INIT(1),
		.lock = Z_MUTEX_INITIALIZER(fdtable[1].lock),
		.cond = Z_CONDVAR_INITIALIZER(fdtable[1].cond),
	},
	{
		/* STDERR */
		.vtable = &stdinout_fd_op_vtable,
		.refcount = ATOMIC_INIT(1),
		.lock = Z_MUTEX_INITIALIZER(fdtable[2].lock),
		.cond = Z_CONDVAR_INITIALIZER(fdtable[2].cond),
	},
#else
	{
		0
	},
#endif
};

static K_MUTEX_DEFINE(fdtable_lock);

static int z_fd_ref(int fd)
{
	return atomic_inc(&fdtable[fd].refcount) + 1;
}

static int z_fd_unref(int fd)
{
	atomic_val_t old_rc;

	/* The reference counter must be checked to avoid decrementing it below
	 * zero, which would cause a file descriptor leak. The loop below
	 * performs an atomic decrement only if the refcount is greater than
	 * zero; otherwise the refcount is not written at all.
	 */
	do {
		old_rc = atomic_get(&fdtable[fd].refcount);
		if (!old_rc) {
			return 0;
		}
	} while (!atomic_cas(&fdtable[fd].refcount, old_rc, old_rc - 1));

	if (old_rc != 1) {
		return old_rc - 1;
	}

	fdtable[fd].obj = NULL;
	fdtable[fd].vtable = NULL;

	return 0;
}

static int _find_fd_entry(void)
{
	int fd;

	for (fd = 0; fd < ARRAY_SIZE(fdtable); fd++) {
		if (!atomic_get(&fdtable[fd].refcount)) {
			return fd;
		}
	}

	errno = ENFILE;
	return -1;
}

static int _check_fd(int fd)
{
	if (fd < 0 || fd >= ARRAY_SIZE(fdtable)) {
		errno = EBADF;
		return -1;
	}

	fd = k_array_index_sanitize(fd, ARRAY_SIZE(fdtable));

	if (!atomic_get(&fdtable[fd].refcount)) {
		errno = EBADF;
		return -1;
	}

	return 0;
}

#ifdef CONFIG_ZTEST
bool fdtable_fd_is_initialized(int fd)
{
	struct k_mutex ref_lock;
	struct k_condvar ref_cond;

	if (fd < 0 || fd >= ARRAY_SIZE(fdtable)) {
		return false;
	}

	ref_lock = (struct k_mutex)Z_MUTEX_INITIALIZER(fdtable[fd].lock);
	if (memcmp(&ref_lock, &fdtable[fd].lock, sizeof(ref_lock)) != 0) {
		return false;
	}

	ref_cond = (struct k_condvar)Z_CONDVAR_INITIALIZER(fdtable[fd].cond);
	if (memcmp(&ref_cond, &fdtable[fd].cond, sizeof(ref_cond)) != 0) {
		return false;
	}

	return true;
}
#endif /* CONFIG_ZTEST */

void *z_get_fd_obj(int fd, const struct fd_op_vtable *vtable, int err)
{
	struct fd_entry *entry;

	if (_check_fd(fd) < 0) {
		return NULL;
	}

	entry = &fdtable[fd];

	if (vtable != NULL && entry->vtable != vtable) {
		errno = err;
		return NULL;
	}

	return entry->obj;
}

static int z_get_fd_by_obj_and_vtable(void *obj, const struct fd_op_vtable *vtable)
{
	int fd;

	for (fd = 0; fd < ARRAY_SIZE(fdtable); fd++) {
		if (fdtable[fd].obj == obj && fdtable[fd].vtable == vtable) {
			return fd;
		}
	}

	errno = ENFILE;
	return -1;
}

bool z_get_obj_lock_and_cond(void *obj, const struct fd_op_vtable *vtable, struct k_mutex **lock,
			     struct k_condvar **cond)
{
	int fd;
	struct fd_entry *entry;

	fd = z_get_fd_by_obj_and_vtable(obj, vtable);
	if (_check_fd(fd) < 0) {
		return false;
	}

	entry = &fdtable[fd];

	if (lock) {
		*lock = &entry->lock;
	}

	if (cond) {
		*cond = &entry->cond;
	}

	return true;
}

void *z_get_fd_obj_and_vtable(int fd, const struct fd_op_vtable **vtable,
			      struct k_mutex **lock)
{
	struct fd_entry *entry;

	if (_check_fd(fd) < 0) {
		return NULL;
	}

	entry = &fdtable[fd];
	*vtable = entry->vtable;

	if (lock) {
		*lock = &entry->lock;
	}

	return entry->obj;
}

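/*
 * Illustrative caller-side pattern (a sketch, not code used by this file;
 * "buf" and "res" are hypothetical locals): a subsystem that holds only an
 * fd can recover its object, vtable and per-fd lock, and serialize an
 * operation the same way the POSIX wrappers below do:
 *
 *	const struct fd_op_vtable *vtable;
 *	struct k_mutex *lock;
 *	void *obj;
 *
 *	obj = z_get_fd_obj_and_vtable(fd, &vtable, &lock);
 *	if (obj != NULL) {
 *		(void)k_mutex_lock(lock, K_FOREVER);
 *		res = vtable->read(obj, buf, sizeof(buf));
 *		k_mutex_unlock(lock);
 *	}
 */
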
int z_reserve_fd(void)
{
	int fd;

	(void)k_mutex_lock(&fdtable_lock, K_FOREVER);

	fd = _find_fd_entry();
	if (fd >= 0) {
		/* Mark entry as used, z_finalize_fd() will fill it in. */
		(void)z_fd_ref(fd);
		fdtable[fd].obj = NULL;
		fdtable[fd].vtable = NULL;
		k_mutex_init(&fdtable[fd].lock);
		k_condvar_init(&fdtable[fd].cond);
	}

	k_mutex_unlock(&fdtable_lock);

	return fd;
}

void z_finalize_fd(int fd, void *obj, const struct fd_op_vtable *vtable)
{
	/* Assumes fd was already bounds-checked. */
#ifdef CONFIG_USERSPACE
	/* Descriptor context objects are inserted into the table when they
	 * are ready for use. Mark the object as initialized and grant the
	 * caller (and only the caller) access.
	 *
	 * This call is a no-op if obj is invalid or points to something
	 * that is not a kernel object.
	 */
	k_object_recycle(obj);
#endif
	fdtable[fd].obj = obj;
	fdtable[fd].vtable = vtable;

	/* Let the object know about the lock just in case it needs it
	 * for something. For BSD sockets, the lock is used with condition
	 * variables to avoid keeping the lock for a long period of time.
	 */
	if (vtable && vtable->ioctl) {
		(void)z_fdtable_call_ioctl(vtable, obj, ZFD_IOCTL_SET_LOCK,
					   &fdtable[fd].lock);
	}
}

void z_free_fd(int fd)
{
	/* Assumes fd was already bounds-checked. */
	(void)z_fd_unref(fd);
}

int z_alloc_fd(void *obj, const struct fd_op_vtable *vtable)
{
	int fd;

	fd = z_reserve_fd();
	if (fd >= 0) {
		z_finalize_fd(fd, obj, vtable);
	}

	return fd;
}

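/*
 * Allocation sketch (illustrative only; "my_obj", "my_obj_init" and
 * "my_fd_op_vtable" are hypothetical): z_alloc_fd() above is the one-step
 * form. When the descriptor number is needed before the object is fully
 * constructed (e.g. to record it inside the object), the two-phase form can
 * be used instead: reserve first, finalize once ready, and release the
 * reservation on failure:
 *
 *	int fd = z_reserve_fd();
 *
 *	if (fd >= 0) {
 *		if (my_obj_init(&my_obj) == 0) {
 *			z_finalize_fd(fd, &my_obj, &my_fd_op_vtable);
 *		} else {
 *			z_free_fd(fd);
 *			fd = -1;
 *		}
 *	}
 */
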
#ifdef CONFIG_POSIX_API

ssize_t read(int fd, void *buf, size_t sz)
{
	ssize_t res;

	if (_check_fd(fd) < 0) {
		return -1;
	}

	(void)k_mutex_lock(&fdtable[fd].lock, K_FOREVER);

	res = fdtable[fd].vtable->read(fdtable[fd].obj, buf, sz);

	k_mutex_unlock(&fdtable[fd].lock);

	return res;
}
FUNC_ALIAS(read, _read, ssize_t);

ssize_t write(int fd, const void *buf, size_t sz)
{
	ssize_t res;

	if (_check_fd(fd) < 0) {
		return -1;
	}

	(void)k_mutex_lock(&fdtable[fd].lock, K_FOREVER);

	res = fdtable[fd].vtable->write(fdtable[fd].obj, buf, sz);

	k_mutex_unlock(&fdtable[fd].lock);

	return res;
}
FUNC_ALIAS(write, _write, ssize_t);

int close(int fd)
{
	int res;

	if (_check_fd(fd) < 0) {
		return -1;
	}

	(void)k_mutex_lock(&fdtable[fd].lock, K_FOREVER);

	res = fdtable[fd].vtable->close(fdtable[fd].obj);

	k_mutex_unlock(&fdtable[fd].lock);

	z_free_fd(fd);

	return res;
}
FUNC_ALIAS(close, _close, int);

int fsync(int fd)
{
	if (_check_fd(fd) < 0) {
		return -1;
	}

	return z_fdtable_call_ioctl(fdtable[fd].vtable, fdtable[fd].obj, ZFD_IOCTL_FSYNC);
}

off_t lseek(int fd, off_t offset, int whence)
{
	if (_check_fd(fd) < 0) {
		return -1;
	}

	return z_fdtable_call_ioctl(fdtable[fd].vtable, fdtable[fd].obj, ZFD_IOCTL_LSEEK,
				    offset, whence);
}
FUNC_ALIAS(lseek, _lseek, off_t);

int ioctl(int fd, unsigned long request, ...)
{
	va_list args;
	int res;

	if (_check_fd(fd) < 0) {
		return -1;
	}

	va_start(args, request);
	res = fdtable[fd].vtable->ioctl(fdtable[fd].obj, request, args);
	va_end(args);

	return res;
}

int fcntl(int fd, int cmd, ...)
{
	va_list args;
	int res;

	if (_check_fd(fd) < 0) {
		return -1;
	}

	/* Handle fdtable commands. */
	if (cmd == F_DUPFD) {
		/* Not implemented so far. */
		errno = EINVAL;
		return -1;
	}

	/* The remaining commands are per-fd and are handled by the ioctl vmethod. */
	va_start(args, cmd);
	res = fdtable[fd].vtable->ioctl(fdtable[fd].obj, cmd, args);
	va_end(args);

	return res;
}

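/*
 * Dispatch sketch (illustrative; "my_ioctl_vmeth" and its flag handling are
 * hypothetical): since fcntl() above forwards everything except F_DUPFD to
 * the object's ioctl vmethod, a backend that wants to support commands such
 * as F_GETFL/F_SETFL would typically handle them there:
 *
 *	static int my_ioctl_vmeth(void *obj, unsigned int request, va_list args)
 *	{
 *		switch (request) {
 *		case F_GETFL:
 *			return my_get_flags(obj);
 *		case F_SETFL:
 *			return my_set_flags(obj, va_arg(args, int));
 *		default:
 *			errno = EOPNOTSUPP;
 *			return -1;
 *		}
 *	}
 */
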
/*
 * fd operations for stdin/stdout/stderr
 */

int z_impl_zephyr_write_stdout(const char *buf, int nbytes);

static ssize_t stdinout_read_vmeth(void *obj, void *buffer, size_t count)
{
	return 0;
}

static ssize_t stdinout_write_vmeth(void *obj, const void *buffer, size_t count)
{
#if defined(CONFIG_BOARD_NATIVE_POSIX)
	return write(1, buffer, count);
#elif defined(CONFIG_NEWLIB_LIBC) || defined(CONFIG_ARCMWDT_LIBC)
	return z_impl_zephyr_write_stdout(buffer, count);
#else
	return 0;
#endif
}

static int stdinout_ioctl_vmeth(void *obj, unsigned int request, va_list args)
{
	errno = EINVAL;
	return -1;
}

static const struct fd_op_vtable stdinout_fd_op_vtable = {
	.read = stdinout_read_vmeth,
	.write = stdinout_write_vmeth,
	.ioctl = stdinout_ioctl_vmeth,
};

#endif /* CONFIG_POSIX_API */