/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Pipes
 */

#include <kernel.h>
#include <kernel_structs.h>

#include <toolchain.h>
#include <ksched.h>
#include <wait_q.h>
#include <init.h>
#include <syscall_handler.h>
#include <kernel_internal.h>
#include <sys/check.h>

struct k_pipe_desc {
	unsigned char *buffer;		/* Position in src/dest buffer */
	size_t bytes_to_xfer;		/* # bytes left to transfer */
#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0)
	struct k_mem_block *block;	/* Pointer to memory block */
	struct k_mem_block copy_block;	/* For backwards compatibility */
	struct k_sem *sem;		/* Semaphore to give if async */
#endif
};

struct k_pipe_async {
	struct _thread_base thread;	/* Dummy thread object */
	struct k_pipe_desc desc;	/* Pipe message descriptor */
};

#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0)
/* Stack of unused asynchronous message descriptors */
K_STACK_DEFINE(pipe_async_msgs, CONFIG_NUM_PIPE_ASYNC_MSGS);

/*
 * Do run-time initialization of the pipe object subsystem.
 */
static int init_pipes_module(const struct device *dev)
{
	ARG_UNUSED(dev);

	/* Array of asynchronous message descriptors */
	static struct k_pipe_async __noinit async_msg[CONFIG_NUM_PIPE_ASYNC_MSGS];

	/*
	 * Create a pool of asynchronous pipe message descriptors.
	 *
	 * A dummy thread requires minimal initialization, since it never gets
	 * to execute. The _THREAD_DUMMY flag is sufficient to distinguish a
	 * dummy thread from a real one. The threads are *not* added to the
	 * kernel's list of known threads.
	 *
	 * Once initialized, the address of each descriptor is added to a stack
	 * that governs access to them.
	 */
	for (int i = 0; i < CONFIG_NUM_PIPE_ASYNC_MSGS; i++) {
		async_msg[i].thread.thread_state = _THREAD_DUMMY;
		async_msg[i].thread.swap_data = &async_msg[i].desc;

		z_init_thread_timeout(&async_msg[i].thread);

		k_stack_push(&pipe_async_msgs, (stack_data_t)&async_msg[i]);
	}

	/* Complete initialization of statically defined pipes. */

	return 0;
}

SYS_INIT(init_pipes_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);

#endif /* CONFIG_NUM_PIPE_ASYNC_MSGS > 0 */

void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size)
{
	pipe->buffer = buffer;
	pipe->size = size;
	pipe->bytes_used = 0;
	pipe->read_index = 0;
	pipe->write_index = 0;
	pipe->lock = (struct k_spinlock){};
	z_waitq_init(&pipe->wait_q.writers);
	z_waitq_init(&pipe->wait_q.readers);
	SYS_PORT_TRACING_OBJ_INIT(k_pipe, pipe);

	pipe->flags = 0;
	z_object_init(pipe);
}
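
/*
 * Usage sketch (illustrative only, not part of this file's build): a pipe
 * backed by caller-provided storage. The 64-byte buffer size is an
 * arbitrary choice for the example.
 *
 *	static unsigned char ring_buffer[64];
 *	static struct k_pipe my_pipe;
 *
 *	k_pipe_init(&my_pipe, ring_buffer, sizeof(ring_buffer));
 */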

int z_impl_k_pipe_alloc_init(struct k_pipe *pipe, size_t size)
{
	void *buffer;
	int ret;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_pipe, alloc_init, pipe);

	if (size != 0U) {
		buffer = z_thread_malloc(size);
		if (buffer != NULL) {
			k_pipe_init(pipe, buffer, size);
			pipe->flags = K_PIPE_FLAG_ALLOC;
			ret = 0;
		} else {
			ret = -ENOMEM;
		}
	} else {
		k_pipe_init(pipe, NULL, 0);
		ret = 0;
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, alloc_init, pipe, ret);

	return ret;
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_pipe_alloc_init(struct k_pipe *pipe, size_t size)
{
	Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(pipe, K_OBJ_PIPE));

	return z_impl_k_pipe_alloc_init(pipe, size);
}
#include <syscalls/k_pipe_alloc_init_mrsh.c>
#endif

int k_pipe_cleanup(struct k_pipe *pipe)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_pipe, cleanup, pipe);

	CHECKIF(z_waitq_head(&pipe->wait_q.readers) != NULL ||
		z_waitq_head(&pipe->wait_q.writers) != NULL) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, cleanup, pipe, -EAGAIN);

		return -EAGAIN;
	}

	if ((pipe->flags & K_PIPE_FLAG_ALLOC) != 0U) {
		k_free(pipe->buffer);
		pipe->buffer = NULL;
		pipe->flags &= ~K_PIPE_FLAG_ALLOC;
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, cleanup, pipe, 0);

	return 0;
}
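
/*
 * Lifecycle sketch (illustrative only): a pipe whose buffer is drawn from
 * the thread resource pool via k_pipe_alloc_init() and later released with
 * k_pipe_cleanup(). The 32-byte size is an arbitrary example value.
 *
 *	struct k_pipe dyn_pipe;
 *
 *	if (k_pipe_alloc_init(&dyn_pipe, 32) == 0) {
 *		... use the pipe ...
 *		(void)k_pipe_cleanup(&dyn_pipe);
 *	}
 *
 * k_pipe_cleanup() returns -EAGAIN while any reader or writer is still
 * pended on the pipe, so the buffer is only freed once the pipe is idle.
 */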
161
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400162/**
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400163 * @brief Copy bytes from @a src to @a dest
164 *
165 * @return Number of bytes copied
166 */
Leandro Pereiraa1ae8452018-03-06 15:08:55 -0800167static size_t pipe_xfer(unsigned char *dest, size_t dest_size,
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400168 const unsigned char *src, size_t src_size)
169{
Carlos Stuart75f77db2019-02-11 17:14:19 +0000170 size_t num_bytes = MIN(dest_size, src_size);
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400171 const unsigned char *end = src + num_bytes;
172
173 while (src != end) {
174 *dest = *src;
175 dest++;
176 src++;
177 }
178
179 return num_bytes;
180}

/**
 * @brief Put data from @a src into the pipe's circular buffer
 *
 * Modifies the following fields in @a pipe:
 *        buffer, bytes_used, write_index
 *
 * @return Number of bytes written to the pipe's circular buffer
 */
static size_t pipe_buffer_put(struct k_pipe *pipe,
			      const unsigned char *src, size_t src_size)
{
	size_t bytes_copied;
	size_t run_length;
	size_t num_bytes_written = 0;
	int i;

	/*
	 * Two passes are sufficient: the first writes up to the end of the
	 * buffer, and the second (after write_index has wrapped back to 0)
	 * writes any remaining data at the start of the buffer.
	 */
	for (i = 0; i < 2; i++) {
		run_length = MIN(pipe->size - pipe->bytes_used,
				 pipe->size - pipe->write_index);

		bytes_copied = pipe_xfer(pipe->buffer + pipe->write_index,
					 run_length,
					 src + num_bytes_written,
					 src_size - num_bytes_written);

		num_bytes_written += bytes_copied;
		pipe->bytes_used += bytes_copied;
		pipe->write_index += bytes_copied;
		if (pipe->write_index == pipe->size) {
			pipe->write_index = 0;
		}
	}

	return num_bytes_written;
}
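
/*
 * Worked example of the wrap-around handling above (values are assumptions
 * chosen for illustration): with size = 8, bytes_used = 1 and
 * write_index = 6, writing 5 bytes proceeds as
 *
 *	pass 1: run_length = MIN(8 - 1, 8 - 6) = 2 -> copy 2 bytes,
 *	        write_index wraps from 8 to 0, bytes_used = 3
 *	pass 2: run_length = MIN(8 - 3, 8 - 0) = 5 -> copy the remaining
 *	        3 bytes at the start of the buffer, bytes_used = 6
 *
 * pipe_buffer_get() below mirrors this logic with read_index.
 */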

/**
 * @brief Get data from the pipe's circular buffer
 *
 * Modifies the following fields in @a pipe:
 *        bytes_used, read_index
 *
 * @return Number of bytes read from the pipe's circular buffer
 */
static size_t pipe_buffer_get(struct k_pipe *pipe,
			      unsigned char *dest, size_t dest_size)
{
	size_t bytes_copied;
	size_t run_length;
	size_t num_bytes_read = 0;
	int i;

	for (i = 0; i < 2; i++) {
		run_length = MIN(pipe->bytes_used,
				 pipe->size - pipe->read_index);

		bytes_copied = pipe_xfer(dest + num_bytes_read,
					 dest_size - num_bytes_read,
					 pipe->buffer + pipe->read_index,
					 run_length);

		num_bytes_read += bytes_copied;
		pipe->bytes_used -= bytes_copied;
		pipe->read_index += bytes_copied;
		if (pipe->read_index == pipe->size) {
			pipe->read_index = 0;
		}
	}

	return num_bytes_read;
}

/**
 * @brief Prepare a working set of readers/writers
 *
 * Prepare a list of "working threads" into/from which the data
 * will be directly copied. This list is useful as it is used to ...
 *
 *  1. avoid double copying
 *  2. minimize interrupt latency as interrupts are unlocked
 *     while copying data
 *  3. ensure a timeout can not make the request impossible to satisfy
 *
 * The list is populated with previously pended threads that will be ready to
 * run after the pipe call is complete.
 *
 * Important things to remember when reading from the pipe ...
 * 1. If there are writers in @a wait_q, then the pipe's buffer is full.
 * 2. Conversely, if the pipe's buffer is not full, there are no writers.
 * 3. The amount of available data in the pipe is the sum of the bytes used in
 *    the pipe (@a pipe_space) and all the requests from the waiting writers.
 * 4. Since data is read from the pipe's buffer first, the working set must
 *    include writers that will (try to) re-fill the pipe's buffer afterwards.
 *
 * Important things to remember when writing to the pipe ...
 * 1. If there are readers in @a wait_q, then the pipe's buffer is empty.
 * 2. Conversely, if the pipe's buffer is not empty, then there are no readers.
 * 3. The amount of space available in the pipe is the sum of the bytes unused
 *    in the pipe (@a pipe_space) and all the requests from the waiting readers.
 *
 * @return false if the request is unsatisfiable, otherwise true
 */
static bool pipe_xfer_prepare(sys_dlist_t *xfer_list,
			      struct k_thread **waiter,
			      _wait_q_t *wait_q,
			      size_t pipe_space,
			      size_t bytes_to_xfer,
			      size_t min_xfer,
			      k_timeout_t timeout)
{
	struct k_thread *thread;
	struct k_pipe_desc *desc;
	size_t num_bytes = 0;

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		_WAIT_Q_FOR_EACH(wait_q, thread) {
			desc = (struct k_pipe_desc *)thread->base.swap_data;

			num_bytes += desc->bytes_to_xfer;

			if (num_bytes >= bytes_to_xfer) {
				break;
			}
		}

		if (num_bytes + pipe_space < min_xfer) {
			return false;
		}
	}

	/*
	 * Either @a timeout is not K_NO_WAIT (so the thread may pend) or
	 * the entire request can be satisfied. Generate the working list.
	 */

	sys_dlist_init(xfer_list);
	num_bytes = 0;

	while ((thread = z_waitq_head(wait_q)) != NULL) {
		desc = (struct k_pipe_desc *)thread->base.swap_data;
		num_bytes += desc->bytes_to_xfer;

		if (num_bytes > bytes_to_xfer) {
			/*
			 * This request can not be fully satisfied.
			 * Do not remove it from the wait_q.
			 * Do not abort its timeout (if applicable).
			 * Do not add it to the transfer list.
			 */
			break;
		}

		/*
		 * This request can be fully satisfied.
		 * Remove it from the wait_q.
		 * Abort its timeout.
		 * Add it to the transfer list.
		 */
		z_unpend_thread(thread);
		sys_dlist_append(xfer_list, &thread->base.qnode_dlist);
	}

	*waiter = (num_bytes > bytes_to_xfer) ? thread : NULL;

	return true;
}
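
/*
 * Illustrative scenario (the byte counts are assumptions, not taken from
 * the code): a reader asks for 100 bytes while two writers pend with 40
 * and 70 bytes outstanding. Summing writer requests: 40 <= 100, so the
 * first writer is fully satisfiable and is unpended onto @a xfer_list;
 * 40 + 70 = 110 > 100, so the second writer can only be partially drained
 * and is returned through @a waiter while remaining on the wait_q.
 */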

/**
 * @brief Determine the correct return code
 *
 * Bytes Xferred    No Wait    Wait
 *   >= Minimum        0         0
 *    < Minimum      -EIO*    -EAGAIN
 *
 * * The "-EIO No Wait" case was already checked when the "working set"
 *   was created in pipe_xfer_prepare().
 *
 * @return See table above
 */
static int pipe_return_code(size_t min_xfer, size_t bytes_remaining,
			    size_t bytes_requested)
{
	if (bytes_requested - bytes_remaining >= min_xfer) {
		/*
		 * At least the minimum number of requested
		 * bytes have been transferred.
		 */
		return 0;
	}

	return -EAGAIN;
}

/**
 * @brief Ready a pipe thread
 *
 * If the pipe thread is a real thread, then add it to the ready queue.
 * Dummy threads (asynchronous message descriptors) never execute, so
 * they are simply skipped.
 *
 * @return N/A
 */
static void pipe_thread_ready(struct k_thread *thread)
{
#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0)
	if ((thread->base.thread_state & _THREAD_DUMMY) != 0U) {
		return;
	}
#endif

	z_ready_thread(thread);
}

/**
 * @brief Internal API used to send data to a pipe
 */
int z_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
			unsigned char *data, size_t bytes_to_write,
			size_t *bytes_written, size_t min_xfer,
			k_timeout_t timeout)
{
	struct k_thread *reader;
	struct k_pipe_desc *desc;
	sys_dlist_t xfer_list;
	size_t num_bytes_written = 0;
	size_t bytes_copied;

#if (CONFIG_NUM_PIPE_ASYNC_MSGS == 0)
	ARG_UNUSED(async_desc);
#endif

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_pipe, put, pipe, timeout);

	CHECKIF((min_xfer > bytes_to_write) || bytes_written == NULL) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, put, pipe, timeout, -EINVAL);

		return -EINVAL;
	}

	k_spinlock_key_t key = k_spin_lock(&pipe->lock);

	/*
	 * Create a list of "working readers" into which the data will be
	 * directly copied.
	 */
	if (!pipe_xfer_prepare(&xfer_list, &reader, &pipe->wait_q.readers,
			       pipe->size - pipe->bytes_used, bytes_to_write,
			       min_xfer, timeout)) {
		k_spin_unlock(&pipe->lock, key);
		*bytes_written = 0;

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, put, pipe, timeout, -EIO);

		return -EIO;
	}

	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_pipe, put, pipe, timeout);

	z_sched_lock();
	k_spin_unlock(&pipe->lock, key);

	/*
	 * 1. 'xfer_list' currently contains a list of reader threads that can
	 *    have their read requests fulfilled by the current call.
	 * 2. 'reader' if not NULL points to a thread on the reader wait_q
	 *    that can get some of its requested data.
	 * 3. Interrupts are unlocked but the scheduler is locked to allow
	 *    ticks to be delivered but no scheduling to occur.
	 * 4. If 'reader' times out while we are copying data, not only do we
	 *    still have a pointer to it, but it can not execute until this
	 *    call is complete so it is still safe to copy data to it.
	 */

	struct k_thread *thread = (struct k_thread *)
				  sys_dlist_get(&xfer_list);
	while (thread != NULL) {
		desc = (struct k_pipe_desc *)thread->base.swap_data;
		bytes_copied = pipe_xfer(desc->buffer, desc->bytes_to_xfer,
					 data + num_bytes_written,
					 bytes_to_write - num_bytes_written);

		num_bytes_written += bytes_copied;
		desc->buffer += bytes_copied;
		desc->bytes_to_xfer -= bytes_copied;

		/* The thread's read request has been satisfied. Ready it. */
		z_ready_thread(thread);

		thread = (struct k_thread *)sys_dlist_get(&xfer_list);
	}

	/*
	 * Copy any data to the reader that we left on the wait_q.
	 * It is possible no data will be copied.
	 */
	if (reader != NULL) {
		desc = (struct k_pipe_desc *)reader->base.swap_data;
		bytes_copied = pipe_xfer(desc->buffer, desc->bytes_to_xfer,
					 data + num_bytes_written,
					 bytes_to_write - num_bytes_written);

		num_bytes_written += bytes_copied;
		desc->buffer += bytes_copied;
		desc->bytes_to_xfer -= bytes_copied;
	}

	/*
	 * As much data as possible has been directly copied to any waiting
	 * readers. Add as much as possible to the pipe's circular buffer.
	 */
	num_bytes_written +=
		pipe_buffer_put(pipe, data + num_bytes_written,
				bytes_to_write - num_bytes_written);

	if (num_bytes_written == bytes_to_write) {
		*bytes_written = num_bytes_written;
		k_sched_unlock();

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, put, pipe, timeout, 0);

		return 0;
	}

	if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT)
	    && num_bytes_written >= min_xfer
	    && min_xfer > 0U) {
		*bytes_written = num_bytes_written;
		k_sched_unlock();

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, put, pipe, timeout, 0);

		return 0;
	}

	/* Not all data was copied */

	struct k_pipe_desc pipe_desc;

	pipe_desc.buffer = data + num_bytes_written;
	pipe_desc.bytes_to_xfer = bytes_to_write - num_bytes_written;

	if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		_current->base.swap_data = &pipe_desc;
		/*
		 * Lock interrupts and unlock the scheduler before
		 * manipulating the writers wait_q.
		 */
		k_spinlock_key_t key2 = k_spin_lock(&pipe->lock);

		z_sched_unlock_no_reschedule();
		(void)z_pend_curr(&pipe->lock, key2,
				  &pipe->wait_q.writers, timeout);
	} else {
		k_sched_unlock();
	}

	*bytes_written = bytes_to_write - pipe_desc.bytes_to_xfer;

	int ret = pipe_return_code(min_xfer, pipe_desc.bytes_to_xfer,
				   bytes_to_write);
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, put, pipe, timeout, ret);
	return ret;
}

int z_impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
		      size_t *bytes_read, size_t min_xfer, k_timeout_t timeout)
{
	struct k_thread *writer;
	struct k_pipe_desc *desc;
	sys_dlist_t xfer_list;
	size_t num_bytes_read = 0;
	size_t bytes_copied;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_pipe, get, pipe, timeout);

	CHECKIF((min_xfer > bytes_to_read) || bytes_read == NULL) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, get, pipe, timeout, -EINVAL);

		return -EINVAL;
	}

	k_spinlock_key_t key = k_spin_lock(&pipe->lock);

	/*
	 * Create a list of "working writers" from which the data will be
	 * directly copied.
	 */
	if (!pipe_xfer_prepare(&xfer_list, &writer, &pipe->wait_q.writers,
			       pipe->bytes_used, bytes_to_read,
			       min_xfer, timeout)) {
		k_spin_unlock(&pipe->lock, key);
		*bytes_read = 0;

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, get, pipe, timeout, -EIO);

		return -EIO;
	}

	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_pipe, get, pipe, timeout);

	z_sched_lock();
	k_spin_unlock(&pipe->lock, key);

	num_bytes_read = pipe_buffer_get(pipe, data, bytes_to_read);

	/*
	 * 1. 'xfer_list' currently contains a list of writer threads that can
	 *    have their write requests fulfilled by the current call.
	 * 2. 'writer' if not NULL points to a thread on the writer wait_q
	 *    that can post some of its requested data.
	 * 3. Data will be copied from each writer's buffer to either the
	 *    reader's buffer and/or to the pipe's circular buffer.
	 * 4. Interrupts are unlocked but the scheduler is locked to allow
	 *    ticks to be delivered but no scheduling to occur.
	 * 5. If 'writer' times out while we are copying data, not only do we
	 *    still have a pointer to it, but it can not execute until this
	 *    call is complete so it is still safe to copy data from it.
	 */

	struct k_thread *thread = (struct k_thread *)
				  sys_dlist_get(&xfer_list);
	while ((thread != NULL) && (num_bytes_read < bytes_to_read)) {
		desc = (struct k_pipe_desc *)thread->base.swap_data;
		bytes_copied = pipe_xfer((uint8_t *)data + num_bytes_read,
					 bytes_to_read - num_bytes_read,
					 desc->buffer, desc->bytes_to_xfer);

		num_bytes_read += bytes_copied;
		desc->buffer += bytes_copied;
		desc->bytes_to_xfer -= bytes_copied;

		/*
		 * It is expected that the write request will be satisfied.
		 * However, if the read request was satisfied before the
		 * write request was satisfied, then the write request must
		 * finish later when writing to the pipe's circular buffer.
		 */
		if (num_bytes_read == bytes_to_read) {
			break;
		}
		pipe_thread_ready(thread);

		thread = (struct k_thread *)sys_dlist_get(&xfer_list);
	}

	if ((writer != NULL) && (num_bytes_read < bytes_to_read)) {
		desc = (struct k_pipe_desc *)writer->base.swap_data;
		bytes_copied = pipe_xfer((uint8_t *)data + num_bytes_read,
					 bytes_to_read - num_bytes_read,
					 desc->buffer, desc->bytes_to_xfer);

		num_bytes_read += bytes_copied;
		desc->buffer += bytes_copied;
		desc->bytes_to_xfer -= bytes_copied;
	}

	/*
	 * Copy as much data as possible from the writers (if any)
	 * into the pipe's circular buffer.
	 */
	while (thread != NULL) {
		desc = (struct k_pipe_desc *)thread->base.swap_data;
		bytes_copied = pipe_buffer_put(pipe, desc->buffer,
					       desc->bytes_to_xfer);

		desc->buffer += bytes_copied;
		desc->bytes_to_xfer -= bytes_copied;

		/* Write request has been satisfied */
		pipe_thread_ready(thread);

		thread = (struct k_thread *)sys_dlist_get(&xfer_list);
	}

	if (writer != NULL) {
		desc = (struct k_pipe_desc *)writer->base.swap_data;
		bytes_copied = pipe_buffer_put(pipe, desc->buffer,
					       desc->bytes_to_xfer);

		desc->buffer += bytes_copied;
		desc->bytes_to_xfer -= bytes_copied;
	}

	if (num_bytes_read == bytes_to_read) {
		k_sched_unlock();

		*bytes_read = num_bytes_read;

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, get, pipe, timeout, 0);

		return 0;
	}

	if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT)
	    && num_bytes_read >= min_xfer
	    && min_xfer > 0U) {
		k_sched_unlock();

		*bytes_read = num_bytes_read;

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, get, pipe, timeout, 0);

		return 0;
	}

	/* Not all data was read */

	struct k_pipe_desc pipe_desc;

	pipe_desc.buffer = (uint8_t *)data + num_bytes_read;
	pipe_desc.bytes_to_xfer = bytes_to_read - num_bytes_read;

	if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		_current->base.swap_data = &pipe_desc;
		k_spinlock_key_t key2 = k_spin_lock(&pipe->lock);

		z_sched_unlock_no_reschedule();
		(void)z_pend_curr(&pipe->lock, key2,
				  &pipe->wait_q.readers, timeout);
	} else {
		k_sched_unlock();
	}

	*bytes_read = bytes_to_read - pipe_desc.bytes_to_xfer;

	int ret = pipe_return_code(min_xfer, pipe_desc.bytes_to_xfer,
				   bytes_to_read);
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, get, pipe, timeout, ret);
	return ret;
}

#ifdef CONFIG_USERSPACE
int z_vrfy_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
		      size_t *bytes_read, size_t min_xfer, k_timeout_t timeout)
{
	Z_OOPS(Z_SYSCALL_OBJ(pipe, K_OBJ_PIPE));
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(bytes_read, sizeof(*bytes_read)));
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE((void *)data, bytes_to_read));

	return z_impl_k_pipe_get((struct k_pipe *)pipe, (void *)data,
				 bytes_to_read, bytes_read, min_xfer,
				 timeout);
}
#include <syscalls/k_pipe_get_mrsh.c>
#endif

int z_impl_k_pipe_put(struct k_pipe *pipe, void *data, size_t bytes_to_write,
		      size_t *bytes_written, size_t min_xfer,
		      k_timeout_t timeout)
{
	return z_pipe_put_internal(pipe, NULL, data,
				   bytes_to_write, bytes_written,
				   min_xfer, timeout);
}

#ifdef CONFIG_USERSPACE
int z_vrfy_k_pipe_put(struct k_pipe *pipe, void *data, size_t bytes_to_write,
		      size_t *bytes_written, size_t min_xfer,
		      k_timeout_t timeout)
{
	Z_OOPS(Z_SYSCALL_OBJ(pipe, K_OBJ_PIPE));
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(bytes_written, sizeof(*bytes_written)));
	Z_OOPS(Z_SYSCALL_MEMORY_READ((void *)data, bytes_to_write));

	return z_impl_k_pipe_put((struct k_pipe *)pipe, (void *)data,
				 bytes_to_write, bytes_written, min_xfer,
				 timeout);
}
#include <syscalls/k_pipe_put_mrsh.c>
#endif
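
/*
 * End-to-end usage sketch (illustrative only; buffer size, message length
 * and timeout values are assumptions for the example). A producer writes a
 * complete message and a consumer blocks until at least one byte arrives:
 *
 *	unsigned char msg[16];
 *	size_t written, read;
 *
 *	(void)k_pipe_put(&my_pipe, msg, sizeof(msg), &written,
 *			 sizeof(msg), K_FOREVER);
 *
 *	(void)k_pipe_get(&my_pipe, msg, sizeof(msg), &read, 1, K_FOREVER);
 *
 * With min_xfer == sizeof(msg) the put call blocks until the whole message
 * is accepted; with min_xfer == 1 the get call returns as soon as any data
 * is available.
 */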

size_t z_impl_k_pipe_read_avail(struct k_pipe *pipe)
{
	size_t res;
	k_spinlock_key_t key;

	/* Buffer and size are fixed. No need to spin. */
	if (pipe->buffer == NULL || pipe->size == 0U) {
		res = 0;
		goto out;
	}

	key = k_spin_lock(&pipe->lock);

	if (pipe->read_index == pipe->write_index) {
		res = pipe->bytes_used;
	} else if (pipe->read_index < pipe->write_index) {
		res = pipe->write_index - pipe->read_index;
	} else {
		res = pipe->size - (pipe->read_index - pipe->write_index);
	}

	k_spin_unlock(&pipe->lock, key);

out:
	return res;
}
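
/*
 * Worked example of the wrapped case above (values are assumptions chosen
 * for illustration): with size = 8, read_index = 6 and write_index = 2,
 * the readable bytes sit at indices 6, 7, 0 and 1, i.e.
 * 8 - (6 - 2) = 4 bytes, which is what the final branch computes.
 */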

#ifdef CONFIG_USERSPACE
size_t z_vrfy_k_pipe_read_avail(struct k_pipe *pipe)
{
	Z_OOPS(Z_SYSCALL_OBJ(pipe, K_OBJ_PIPE));

	return z_impl_k_pipe_read_avail(pipe);
}
#include <syscalls/k_pipe_read_avail_mrsh.c>
#endif

size_t z_impl_k_pipe_write_avail(struct k_pipe *pipe)
{
	size_t res;
	k_spinlock_key_t key;

	/* Buffer and size are fixed. No need to spin. */
	if (pipe->buffer == NULL || pipe->size == 0U) {
		res = 0;
		goto out;
	}

	key = k_spin_lock(&pipe->lock);

	if (pipe->write_index == pipe->read_index) {
		res = pipe->size - pipe->bytes_used;
	} else if (pipe->write_index < pipe->read_index) {
		res = pipe->read_index - pipe->write_index;
	} else {
		res = pipe->size - (pipe->write_index - pipe->read_index);
	}

	k_spin_unlock(&pipe->lock, key);

out:
	return res;
}

#ifdef CONFIG_USERSPACE
size_t z_vrfy_k_pipe_write_avail(struct k_pipe *pipe)
{
	Z_OOPS(Z_SYSCALL_OBJ(pipe, K_OBJ_PIPE));

	return z_impl_k_pipe_write_avail(pipe);
}
#include <syscalls/k_pipe_write_avail_mrsh.c>
#endif