kernel/pipe: Spinlockify

One spinlock per pipe object. Also removed some vestigial locking
around _ready_thread(). That call is internally synchronized now.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
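
For context, every critical section in pipes.c is converted from the
global IRQ lock idiom to the per-object spinlock idiom sketched below.
This is illustrative only, not a hunk from this patch; "pipe" stands in
for any struct k_pipe pointer:

    /* Before: serializes against every other irq_lock() user */
    unsigned int key = irq_lock();
    /* ... touch pipe state ... */
    irq_unlock(key);

    /* After: contention is scoped to this one pipe */
    k_spinlock_key_t key = k_spin_lock(&pipe->lock);
    /* ... touch pipe state ... */
    k_spin_unlock(&pipe->lock, key);
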
diff --git a/include/kernel.h b/include/kernel.h
index 31528f1..d3e63e9 100644
--- a/include/kernel.h
+++ b/include/kernel.h
@@ -3631,6 +3631,7 @@
size_t bytes_used; /**< # bytes used in buffer */
size_t read_index; /**< Where in buffer to read from */
size_t write_index; /**< Where in buffer to write */
+ struct k_spinlock lock; /**< Synchronization lock */
struct {
_wait_q_t readers; /**< Reader wait queue */
diff --git a/kernel/pipes.c b/kernel/pipes.c
index 2d530dd..9f83107 100644
--- a/kernel/pipes.c
+++ b/kernel/pipes.c
@@ -413,8 +413,6 @@
*/
static void pipe_thread_ready(struct k_thread *thread)
{
- unsigned int key;
-
#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0)
if ((thread->base.thread_state & _THREAD_DUMMY) != 0) {
pipe_async_finish((struct k_pipe_async *)thread);
@@ -422,9 +420,7 @@
}
#endif
- key = irq_lock();
_ready_thread(thread);
- irq_unlock(key);
}
/**
@@ -438,7 +434,6 @@
struct k_thread *reader;
struct k_pipe_desc *desc;
sys_dlist_t xfer_list;
- unsigned int key;
size_t num_bytes_written = 0;
size_t bytes_copied;
@@ -446,7 +441,7 @@
ARG_UNUSED(async_desc);
#endif
- key = irq_lock();
+ k_spinlock_key_t key = k_spin_lock(&pipe->lock);
/*
* Create a list of "working readers" into which the data will be
@@ -456,13 +451,13 @@
if (!pipe_xfer_prepare(&xfer_list, &reader, &pipe->wait_q.readers,
pipe->size - pipe->bytes_used, bytes_to_write,
min_xfer, timeout)) {
- irq_unlock(key);
+ k_spin_unlock(&pipe->lock, key);
*bytes_written = 0;
return -EIO;
}
_sched_lock();
- irq_unlock(key);
+ k_spin_unlock(&pipe->lock, key);
/*
* 1. 'xfer_list' currently contains a list of reader threads that can
@@ -489,9 +484,7 @@
desc->bytes_to_xfer -= bytes_copied;
/* The thread's read request has been satisfied. Ready it. */
- key = irq_lock();
_ready_thread(thread);
- irq_unlock(key);
thread = (struct k_thread *)sys_dlist_get(&xfer_list);
}
@@ -539,7 +532,7 @@
- * Lock interrupts and unlock the scheduler before
+ * Take the pipe lock and unlock the scheduler before
* manipulating the writers wait_q.
*/
- key = irq_lock();
+ k_spinlock_key_t key = k_spin_lock(&pipe->lock);
_sched_unlock_no_reschedule();
async_desc->desc.buffer = data + num_bytes_written;
@@ -548,7 +541,7 @@
_pend_thread((struct k_thread *) &async_desc->thread,
&pipe->wait_q.writers, K_FOREVER);
- _reschedule_irqlock(key);
+ _reschedule(&pipe->lock, key);
return 0;
}
#endif
@@ -564,9 +557,10 @@
- * Lock interrupts and unlock the scheduler before
+ * Take the pipe lock and unlock the scheduler before
* manipulating the writers wait_q.
*/
- key = irq_lock();
+ k_spinlock_key_t key = k_spin_lock(&pipe->lock);
_sched_unlock_no_reschedule();
- (void)_pend_curr_irqlock(key, &pipe->wait_q.writers, timeout);
+ (void)_pend_curr(&pipe->lock, key,
+ &pipe->wait_q.writers, timeout);
} else {
k_sched_unlock();
}
@@ -583,14 +577,13 @@
struct k_thread *writer;
struct k_pipe_desc *desc;
sys_dlist_t xfer_list;
- unsigned int key;
size_t num_bytes_read = 0;
size_t bytes_copied;
__ASSERT(min_xfer <= bytes_to_read, "");
__ASSERT(bytes_read != NULL, "");
- key = irq_lock();
+ k_spinlock_key_t key = k_spin_lock(&pipe->lock);
/*
- * Create a list of "working readers" into which the data will be
+ * Create a list of "working writers" from which the data will be
@@ -600,13 +593,13 @@
if (!pipe_xfer_prepare(&xfer_list, &writer, &pipe->wait_q.writers,
pipe->bytes_used, bytes_to_read,
min_xfer, timeout)) {
- irq_unlock(key);
+ k_spin_unlock(&pipe->lock, key);
*bytes_read = 0;
return -EIO;
}
_sched_lock();
- irq_unlock(key);
+ k_spin_unlock(&pipe->lock, key);
num_bytes_read = pipe_buffer_get(pipe, data, bytes_to_read);
@@ -706,9 +699,11 @@
if (timeout != K_NO_WAIT) {
_current->base.swap_data = &pipe_desc;
- key = irq_lock();
+ k_spinlock_key_t key = k_spin_lock(&pipe->lock);
+
_sched_unlock_no_reschedule();
- (void)_pend_curr_irqlock(key, &pipe->wait_q.readers, timeout);
+ (void)_pend_curr(&pipe->lock, key,
+ &pipe->wait_q.readers, timeout);
} else {
k_sched_unlock();
}
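
Not part of the patch, but to illustrate the user-visible effect: with a
lock per pipe, operations on distinct pipes no longer serialize against
each other or against unrelated irq_lock() users. A minimal sketch using
the public pipe API (buf_a/pipe_a and pipes_demo are invented names for
the example):

    #include <kernel.h>

    static unsigned char buf_a[64], buf_b[64];
    static struct k_pipe pipe_a, pipe_b;

    void pipes_demo(void)
    {
        char msg[] = "ping";
        size_t written;

        k_pipe_init(&pipe_a, buf_a, sizeof(buf_a));
        k_pipe_init(&pipe_b, buf_b, sizeof(buf_b));

        /* Each call contends only on its own pipe's lock */
        (void)k_pipe_put(&pipe_a, msg, sizeof(msg), &written,
                         sizeof(msg), K_NO_WAIT);
        (void)k_pipe_put(&pipe_b, msg, sizeof(msg), &written,
                         sizeof(msg), K_NO_WAIT);
    }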