kernel: Make if statements have essentially Boolean type
Make if statements that use pointers explicitly check whether the value
is NULL or not.
The C standard does not guarantee that a null pointer is the same as a
pointer to memory address 0, and because of this it is good practice to
always compare pointers explicitly against the macro NULL.
Signed-off-by: Flavio Ceolin <flavio.ceolin@intel.com>
diff --git a/kernel/mempool.c b/kernel/mempool.c
index 3817e4e..e82be2b 100644
--- a/kernel/mempool.c
+++ b/kernel/mempool.c
@@ -192,7 +192,7 @@
}
ret = k_malloc(bounds);
- if (ret) {
+ if (ret != NULL) {
(void)memset(ret, 0, bounds);
}
return ret;
diff --git a/kernel/msg_q.c b/kernel/msg_q.c
index 33755d6..4cbf0a3 100644
--- a/kernel/msg_q.c
+++ b/kernel/msg_q.c
@@ -202,7 +202,7 @@
/* handle first thread waiting to write (if any) */
pending_thread = _unpend_first_thread(&q->wait_q);
- if (pending_thread) {
+ if (pending_thread != NULL) {
/* add thread's message to queue */
(void)memcpy(q->write_ptr, pending_thread->base.swap_data,
q->msg_size);
diff --git a/kernel/mutex.c b/kernel/mutex.c
index e3f3584..64b9bd7 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -241,7 +241,7 @@
K_DEBUG("new owner of mutex %p: %p (prio: %d)\n",
mutex, new_owner, new_owner ? new_owner->base.prio : -1000);
- if (new_owner) {
+ if (new_owner != NULL) {
_ready_thread(new_owner);
irq_unlock(key);
diff --git a/kernel/pipes.c b/kernel/pipes.c
index 8e52bce..f56fc33 100644
--- a/kernel/pipes.c
+++ b/kernel/pipes.c
@@ -499,7 +499,7 @@
* Copy any data to the reader that we left on the wait_q.
* It is possible no data will be copied.
*/
- if (reader) {
+ if (reader != NULL) {
desc = (struct k_pipe_desc *)reader->base.swap_data;
bytes_copied = pipe_xfer(desc->buffer, desc->bytes_to_xfer,
data + num_bytes_written,
@@ -649,7 +649,7 @@
thread = (struct k_thread *)sys_dlist_get(&xfer_list);
}
- if (writer && (num_bytes_read < bytes_to_read)) {
+ if ((writer != NULL) && (num_bytes_read < bytes_to_read)) {
desc = (struct k_pipe_desc *)writer->base.swap_data;
bytes_copied = pipe_xfer(data + num_bytes_read,
bytes_to_read - num_bytes_read,
@@ -679,7 +679,7 @@
thread = (struct k_thread *)sys_dlist_get(&xfer_list);
}
- if (writer) {
+ if (writer != NULL) {
desc = (struct k_pipe_desc *)writer->base.swap_data;
bytes_copied = pipe_buffer_put(pipe, desc->buffer,
desc->bytes_to_xfer);
diff --git a/kernel/poll.c b/kernel/poll.c
index 73149d6..81811a7 100644
--- a/kernel/poll.c
+++ b/kernel/poll.c
@@ -80,8 +80,9 @@
struct k_poll_event *pending;
pending = (struct k_poll_event *)sys_dlist_peek_tail(events);
- if (!pending || _is_t1_higher_prio_than_t2(pending->poller->thread,
- poller->thread)) {
+ if ((pending == NULL) ||
+ _is_t1_higher_prio_than_t2(pending->poller->thread,
+ poller->thread)) {
sys_dlist_append(events, &event->_node);
return;
}
@@ -358,7 +359,7 @@
struct k_poll_event *poll_event;
poll_event = (struct k_poll_event *)sys_dlist_get(events);
- if (poll_event) {
+ if (poll_event != NULL) {
(void) signal_poll_event(poll_event, state);
}
}
@@ -409,7 +410,7 @@
signal->signaled = 1;
poll_event = (struct k_poll_event *)sys_dlist_get(&signal->poll_events);
- if (!poll_event) {
+ if (poll_event == NULL) {
irq_unlock(key);
return 0;
}
diff --git a/kernel/queue.c b/kernel/queue.c
index f9f7931..a493a6b 100644
--- a/kernel/queue.c
+++ b/kernel/queue.c
@@ -127,7 +127,7 @@
first_pending_thread = _unpend_first_thread(&queue->wait_q);
- if (first_pending_thread) {
+ if (first_pending_thread != NULL) {
prepare_thread_to_run(first_pending_thread, NULL);
}
#else
@@ -151,7 +151,7 @@
first_pending_thread = _unpend_first_thread(&queue->wait_q);
- if (first_pending_thread) {
+ if (first_pending_thread != NULL) {
prepare_thread_to_run(first_pending_thread, data);
_reschedule(key);
return 0;
@@ -163,7 +163,7 @@
struct alloc_node *anode;
anode = z_thread_malloc(sizeof(*anode));
- if (!anode) {
+ if (anode == NULL) {
return -ENOMEM;
}
anode->data = data;
@@ -242,7 +242,7 @@
head = *(void **)head;
}
- if (head) {
+ if (head != NULL) {
sys_sflist_append_list(&queue->data_q, head, tail);
}
@@ -303,7 +303,7 @@
val = z_queue_node_peek(sys_sflist_get(&queue->data_q), true);
irq_unlock(key);
- if (!val && timeout != K_FOREVER) {
+ if ((val == NULL) && (timeout != K_FOREVER)) {
elapsed = k_uptime_get_32() - start;
done = elapsed > timeout;
}
diff --git a/kernel/sched.c b/kernel/sched.c
index ed8dd02..37d4cfc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -169,7 +169,7 @@
/* Choose the best thread that is not current */
struct k_thread *th = _priq_run_best(&_kernel.ready_q.runq);
- if (!th) {
+ if (th == NULL) {
th = _current_cpu->idle_thread;
}
@@ -269,7 +269,7 @@
irq_unlock(key);
}
- if (wait_q) {
+ if (wait_q != NULL) {
#ifdef CONFIG_WAITQ_SCALABLE
thread->base.pended_on = wait_q;
#endif
@@ -333,7 +333,7 @@
{
struct k_thread *t = _unpend1_no_timeout(wait_q);
- if (t) {
+ if (t != NULL) {
(void)_abort_thread_timeout(t);
}
@@ -622,7 +622,7 @@
LOCKED(&sched_lock) {
struct k_thread *next = _priq_run_best(&_kernel.ready_q.runq);
- if (next) {
+ if (next != NULL) {
ret = thread->base.prio == next->base.prio;
}
}
diff --git a/kernel/sys_clock.c b/kernel/sys_clock.c
index fc2bef9..6d9c725 100644
--- a/kernel/sys_clock.c
+++ b/kernel/sys_clock.c
@@ -183,7 +183,7 @@
K_DEBUG("head: %p, delta: %d\n",
timeout, timeout ? timeout->delta_ticks_from_prev : -2112);
- if (!next) {
+ if (next == NULL) {
irq_unlock(key);
return;
}
diff --git a/kernel/timer.c b/kernel/timer.c
index a4dfc4b..a67edbe 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -70,7 +70,7 @@
thread = _waitq_head(&timer->wait_q);
- if (!thread) {
+ if (thread == NULL) {
return;
}
@@ -163,7 +163,7 @@
key = irq_lock();
struct k_thread *pending_thread = _unpend1_no_timeout(&timer->wait_q);
- if (pending_thread) {
+ if (pending_thread != NULL) {
_ready_thread(pending_thread);
}
diff --git a/kernel/userspace.c b/kernel/userspace.c
index e4339a1..cd8f729 100644
--- a/kernel/userspace.c
+++ b/kernel/userspace.c
@@ -220,7 +220,7 @@
"bad object type requested");
dyn_obj = z_thread_malloc(sizeof(*dyn_obj) + obj_size_get(otype));
- if (!dyn_obj) {
+ if (dyn_obj == NULL) {
SYS_LOG_WRN("could not allocate kernel object");
return NULL;
}
@@ -265,7 +265,7 @@
key = irq_lock();
dyn_obj = dyn_object_find(obj);
- if (dyn_obj) {
+ if (dyn_obj != NULL) {
rb_remove(&obj_rb_tree, &dyn_obj->node);
sys_dlist_remove(&dyn_obj->obj_list);
@@ -275,7 +275,7 @@
}
irq_unlock(key);
- if (dyn_obj) {
+ if (dyn_obj != NULL) {
k_free(dyn_obj);
}
}
@@ -286,11 +286,11 @@
ret = _k_object_gperf_find(obj);
- if (!ret) {
+ if (ret == NULL) {
struct dyn_obj *dyn_obj;
dyn_obj = dyn_object_find(obj);
- if (dyn_obj) {
+ if (dyn_obj != NULL) {
ret = &dyn_obj->kobj;
}
}
@@ -319,7 +319,7 @@
ko = _k_object_find(t);
- if (!ko) {
+ if (ko == NULL) {
return -1;
}
@@ -482,7 +482,7 @@
{
struct _k_object *ko = _k_object_find(object);
- if (ko) {
+ if (ko != NULL) {
_thread_perms_set(ko, thread);
}
}
@@ -491,7 +491,7 @@
{
struct _k_object *ko = _k_object_find(object);
- if (ko) {
+ if (ko != NULL) {
_thread_perms_clear(ko, thread);
}
}
@@ -505,7 +505,7 @@
{
struct _k_object *ko = _k_object_find(object);
- if (ko) {
+ if (ko != NULL) {
ko->flags |= K_OBJ_FLAG_PUBLIC;
}
}
@@ -553,7 +553,7 @@
*/
ko = _k_object_find(object);
- if (!ko) {
+ if (ko == NULL) {
/* Supervisor threads can ignore rules about kernel objects
* and may declare them on stacks, etc. Such objects will never
* be usable from userspace, but we shouldn't explode.
@@ -569,7 +569,7 @@
{
struct _k_object *ko = _k_object_find(object);
- if (ko) {
+ if (ko != NULL) {
(void)memset(ko->perms, 0, sizeof(ko->perms));
_thread_perms_set(ko, k_current_get());
ko->flags |= K_OBJ_FLAG_INITIALIZED;
@@ -582,7 +582,7 @@
/* See comments in _k_object_init() */
ko = _k_object_find(object);
- if (!ko) {
+ if (ko == NULL) {
return;
}
@@ -605,7 +605,7 @@
}
dst = z_thread_malloc(size);
- if (!dst) {
+ if (dst == NULL) {
printk("out of thread resource pool memory (%zu)", size);
goto out_err;
}
diff --git a/kernel/work_q.c b/kernel/work_q.c
index d1ff03b..42de0c7 100644
--- a/kernel/work_q.c
+++ b/kernel/work_q.c
@@ -27,7 +27,7 @@
k_work_handler_t handler;
work = k_queue_get(&work_q->queue, K_FOREVER);
- if (!work) {
+ if (work == NULL) {
continue;
}
diff --git a/lib/libc/minimal/source/stdlib/malloc.c b/lib/libc/minimal/source/stdlib/malloc.c
index 80acb94..a8d63b7 100644
--- a/lib/libc/minimal/source/stdlib/malloc.c
+++ b/lib/libc/minimal/source/stdlib/malloc.c
@@ -22,7 +22,7 @@
void *ret;
ret = sys_mem_pool_alloc(&z_malloc_mem_pool, size);
- if (!ret) {
+ if (ret == NULL) {
errno = ENOMEM;
}
@@ -82,7 +82,7 @@
ret = malloc(size);
- if (ret) {
+ if (ret != NULL) {
(void)memset(ret, 0, size);
}
@@ -120,7 +120,7 @@
}
new_ptr = malloc(requested_size);
- if (!new_ptr) {
+ if (new_ptr == NULL) {
return NULL;
}
diff --git a/lib/libc/minimal/source/stdlib/strtol.c b/lib/libc/minimal/source/stdlib/strtol.c
index 9617c2b..c54b17c 100644
--- a/lib/libc/minimal/source/stdlib/strtol.c
+++ b/lib/libc/minimal/source/stdlib/strtol.c
@@ -113,7 +113,7 @@
errno = ERANGE;
} else if (neg)
acc = -acc;
- if (endptr != 0)
+ if (endptr != NULL)
*endptr = (char *)(any ? s - 1 : nptr);
return acc;
}
diff --git a/lib/libc/minimal/source/stdlib/strtoul.c b/lib/libc/minimal/source/stdlib/strtoul.c
index 77610d7..a5934ac 100644
--- a/lib/libc/minimal/source/stdlib/strtoul.c
+++ b/lib/libc/minimal/source/stdlib/strtoul.c
@@ -92,7 +92,7 @@
errno = ERANGE;
} else if (neg)
acc = -acc;
- if (endptr != 0)
+ if (endptr != NULL)
*endptr = (char *)(any ? s - 1 : nptr);
return acc;
}
diff --git a/lib/mempool/mempool.c b/lib/mempool/mempool.c
index 006d2ca..b3a89b8 100644
--- a/lib/mempool/mempool.c
+++ b/lib/mempool/mempool.c
@@ -147,7 +147,7 @@
int key = pool_irq_lock(p);
block = sys_dlist_get(&p->levels[l].free_list);
- if (block) {
+ if (block != NULL) {
clear_free_bit(p, l, block_num(p, block, lsz));
}
pool_irq_unlock(p, key);
@@ -258,7 +258,7 @@
/* Iteratively break the smallest enclosing block... */
data = block_alloc(p, free_l, lsizes[free_l]);
- if (!data) {
+ if (data == NULL) {
/* This can happen if we race with another allocator.
* It's OK, just back out and the timeout code will
* retry. Note mild overloading: -EAGAIN isn't for
@@ -337,7 +337,7 @@
struct sys_mem_pool_block *blk;
struct sys_mem_pool *p;
- if (!ptr) {
+ if (ptr == NULL) {
return;
}
diff --git a/lib/rbtree/rb.c b/lib/rbtree/rb.c
index 85f4507..762ca46 100644
--- a/lib/rbtree/rb.c
+++ b/lib/rbtree/rb.c
@@ -172,7 +172,7 @@
int side = get_side(grandparent, parent);
struct rbnode *aunt = get_child(grandparent, !side);
- if (aunt && is_red(aunt)) {
+ if ((aunt != NULL) && is_red(aunt)) {
set_color(grandparent, RED);
set_color(parent, BLACK);
set_color(aunt, BLACK);
@@ -431,7 +431,7 @@
struct rbnode *child = get_child(node, 0);
- if (!child) {
+ if (child == NULL) {
child = get_child(node, 1);
}
@@ -481,7 +481,7 @@
void _rb_walk(struct rbnode *node, rb_visit_t visit_fn, void *cookie)
{
- if (node) {
+ if (node != NULL) {
_rb_walk(get_child(node, 0), visit_fn, cookie);
visit_fn(node, cookie);
_rb_walk(get_child(node, 1), visit_fn, cookie);
@@ -557,7 +557,7 @@
* it's right subtree if it has a right child
*/
n = get_child(f->stack[f->top], 1);
- if (n) {
+ if (n != NULL) {
return stack_left_limb(n, f);
}
diff --git a/scripts/gen_kobject_list.py b/scripts/gen_kobject_list.py
index e0384c9..0bc0cdb 100755
--- a/scripts/gen_kobject_list.py
+++ b/scripts/gen_kobject_list.py
@@ -85,7 +85,7 @@
int i;
for (i = MIN_HASH_VALUE; i <= MAX_HASH_VALUE; i++) {
- if (wordlist[i].name) {
+ if (wordlist[i].name != NULL) {
func(&wordlist[i], context);
}
}