kernel: mem_slab: error handling

Add runtime error checking to k_mem_slab_init() and return error
codes instead of asserting when invalid data is supplied.
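
For reference, a minimal caller-side sketch of the changed API; the
slab, buffer, and symbol names below are illustrative only and not
part of this patch:

    /* Hypothetical caller: with this patch, k_mem_slab_init() reports
     * -EINVAL instead of asserting when the buffer or block size is
     * not pointer-size aligned.
     */
    #include <zephyr.h>

    #define BLK_SIZE  64U   /* multiple of sizeof(void *) */
    #define BLK_COUNT 8U

    static struct k_mem_slab demo_slab;
    static char __aligned(sizeof(void *)) demo_buf[BLK_SIZE * BLK_COUNT];

    int demo_slab_setup(void)
    {
    	int rc = k_mem_slab_init(&demo_slab, demo_buf, BLK_SIZE, BLK_COUNT);

    	if (rc != 0) {
    		/* -EINVAL: buffer or block size not word aligned */
    		return rc;
    	}
    	return 0;
    }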

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
diff --git a/include/kernel.h b/include/kernel.h
index 4272e15..4e45125 100644
--- a/include/kernel.h
+++ b/include/kernel.h
@@ -4333,10 +4333,12 @@
  * @param block_size Size of each memory block (in bytes).
  * @param num_blocks Number of memory blocks.
  *
- * @return N/A
+ * @retval 0 Memory slab initialized.
+ * @retval -EINVAL Invalid data supplied.
+ *
  * @req K-MSLAB-002
  */
-extern void k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
+extern int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
 			   size_t block_size, u32_t num_blocks);
 
 /**
@@ -4354,6 +4356,7 @@
  *         is set to the starting address of the memory block.
  * @retval -ENOMEM Returned without waiting.
  * @retval -EAGAIN Waiting period timed out.
+ * @retval -EINVAL Invalid data supplied.
  * @req K-MSLAB-002
  */
 extern int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem,
diff --git a/kernel/mem_slab.c b/kernel/mem_slab.c
index f22a04f..df83d64 100644
--- a/kernel/mem_slab.c
+++ b/kernel/mem_slab.c
@@ -13,6 +13,7 @@
 #include <sys/dlist.h>
 #include <ksched.h>
 #include <init.h>
+#include <sys/check.h>
 
 static struct k_spinlock lock;
 
@@ -28,15 +29,16 @@
  *
- * @return N/A
+ * @return 0 on success, -EINVAL if the blocks are not word aligned
  */
-static void create_free_list(struct k_mem_slab *slab)
+static int create_free_list(struct k_mem_slab *slab)
 {
 	u32_t j;
 	char *p;
 
 	/* blocks must be word aligned */
-	__ASSERT(((slab->block_size | (uintptr_t)slab->buffer)
-					& (sizeof(void *) - 1)) == 0,
-		 "slab at %p not word aligned", slab);
+	CHECKIF(((slab->block_size | (uintptr_t)slab->buffer) &
+				(sizeof(void *) - 1)) != 0) {
+		return -EINVAL;
+	}
 
 	slab->free_list = NULL;
 	p = slab->buffer;
@@ -46,6 +48,7 @@
 		slab->free_list = p;
 		p += slab->block_size;
 	}
+	return 0;
 }
 
 /**
@@ -57,31 +60,45 @@
  */
 static int init_mem_slab_module(struct device *dev)
 {
+	int rc = 0;
 	ARG_UNUSED(dev);
 
 	Z_STRUCT_SECTION_FOREACH(k_mem_slab, slab) {
-		create_free_list(slab);
+		rc = create_free_list(slab);
+		if (rc < 0) {
+			goto out;
+		}
 		SYS_TRACING_OBJ_INIT(k_mem_slab, slab);
 		z_object_init(slab);
 	}
-	return 0;
+
+out:
+	return rc;
 }
 
 SYS_INIT(init_mem_slab_module, PRE_KERNEL_1,
 	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
 
-void k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
+int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
 		    size_t block_size, u32_t num_blocks)
 {
+	int rc = 0;
+
 	slab->num_blocks = num_blocks;
 	slab->block_size = block_size;
 	slab->buffer = buffer;
 	slab->num_used = 0U;
-	create_free_list(slab);
+	rc = create_free_list(slab);
+	if (rc < 0) {
+		goto out;
+	}
 	z_waitq_init(&slab->wait_q);
 	SYS_TRACING_OBJ_INIT(k_mem_slab, slab);
 
 	z_object_init(slab);
+
+out:
+	return rc;
 }
 
 int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, s32_t timeout)