drivers/nvme: Integrate with the disk subsystem

Hook each NVMe namespace into the disk subsystem at runtime, exposing
it as a generic disk that the disk access API can drive.
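
Once registered, a namespace shows up under a name derived from its
controller and namespace index ("nvme0n0" for the first namespace of
the first controller). A minimal usage sketch against the generic disk
access API, assuming a 512-byte sector size and eliding error checks
(the function name is illustrative):

	#include <zephyr/storage/disk_access.h>

	void nvme_disk_example(void)
	{
		uint32_t sector_count;
		uint8_t buf[512];

		disk_access_init("nvme0n0");
		disk_access_ioctl("nvme0n0", DISK_IOCTL_GET_SECTOR_COUNT,
				  &sector_count);
		disk_access_read("nvme0n0", buf, 0, 1);
	}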

Signed-off-by: Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>
diff --git a/drivers/disk/nvme/CMakeLists.txt b/drivers/disk/nvme/CMakeLists.txt
index 5559296..ca5bd68 100644
--- a/drivers/disk/nvme/CMakeLists.txt
+++ b/drivers/disk/nvme/CMakeLists.txt
@@ -6,4 +6,5 @@
   nvme_cmd.c
   nvme_controller_cmd.c
   nvme_namespace.c
+  nvme_disk.c
 )
diff --git a/drivers/disk/nvme/nvme_cmd.h b/drivers/disk/nvme/nvme_cmd.h
index c5cd7f8..5dc17b8 100644
--- a/drivers/disk/nvme/nvme_cmd.h
+++ b/drivers/disk/nvme/nvme_cmd.h
@@ -7,6 +7,7 @@
 #define ZEPHYR_DRIVERS_DISK_NVME_NVME_COMMAND_H_
 
 #include <zephyr/sys/slist.h>
+#include <zephyr/sys/byteorder.h>
 
 struct nvme_command {
 	/* dword 0 */
@@ -480,6 +481,49 @@
 	return request;
 }
 
+/*
+ * Command building helper functions. These helpers assume the
+ * allocator zero-filled the command structure (see example below).
+ */
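+/*
+ * Example (caller sketch, names hypothetical): on a request that the
+ * allocator zero-filled, read @count sectors starting at @lba from
+ * namespace @nsid:
+ *
+ *	nvme_namespace_read_cmd(&request->cmd, nsid, lba, count);
+ */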
+static inline
+void nvme_namespace_flush_cmd(struct nvme_command *cmd, uint32_t nsid)
+{
+	cmd->cdw0.opc = NVME_OPC_FLUSH;
+	cmd->nsid = sys_cpu_to_le32(nsid);
+}
+
+static inline
+void nvme_namespace_rw_cmd(struct nvme_command *cmd, uint32_t rwcmd,
+			   uint32_t nsid, uint64_t lba, uint32_t count)
+{
+	cmd->cdw0.opc = rwcmd;
+	cmd->nsid = sys_cpu_to_le32(nsid);
+	cmd->cdw10 = sys_cpu_to_le32(lba & 0xffffffffu);
+	cmd->cdw11 = sys_cpu_to_le32(lba >> 32);
+	cmd->cdw12 = sys_cpu_to_le32(count - 1); /* NLB is a 0-based count */
+}
+
+static inline
+void nvme_namespace_write_cmd(struct nvme_command *cmd, uint32_t nsid,
+			      uint64_t lba, uint32_t count)
+{
+	nvme_namespace_rw_cmd(cmd, NVME_OPC_WRITE, nsid, lba, count);
+}
+
+static inline
+void nvme_namespace_read_cmd(struct nvme_command *cmd, uint32_t nsid,
+			     uint64_t lba, uint32_t count)
+{
+	nvme_namespace_rw_cmd(cmd, NVME_OPC_READ, nsid, lba, count);
+}
+
 static inline void nvme_completion_swapbytes(struct nvme_completion *cpl)
 {
 #if _BYTE_ORDER != _LITTLE_ENDIAN
diff --git a/drivers/disk/nvme/nvme_disk.c b/drivers/disk/nvme/nvme_disk.c
new file mode 100644
index 0000000..0ec9be5
--- /dev/null
+++ b/drivers/disk/nvme/nvme_disk.c
@@ -0,0 +1,183 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ * Copyright (c) 2022 Intel Corp.
+ */
+
+#include <zephyr/logging/log.h>
+LOG_MODULE_DECLARE(nvme, CONFIG_NVME_LOG_LEVEL);
+
+#include <zephyr/kernel.h>
+#include <zephyr/sys/byteorder.h>
+
+#include "nvme.h"
+
+static int nvme_disk_init(struct disk_info *disk)
+{
+	return 0;
+}
+
+static int nvme_disk_status(struct disk_info *disk)
+{
+	return DISK_STATUS_OK;
+}
+
+static int nvme_disk_read(struct disk_info *disk,
+			  uint8_t *data_buf,
+			  uint32_t start_sector,
+			  uint32_t num_sector)
+{
+	/* disk->name points at ns->name: recover the namespace from it */
+	struct nvme_namespace *ns = CONTAINER_OF(disk->name,
+						 struct nvme_namespace, name);
+	struct nvme_completion_poll_status status =
+		NVME_CPL_STATUS_POLL_INIT(status);
+	struct nvme_request *request;
+	uint32_t payload_size;
+
+	payload_size = num_sector * nvme_namespace_get_sector_size(ns);
+
+	request = nvme_allocate_request_vaddr((void *)data_buf, payload_size,
+					      nvme_completion_poll_cb, &status);
+	if (request == NULL) {
+		return -ENOMEM;
+	}
+
+	nvme_namespace_read_cmd(&request->cmd, ns->id,
+				start_sector, num_sector);
+
+	/* Only the first I/O queue is used for now.
+	 * TODO: use the SMP CPU id to select the I/O queue.
+	 */
+	nvme_cmd_qpair_submit_request(ns->ctrlr->ioq, request);
+
+	nvme_completion_poll(&status);
+	if (nvme_cpl_status_is_error(&status)) {
+		LOG_WRN("Reading at sector %u (count %u) on disk %s failed",
+			start_sector, num_sector, ns->name);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int nvme_disk_write(struct disk_info *disk,
+			   const uint8_t *data_buf,
+			   uint32_t start_sector,
+			   uint32_t num_sector)
+{
+	struct nvme_namespace *ns = CONTAINER_OF(disk->name,
+						 struct nvme_namespace, name);
+	struct nvme_completion_poll_status status =
+		NVME_CPL_STATUS_POLL_INIT(status);
+	struct nvme_request *request;
+	uint32_t payload_size;
+
+	payload_size = num_sector * nvme_namespace_get_sector_size(ns);
+
+	request = nvme_allocate_request_vaddr((void *)data_buf, payload_size,
+					      nvme_completion_poll_cb, &status);
+	if (request == NULL) {
+		return -ENOMEM;
+	}
+
+	nvme_namespace_write_cmd(&request->cmd, ns->id,
+				 start_sector, num_sector);
+
+	/* Only the first I/O queue is used for now.
+	 * TODO: use the SMP CPU id to select the I/O queue.
+	 */
+	nvme_cmd_qpair_submit_request(ns->ctrlr->ioq, request);
+
+	nvme_completion_poll(&status);
+	if (nvme_cpl_status_is_error(&status)) {
+		LOG_WRN("Writing at sector %u (count %u) on disk %s failed",
+			start_sector, num_sector, ns->name);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int nvme_disk_flush(struct nvme_namespace *ns)
+{
+	struct nvme_completion_poll_status status =
+		NVME_CPL_STATUS_POLL_INIT(status);
+	struct nvme_request *request;
+
+	request = nvme_allocate_request_null(nvme_completion_poll_cb, &status);
+	if (request == NULL) {
+		return -ENOMEM;
+	}
+
+	nvme_namespace_flush_cmd(&request->cmd, ns->id);
+
+	/* Only the first I/O queue is used for now.
+	 * TODO: use the SMP CPU id to select the I/O queue.
+	 */
+	nvme_cmd_qpair_submit_request(ns->ctrlr->ioq, request);
+
+	nvme_completion_poll(&status);
+	if (nvme_cpl_status_is_error(&status)) {
+		LOG_ERR("Flushing disk %s failed", ns->name);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int nvme_disk_ioctl(struct disk_info *disk, uint8_t cmd, void *buff)
+{
+	struct nvme_namespace *ns = CONTAINER_OF(disk->name,
+						 struct nvme_namespace, name);
+
+	switch (cmd) {
+	case DISK_IOCTL_GET_SECTOR_COUNT:
+		if (!buff) {
+			return -EINVAL;
+		}
+
+		*(uint32_t *)buff = nvme_namespace_get_num_sectors(ns);
+
+		break;
+	case DISK_IOCTL_GET_SECTOR_SIZE:
+		if (!buff) {
+			return -EINVAL;
+		}
+
+		*(uint32_t *)buff = nvme_namespace_get_sector_size(ns);
+
+		break;
+	case DISK_IOCTL_GET_ERASE_BLOCK_SZ:
+		if (!buff) {
+			return -EINVAL;
+		}
+
+		*(uint32_t *)buff = nvme_namespace_get_sector_size(ns);
+
+		break;
+	case DISK_IOCTL_CTRL_SYNC:
+		return nvme_disk_flush(ns);
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static const struct disk_operations nvme_disk_ops = {
+	.init = nvme_disk_init,
+	.status = nvme_disk_status,
+	.read = nvme_disk_read,
+	.write = nvme_disk_write,
+	.ioctl = nvme_disk_ioctl,
+};
+
+int nvme_namespace_disk_setup(struct nvme_namespace *ns,
+			      struct disk_info *disk)
+{
+	disk->name = ns->name;
+	disk->ops = &nvme_disk_ops;
+	disk->dev = ns->ctrlr->dev;
+
+	return disk_access_register(disk);
+}
diff --git a/drivers/disk/nvme/nvme_namespace.c b/drivers/disk/nvme/nvme_namespace.c
index 4ce9c97..61df4c1 100644
--- a/drivers/disk/nvme/nvme_namespace.c
+++ b/drivers/disk/nvme/nvme_namespace.c
@@ -9,6 +9,8 @@
 #include <zephyr/kernel.h>
 #include <zephyr/sys/byteorder.h>
 
+#include <stdio.h>
+
 #include "nvme.h"
 
 uint32_t nvme_namespace_get_sector_size(struct nvme_namespace *ns)
@@ -122,5 +124,12 @@
 		ns->flags |= NVME_NS_FLUSH_SUPPORTED;
 	}
 
+	snprintf(ns->name, NVME_NAMESPACE_NAME_MAX_LENGTH, "nvme%dn%d",
+		 ctrlr->id, ns->id - 1);
+
+	if (nvme_namespace_disk_setup(ns, &ns->disk) != 0) {
+		LOG_ERR("Could not register disk %s", ns->name);
+	}
+
 	return 0;
 }
diff --git a/drivers/disk/nvme/nvme_namespace.h b/drivers/disk/nvme/nvme_namespace.h
index 38cd603..bd193f3 100644
--- a/drivers/disk/nvme/nvme_namespace.h
+++ b/drivers/disk/nvme/nvme_namespace.h
@@ -7,6 +7,8 @@
 #ifndef ZEPHYR_DRIVERS_DISK_NVME_NVME_NAMESPACE_H_
 #define ZEPHYR_DRIVERS_DISK_NVME_NVME_NAMESPACE_H_
 
+#include <zephyr/drivers/disk.h>
+
 struct nvme_namespace_data {
 	/** namespace size */
 	uint64_t		nsze;
@@ -156,6 +158,7 @@
 struct nvme_namespace {
 	struct nvme_controller *ctrlr;
 	struct nvme_namespace_data data;
+	struct disk_info disk;
 	uint32_t id;
 	uint32_t flags;
 	uint32_t boundary;
@@ -188,4 +191,7 @@
 			     uint32_t id,
 			     struct nvme_controller *ctrlr);
 
+int nvme_namespace_disk_setup(struct nvme_namespace *ns,
+			      struct disk_info *disk);
+
 #endif /* ZEPHYR_DRIVERS_DISK_NVME_NVME_NAMESPACE_H_ */