dma/cavs_hda: DMA driver for HDA on cAVS
Adds an initial driver for HDA streams on cAVS. A common code base is
shared by all HDA streams, while the host-in and host-out drivers are
registered separately as they have small behavioral differences.

dma_status describes the current read/write positions, and dma_reload
signals when to move the read/write positions. This closely follows how
HDA is used in SoF.

A simple test case is provided for both drivers.
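For example, a DSP-side host-in (memory to host) transfer might look
like the following sketch (names illustrative; channel request and
buffer setup omitted):

  dma_config(dma, chan, &dma_cfg);   /* describe the stream buffer */
  dma_start(dma, chan);              /* enable the stream */
  /* ...write bytes into the buffer... */
  dma_reload(dma, chan, 0, 0, len);  /* commit the written bytes */
  dma_get_status(dma, chan, &stat);  /* poll read/write positions */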
Signed-off-by: Tom Burdick <thomas.burdick@intel.com>
diff --git a/drivers/dma/CMakeLists.txt b/drivers/dma/CMakeLists.txt
index 9a47043..e0efc5c 100644
--- a/drivers/dma/CMakeLists.txt
+++ b/drivers/dma/CMakeLists.txt
@@ -17,3 +17,4 @@
zephyr_library_sources_ifdef(CONFIG_DMA_IPROC_PAX dma_iproc_pax_v1.c)
zephyr_library_sources_ifdef(CONFIG_DMA_IPROC_PAX_V2 dma_iproc_pax_v2.c)
zephyr_library_sources_ifdef(CONFIG_DMA_CAVS_GPDMA dma_cavs_gpdma.c dma_dw_common.c)
+zephyr_library_sources_ifdef(CONFIG_DMA_CAVS_HDA dma_cavs_hda.c dma_cavs_hda_host_in.c dma_cavs_hda_host_out.c)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 316f1600..b4fdb5e 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -46,4 +46,6 @@
source "drivers/dma/Kconfig.cavs_gpdma"
+source "drivers/dma/Kconfig.cavs_hda"
+
endif # DMA
diff --git a/drivers/dma/Kconfig.cavs_hda b/drivers/dma/Kconfig.cavs_hda
new file mode 100644
index 0000000..8b12242
--- /dev/null
+++ b/drivers/dma/Kconfig.cavs_hda
@@ -0,0 +1,9 @@
+# cAVS HDA configuration options
+
+# Copyright (c) 2022 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+config DMA_CAVS_HDA
+ bool "Intel cAVS HDA DMA driver"
+ help
+	  DMA driver for the HDA streams on Intel cAVS audio DSPs.
diff --git a/drivers/dma/dma_cavs_hda.c b/drivers/dma/dma_cavs_hda.c
new file mode 100644
index 0000000..061ddd2
--- /dev/null
+++ b/drivers/dma/dma_cavs_hda.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2022 Intel Corporation.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include <drivers/dma.h>
+#include <cavs_hda.h>
+#include "dma_cavs_hda.h"
+
+#define LOG_LEVEL CONFIG_DMA_LOG_LEVEL
+#include <logging/log.h>
+LOG_MODULE_REGISTER(dma_cavs_hda_dma);
+
+/**
+ * @brief Intel cAVS HDA DMA (Stream) driver
+ *
+ * From the DSP's point of view, HDA is effectively a ring buffer (FIFO)
+ * where the read and write positions are maintained by the hardware and
+ * the software commits reads/writes by writing the length of the read or
+ * write to another register (DGFPBI).
+ *
+ * It's important that the software knows the position in the ring buffer
+ * to read from or write to. It's also important that the buffer be placed
+ * in the correct memory region and aligned to 128 bytes. Lastly, it's
+ * important that the host and DSP coordinate the order in which operations
+ * take place. Done correctly, HDA streams are a fantastic bit of hardware
+ * and do their job well.
+ *
+ * There are 4 types of streams, with a set of each available to be used to
+ * communicate to or from the Host or Link. Each stream set is
+ * unidirectional.
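+ *
+ * A rough sketch of a single stream's ring, with both positions
+ * maintained by the hardware:
+ *
+ *   base                                    base + buffer size
+ *   v                                       v
+ *   +--------+-----------------+------------+
+ *   |  free  | data to be read |    free    |
+ *   +--------+-----------------+------------+
+ *            ^                 ^
+ *            read position     write position
+ *
+ * A commit (dma_reload) advances the appropriate position by the given
+ * length, wrapping at the end of the buffer.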
+ */
+
+int cavs_hda_dma_host_in_config(const struct device *dev,
+ uint32_t channel,
+ struct dma_config *dma_cfg)
+{
+ const struct cavs_hda_dma_cfg *const cfg = dev->config;
+ struct dma_block_config *blk_cfg;
+ uint8_t *buf;
+
+ __ASSERT(channel < cfg->dma_channels, "Channel does not exist");
+ __ASSERT(dma_cfg->block_count == 1,
+ "HDA does not support scatter gather or chained "
+ "block transfers.");
+ __ASSERT(dma_cfg->channel_direction == cfg->direction,
+ "Unexpected channel direction, HDA host in supports "
+ "MEMORY_TO_HOST");
+
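+	/* The source buffer is used directly as the stream's ring buffer:
+	 * as noted above, it must be in the correct memory region and
+	 * aligned to 128 bytes.
+	 */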
+ blk_cfg = dma_cfg->head_block;
+ buf = (uint8_t *)(uintptr_t)(blk_cfg->source_address);
+ return cavs_hda_set_buffer(cfg->base, channel, buf,
+ blk_cfg->block_size);
+}
+
+int cavs_hda_dma_host_out_config(const struct device *dev,
+ uint32_t channel,
+ struct dma_config *dma_cfg)
+{
+ const struct cavs_hda_dma_cfg *const cfg = dev->config;
+ uint8_t *buf;
+ struct dma_block_config *blk_cfg;
+
+ __ASSERT(channel < cfg->dma_channels, "Channel does not exist");
+ __ASSERT(dma_cfg->block_count == 1,
+ "HDA does not support scatter gather or chained "
+ "block transfers.");
+ __ASSERT(dma_cfg->channel_direction == cfg->direction,
+ "Unexpected channel direction, HDA host out supports "
+ "HOST_TO_MEMORY");
+
+ blk_cfg = dma_cfg->head_block;
+ buf = (uint8_t *)(uintptr_t)(blk_cfg->dest_address);
+
+ return cavs_hda_set_buffer(cfg->base, channel, buf,
+ blk_cfg->block_size);
+}
+
+int cavs_hda_dma_host_reload(const struct device *dev, uint32_t channel,
+ uint32_t src, uint32_t dst, size_t size)
+{
+ const struct cavs_hda_dma_cfg *const cfg = dev->config;
+
+ __ASSERT(channel < cfg->dma_channels, "Channel does not exist");
+
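+	/* The src/dst addresses are unused here; the hardware maintains the
+	 * stream positions, so a reload simply commits size bytes at the
+	 * current position.
+	 */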
+ cavs_hda_commit(cfg->base, channel, size);
+
+ return 0;
+}
+
+int cavs_hda_dma_status(const struct device *dev, uint32_t channel,
+ struct dma_status *stat)
+{
+ const struct cavs_hda_dma_cfg *const cfg = dev->config;
+
+ __ASSERT(channel < cfg->dma_channels, "Channel does not exist");
+
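+	/* The read and write positions are hardware maintained offsets into
+	 * the stream's ring buffer.
+	 */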
+ stat->dir = cfg->direction;
+ stat->busy = *DGCS(cfg->base, channel) & DGCS_GBUSY;
+ stat->write_position = *DGBWP(cfg->base, channel);
+ stat->read_position = *DGBRP(cfg->base, channel);
+
+ return 0;
+}
+
+int cavs_hda_dma_start(const struct device *dev, uint32_t channel)
+{
+ const struct cavs_hda_dma_cfg *const cfg = dev->config;
+
+ __ASSERT(channel < cfg->dma_channels, "Channel does not exist");
+
+ cavs_hda_enable(cfg->base, channel);
+
+ return 0;
+}
+
+int cavs_hda_dma_stop(const struct device *dev, uint32_t channel)
+{
+ const struct cavs_hda_dma_cfg *const cfg = dev->config;
+
+ __ASSERT(channel < cfg->dma_channels, "Channel does not exist");
+
+ cavs_hda_disable(cfg->base, channel);
+
+ return 0;
+}
+
+int cavs_hda_dma_init(const struct device *dev)
+{
+ struct cavs_hda_dma_data *data = dev->data;
+ const struct cavs_hda_dma_cfg *const cfg = dev->config;
+
+ for (uint32_t i = 0; i < cfg->dma_channels; i++) {
+ cavs_hda_init(cfg->base, i);
+ }
+
+ data->ctx.dma_channels = cfg->dma_channels;
+ data->ctx.atomic = data->channels_atomic;
+ data->ctx.magic = DMA_MAGIC;
+
+ LOG_INF("Intel cAVS HDA %s initialized", dev->name);
+
+ return 0;
+}
diff --git a/drivers/dma/dma_cavs_hda.h b/drivers/dma/dma_cavs_hda.h
new file mode 100644
index 0000000..2125107
--- /dev/null
+++ b/drivers/dma/dma_cavs_hda.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2022 Intel Corporation.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef ZEPHYR_DRIVERS_DMA_DMA_CAVS_HDA_COMMON_H_
+#define ZEPHYR_DRIVERS_DMA_DMA_CAVS_HDA_COMMON_H_
+
+#define CAVS_HDA_MAX_CHANNELS 32
+
+#include <drivers/dma.h>
+
+struct cavs_hda_dma_data {
+ struct dma_context ctx;
+
+ ATOMIC_DEFINE(channels_atomic, CAVS_HDA_MAX_CHANNELS);
+};
+
+struct cavs_hda_dma_cfg {
+ uint32_t base;
+ uint32_t dma_channels;
+ enum dma_channel_direction direction;
+};
+
+int cavs_hda_dma_host_in_config(const struct device *dev,
+ uint32_t channel,
+ struct dma_config *dma_cfg);
+
+int cavs_hda_dma_host_out_config(const struct device *dev,
+ uint32_t channel,
+ struct dma_config *dma_cfg);
+
+int cavs_hda_dma_host_reload(const struct device *dev, uint32_t channel,
+ uint32_t src, uint32_t dst, size_t size);
+
+int cavs_hda_dma_status(const struct device *dev, uint32_t channel,
+ struct dma_status *stat);
+
+int cavs_hda_dma_start(const struct device *dev, uint32_t channel);
+
+int cavs_hda_dma_stop(const struct device *dev, uint32_t channel);
+
+int cavs_hda_dma_init(const struct device *dev);
+
+#endif /* ZEPHYR_DRIVERS_DMA_DMA_CAVS_HDA_COMMON_H_ */
diff --git a/drivers/dma/dma_cavs_hda_host_in.c b/drivers/dma/dma_cavs_hda_host_in.c
new file mode 100644
index 0000000..12071e3
--- /dev/null
+++ b/drivers/dma/dma_cavs_hda_host_in.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2022 Intel Corporation.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#define DT_DRV_COMPAT intel_cavs_hda_host_in
+
+#include <drivers/dma.h>
+#include <cavs_hda.h>
+#include "dma_cavs_hda.h"
+
+#define LOG_LEVEL CONFIG_DMA_LOG_LEVEL
+#include <logging/log.h>
+LOG_MODULE_REGISTER(dma_cavs_hda_dma_host_in);
+
+static const struct dma_driver_api cavs_hda_dma_host_in_api = {
+ .config = cavs_hda_dma_host_in_config,
+ .reload = cavs_hda_dma_host_reload,
+ .start = cavs_hda_dma_start,
+ .stop = cavs_hda_dma_stop,
+ .get_status = cavs_hda_dma_status,
+};
+
+#define CAVS_HDA_DMA_HOST_IN_INIT(inst) \
+ static const struct cavs_hda_dma_cfg cavs_hda_dma##inst##_config = { \
+ .base = DT_INST_REG_ADDR(inst), \
+ .dma_channels = DT_INST_PROP(inst, dma_channels), \
+ .direction = MEMORY_TO_HOST \
+ }; \
+ \
+ static struct cavs_hda_dma_data cavs_hda_dma##inst##_data = {}; \
+ \
+ DEVICE_DT_INST_DEFINE(inst, &cavs_hda_dma_init, NULL, &cavs_hda_dma##inst##_data, \
+ &cavs_hda_dma##inst##_config, POST_KERNEL, CONFIG_DMA_INIT_PRIORITY, \
+ &cavs_hda_dma_host_in_api);
+
+DT_INST_FOREACH_STATUS_OKAY(CAVS_HDA_DMA_HOST_IN_INIT)
diff --git a/drivers/dma/dma_cavs_hda_host_out.c b/drivers/dma/dma_cavs_hda_host_out.c
new file mode 100644
index 0000000..d31310e
--- /dev/null
+++ b/drivers/dma/dma_cavs_hda_host_out.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2022 Intel Corporation.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#define DT_DRV_COMPAT intel_cavs_hda_host_out
+
+#include <drivers/dma.h>
+#include <cavs_hda.h>
+#include "dma_cavs_hda.h"
+
+#define LOG_LEVEL CONFIG_DMA_LOG_LEVEL
+#include <logging/log.h>
+LOG_MODULE_REGISTER(dma_cavs_hda_dma_host_out);
+
+static const struct dma_driver_api cavs_hda_dma_host_out_api = {
+ .config = cavs_hda_dma_host_out_config,
+ .reload = cavs_hda_dma_host_reload,
+ .start = cavs_hda_dma_start,
+ .stop = cavs_hda_dma_stop,
+ .get_status = cavs_hda_dma_status,
+};
+
+#define CAVS_HDA_DMA_HOST_OUT_INIT(inst) \
+ static const struct cavs_hda_dma_cfg cavs_hda_dma##inst##_config = { \
+ .base = DT_INST_REG_ADDR(inst), \
+ .dma_channels = DT_INST_PROP(inst, dma_channels), \
+ .direction = HOST_TO_MEMORY \
+ }; \
+ \
+ static struct cavs_hda_dma_data cavs_hda_dma##inst##_data = {}; \
+ \
+ DEVICE_DT_INST_DEFINE(inst, &cavs_hda_dma_init, NULL, &cavs_hda_dma##inst##_data, \
+ &cavs_hda_dma##inst##_config, POST_KERNEL, CONFIG_DMA_INIT_PRIORITY, \
+ &cavs_hda_dma_host_out_api);
+
+DT_INST_FOREACH_STATUS_OKAY(CAVS_HDA_DMA_HOST_OUT_INIT)
diff --git a/dts/bindings/dma/intel,cavs-hda-host-in.yaml b/dts/bindings/dma/intel,cavs-hda-host-in.yaml
new file mode 100644
index 0000000..da50fdb
--- /dev/null
+++ b/dts/bindings/dma/intel,cavs-hda-host-in.yaml
@@ -0,0 +1,8 @@
+# Copyright (c) 2022 Intel Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+description: Intel cAVS HDA Host In controller
+
+compatible: "intel,cavs-hda-host-in"
+
+include: intel,cavs-hda.yaml
diff --git a/dts/bindings/dma/intel,cavs-hda-host-out.yaml b/dts/bindings/dma/intel,cavs-hda-host-out.yaml
new file mode 100644
index 0000000..cb92f0f
--- /dev/null
+++ b/dts/bindings/dma/intel,cavs-hda-host-out.yaml
@@ -0,0 +1,8 @@
+# Copyright (c) 2022 Intel Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+description: Intel cAVS HDA Host Out controller
+
+compatible: "intel,cavs-hda-host-out"
+
+include: intel,cavs-hda.yaml
diff --git a/dts/bindings/dma/intel,cavs-hda.yaml b/dts/bindings/dma/intel,cavs-hda.yaml
new file mode 100644
index 0000000..5218373
--- /dev/null
+++ b/dts/bindings/dma/intel,cavs-hda.yaml
@@ -0,0 +1,16 @@
+# Copyright (c) 2022 Intel Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+# Common fields for HDA DMA controllers
+
+include: dma-controller.yaml
+
+properties:
+ reg:
+ required: true
+
+ dma-channels:
+ required: true
+
+ "#dma-cells":
+ const: 1
diff --git a/dts/xtensa/intel/intel_cavs.dtsi b/dts/xtensa/intel/intel_cavs.dtsi
index b9b89fb..4983578 100644
--- a/dts/xtensa/intel/intel_cavs.dtsi
+++ b/dts/xtensa/intel/intel_cavs.dtsi
@@ -31,5 +31,25 @@
status = "okay";
};
+
+		hda_host_out: dma@72800 {
+ compatible = "intel,cavs-hda-host-out";
+ #dma-cells = <1>;
+ reg = <0x00072800 0x40>;
+ dma-channels = <7>;
+ label = "HDA_HOST_OUT";
+
+ status = "okay";
+ };
+
+		hda_host_in: dma@72c00 {
+ compatible = "intel,cavs-hda-host-in";
+ #dma-cells = <1>;
+ reg = <0x00072c00 0x40>;
+ dma-channels = <7>;
+ label = "HDA_HOST_IN";
+
+ status = "okay";
+ };
};
};
diff --git a/soc/xtensa/intel_adsp/Kconfig.defconfig b/soc/xtensa/intel_adsp/Kconfig.defconfig
index 1ced4c6..0b118dc 100644
--- a/soc/xtensa/intel_adsp/Kconfig.defconfig
+++ b/soc/xtensa/intel_adsp/Kconfig.defconfig
@@ -19,6 +19,10 @@
default y
depends on DMA
+config DMA_CAVS_HDA
+ default y
+ depends on DMA
+
config I2S_CAVS
default y
depends on I2S
diff --git a/tests/boards/intel_adsp/hda/CMakeLists.txt b/tests/boards/intel_adsp/hda/CMakeLists.txt
index 732ab1b..004b82a 100644
--- a/tests/boards/intel_adsp/hda/CMakeLists.txt
+++ b/tests/boards/intel_adsp/hda/CMakeLists.txt
@@ -4,4 +4,4 @@
find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
project(intel_adsp)
-target_sources(app PRIVATE src/main.c src/smoke.c)
+target_sources(app PRIVATE src/main.c src/smoke.c src/dma.c)
diff --git a/tests/boards/intel_adsp/hda/src/dma.c b/tests/boards/intel_adsp/hda/src/dma.c
new file mode 100644
index 0000000..829b2d2
--- /dev/null
+++ b/tests/boards/intel_adsp/hda/src/dma.c
@@ -0,0 +1,242 @@
+/* Copyright (c) 2022 Intel Corporation
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include "arch/xtensa/cache.h"
+#include <kernel.h>
+#include <ztest.h>
+#include <cavs_ipc.h>
+#include <cavs_hda.h>
+#include <drivers/dma.h>
+#include "tests.h"
+
+#define IPC_TIMEOUT K_MSEC(500)
+#define DMA_BUF_SIZE 256
+#define TRANSFER_SIZE 256
+#define TRANSFER_COUNT 8
+
+static __aligned(128) uint8_t dma_buf[DMA_BUF_SIZE];
+
+static volatile int msg_cnt;
+static volatile int msg_res;
+
+static bool ipc_message(const struct device *dev, void *arg,
+ uint32_t data, uint32_t ext_data)
+{
+ printk("HDA message received, data %u, ext_data %u\n", data, ext_data);
+ msg_res = data;
+ msg_cnt++;
+ return true;
+}
+
+/*
+ * Tests host input streams with the DMA API
+ *
+ * Note that the order of operations in this test is important: if it is not
+ * followed, things will potentially fail in horrible and unexpected ways.
+ */
+void test_hda_host_in_dma(void)
+{
+ const struct device *dma;
+ int res, channel;
+ uint32_t last_msg_cnt;
+
+	printk("dma testing hda host in with fifo buffer at address %p, size %d\n",
+		dma_buf, DMA_BUF_SIZE);
+
+	cavs_ipc_set_message_handler(CAVS_HOST_DEV, ipc_message, NULL);
+
+ /* setup a ramp in the buffer */
+ for (uint32_t i = 0; i < DMA_BUF_SIZE; i++) {
+ dma_buf[i] = i & 0xff;
+ }
+
+#if (IS_ENABLED(CONFIG_KERNEL_COHERENCE))
+ zassert_true(arch_mem_coherent(dma_buf), "Buffer is unexpectedly incoherent!");
+#else
+ /* The buffer is in the cached address range and must be flushed */
+ zassert_false(arch_mem_coherent(dma_buf), "Buffer is unexpectedly coherent!");
+ z_xtensa_cache_flush(dma_buf, DMA_BUF_SIZE);
+#endif
+
+ dma = device_get_binding("HDA_HOST_IN");
+ zassert_not_null(dma, "Expected a valid DMA device pointer");
+
+ channel = dma_request_channel(dma, NULL);
+ zassert_true(channel >= 0, "Expected a valid DMA channel");
+
+ printk("dma channel: "); cavs_hda_dbg("host_in", HDA_HOST_IN_BASE, channel);
+
+ hda_ipc_msg(CAVS_HOST_DEV, IPCCMD_HDA_RESET, channel, IPC_TIMEOUT);
+
+ printk("host reset: "); cavs_hda_dbg("host_in", HDA_HOST_IN_BASE, channel);
+
+ hda_ipc_msg(CAVS_HOST_DEV, IPCCMD_HDA_CONFIG,
+ channel | (DMA_BUF_SIZE << 8), IPC_TIMEOUT);
+ printk("host config: "); cavs_hda_dbg("host_in", HDA_HOST_IN_BASE, channel);
+
+ struct dma_block_config block_cfg = {
+ .block_size = DMA_BUF_SIZE,
+ .source_address = (uint32_t)(&dma_buf[0]),
+ };
+
+ struct dma_config dma_cfg = {
+ .block_count = 1,
+ .channel_direction = MEMORY_TO_HOST,
+ .head_block = &block_cfg,
+ };
+
+ res = dma_config(dma, channel, &dma_cfg);
+ printk("dsp dma config: "); cavs_hda_dbg("host_in", HDA_HOST_IN_BASE, channel);
+ zassert_ok(res, "Expected dma config to succeed");
+
+ res = dma_start(dma, channel);
+ printk("dsp dma start: "); cavs_hda_dbg("host_in", HDA_HOST_IN_BASE, channel);
+ zassert_ok(res, "Expected dma start to succeed");
+
+ hda_ipc_msg(CAVS_HOST_DEV, IPCCMD_HDA_START, channel, IPC_TIMEOUT);
+
+ printk("host start: "); cavs_hda_dbg("host_in", HDA_HOST_IN_BASE, channel);
+
+ for (uint32_t i = 0; i < TRANSFER_COUNT; i++) {
+ res = dma_reload(dma, channel, 0, 0, DMA_BUF_SIZE);
+ zassert_ok(res, "Expected dma reload to succeed");
+ printk("dsp dma reload: "); cavs_hda_dbg("host_in", HDA_HOST_IN_BASE, channel);
+
+ struct dma_status status;
+ int j;
+		/* wait up to 10 ms */
+ for (j = 0; j < 100; j++) {
+ res = dma_get_status(dma, channel, &status);
+ zassert_ok(res, "Expected dma status to succeed");
+ if (status.read_position == status.write_position) {
+ break;
+ }
+ k_busy_wait(100);
+ }
+		printk("dsp read write equal after %d us: ", j * 100);
+ cavs_hda_dbg("host_in", HDA_HOST_IN_BASE, channel);
+
+ last_msg_cnt = msg_cnt;
+ hda_ipc_msg(CAVS_HOST_DEV, IPCCMD_HDA_VALIDATE, channel,
+ IPC_TIMEOUT);
+
+ WAIT_FOR(msg_cnt > last_msg_cnt);
+ zassert_true(msg_res == 1,
+ "Expected data validation to be true from Host");
+ }
+
+ hda_ipc_msg(CAVS_HOST_DEV, IPCCMD_HDA_RESET,
+ channel, IPC_TIMEOUT);
+
+ res = dma_stop(dma, channel);
+ zassert_ok(res, "Expected dma stop to succeed");
+}
+
+/*
+ * Tests host output streams with the DMA API
+ */
+void test_hda_host_out_dma(void)
+{
+ const struct device *dma;
+ int res, channel;
+ bool is_ramp;
+
+	printk("dma testing hda host out with fifo buffer at address %p, size %d\n",
+		dma_buf, DMA_BUF_SIZE);
+
+	cavs_ipc_set_message_handler(CAVS_HOST_DEV, ipc_message, NULL);
+
+ dma = device_get_binding("HDA_HOST_OUT");
+ zassert_not_null(dma, "Expected a valid DMA device pointer");
+
+ channel = dma_request_channel(dma, NULL);
+ zassert_true(channel >= 0, "Expected a valid DMA channel");
+
+ printk("dma channel: "); cavs_hda_dbg("host_out", HDA_HOST_OUT_BASE, channel);
+
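+	/* The host side numbers its streams with the host out streams
+	 * following the 7 host in streams, hence channel + 7 in the IPC
+	 * messages below.
+	 */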
+ hda_ipc_msg(CAVS_HOST_DEV, IPCCMD_HDA_RESET,
+ (channel + 7), IPC_TIMEOUT);
+
+ printk("host reset: "); cavs_hda_dbg("host_out", HDA_HOST_OUT_BASE, channel);
+
+ hda_ipc_msg(CAVS_HOST_DEV, IPCCMD_HDA_CONFIG,
+ (channel + 7) | (DMA_BUF_SIZE << 8), IPC_TIMEOUT);
+
+ printk("host config: "); cavs_hda_dbg("host_out", HDA_HOST_OUT_BASE, channel);
+
+ struct dma_block_config block_cfg = {
+ .block_size = DMA_BUF_SIZE,
+		.dest_address = (uint32_t)(&dma_buf[0]),
+ };
+
+ struct dma_config dma_cfg = {
+ .block_count = 1,
+ .channel_direction = HOST_TO_MEMORY,
+ .head_block = &block_cfg,
+ };
+
+ res = dma_config(dma, channel, &dma_cfg);
+ printk("dsp dma config: "); cavs_hda_dbg("host_out", HDA_HOST_OUT_BASE, channel);
+ zassert_ok(res, "Expected dma config to succeed");
+
+ res = dma_start(dma, channel);
+ printk("dsp dma start: "); cavs_hda_dbg("host_out", HDA_HOST_OUT_BASE, channel);
+ zassert_ok(res, "Expected dma start to succeed");
+
+ hda_ipc_msg(CAVS_HOST_DEV, IPCCMD_HDA_START, (channel + 7), IPC_TIMEOUT);
+
+ printk("host start: ");
+ cavs_hda_dbg("host_out", HDA_HOST_OUT_BASE, channel);
+
+ for (uint32_t i = 0; i < TRANSFER_COUNT; i++) {
+ hda_ipc_msg(CAVS_HOST_DEV, IPCCMD_HDA_SEND,
+ (channel + 7) | (DMA_BUF_SIZE << 8), IPC_TIMEOUT);
+
+ printk("host send: ");
+ cavs_hda_dbg("host_out", HDA_HOST_OUT_BASE, channel);
+
+ /* TODO add a dma_poll() style call for xfer ready/complete maybe? */
+ WAIT_FOR(cavs_hda_buf_full(HDA_HOST_OUT_BASE, channel));
+ printk("dsp wait for full: ");
+ cavs_hda_dbg("host_out", HDA_HOST_OUT_BASE, channel);
+
+#if (IS_ENABLED(CONFIG_KERNEL_COHERENCE))
+ zassert_true(arch_mem_coherent(dma_buf), "Buffer is unexpectedly incoherent!");
+#else
+ /* The buffer is in the cached address range and must be invalidated
+ * prior to reading.
+ */
+ zassert_false(arch_mem_coherent(dma_buf), "Buffer is unexpectedly coherent!");
+ z_xtensa_cache_inv(dma_buf, DMA_BUF_SIZE);
+#endif
+
+		is_ramp = true;
+		for (int j = 0; j < DMA_BUF_SIZE; j++) {
+			if (dma_buf[j] != j) {
+				printk("dma_buf[%d] = %d, expected %d\n", j, dma_buf[j], j);
+				is_ramp = false;
+			}
+		}
+ zassert_true(is_ramp, "Expected data to be a ramp");
+
+ res = dma_reload(dma, channel, 0, 0, DMA_BUF_SIZE);
+ zassert_ok(res, "Expected dma reload to succeed");
+		printk("dsp dma reload: "); cavs_hda_dbg("host_out", HDA_HOST_OUT_BASE, channel);
+ }
+
+ hda_ipc_msg(CAVS_HOST_DEV, IPCCMD_HDA_RESET, (channel + 7), IPC_TIMEOUT);
+
+ printk("host reset: "); cavs_hda_dbg("host_out", HDA_HOST_OUT_BASE, channel);
+
+ res = dma_stop(dma, channel);
+ zassert_ok(res, "Expected dma stop to succeed");
+ printk("dsp dma stop: "); cavs_hda_dbg("host_out", HDA_HOST_OUT_BASE, channel);
+}
diff --git a/tests/boards/intel_adsp/hda/src/main.c b/tests/boards/intel_adsp/hda/src/main.c
index cea6fd8..0448149 100644
--- a/tests/boards/intel_adsp/hda/src/main.c
+++ b/tests/boards/intel_adsp/hda/src/main.c
@@ -10,7 +10,8 @@
{
ztest_test_suite(intel_adsp_hda,
ztest_unit_test(test_hda_host_in_smoke),
- ztest_unit_test(test_hda_host_out_smoke)
+ ztest_unit_test(test_hda_host_out_smoke),
+			 ztest_unit_test(test_hda_host_in_dma),
+			 ztest_unit_test(test_hda_host_out_dma)
);
ztest_run_test_suite(intel_adsp_hda);
diff --git a/tests/boards/intel_adsp/hda/src/tests.h b/tests/boards/intel_adsp/hda/src/tests.h
index 0b091e9..dd7dae3 100644
--- a/tests/boards/intel_adsp/hda/src/tests.h
+++ b/tests/boards/intel_adsp/hda/src/tests.h
@@ -13,6 +13,7 @@
void test_hda_host_in_smoke(void);
void test_hda_host_out_smoke(void);
+void test_hda_host_in_dma(void);
+void test_hda_host_out_dma(void);
static inline void hda_ipc_msg(const struct device *dev, uint32_t data,
uint32_t ext, k_timeout_t timeout)