diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index de914a91362903ff7e9dfebf9f80c5142edbe28c..2c8c8df07c7745de9f04dac6c00edd441e758086 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -1755,8 +1755,10 @@ static void create_virtio_iommu_dt_bindings(VirtMachineState *vms)
static void create_ub(VirtMachineState *vms)
{
DeviceState *ubc;
+ DeviceState *ummu;
MemoryRegion *mmio_reg;
MemoryRegion *mmio_alias;
+ BusControllerState *ubc_state;
if (ub_cfg_addr_map_table_init() < 0) {
qemu_log("failed to init ub cfg addr map table\n");
@@ -1795,6 +1797,19 @@ static void create_ub(VirtMachineState *vms)
vms->memmap[VIRT_UB_IDEV_ERS].size);
memory_region_add_subregion(get_system_memory(),
vms->memmap[VIRT_UB_IDEV_ERS].base, mmio_alias);
+ if (vms->ummu) {
+ ummu = qdev_new(TYPE_UB_UMMU);
+ ubc_state = BUS_CONTROLLER(ubc);
+ object_property_set_link(OBJECT(ummu), "primary-bus", OBJECT(ubc_state->bus), &error_abort);
+        /* default: set ummu to nested mode */
+ object_property_set_bool(OBJECT(ummu), "nested", true, &error_abort);
+ qdev_prop_set_uint64(ummu, "ub-ummu-reg-size", UMMU_REG_SIZE);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(ummu), &error_fatal);
+ sysbus_mmio_map(SYS_BUS_DEVICE(ummu), 0,
+ vms->memmap[VIRT_UBC_BASE_REG].base + UMMU_REG_OFFSET);
+ } else {
+ qemu_log("ummu disabled.\n");
+ }
}
#endif // CONFIG_UB
static void create_pcie(VirtMachineState *vms)
diff --git a/hw/ub/meson.build b/hw/ub/meson.build
index d629174ef8bf508712ce317920f7a8f5ff6173ba..400fa553d810a5e1d447183310a2551c6b6e6e1e 100644
--- a/hw/ub/meson.build
+++ b/hw/ub/meson.build
@@ -2,6 +2,7 @@ ub_ss = ss.source_set()
ub_ss.add(files(
'ub.c',
'ub_ubc.c',
+ 'ub_ummu.c',
'ub_config.c',
'ub_acpi.c',
'ub_enum.c',
diff --git a/hw/ub/trace-events b/hw/ub/trace-events
index d24c754de1a46ce36db149941349a43e76e476f0..e53af1bd757b58b5c423d1a77f0c1f93eea1658a 100644
--- a/hw/ub/trace-events
+++ b/hw/ub/trace-events
@@ -1,3 +1,20 @@
+# See docs/devel/tracing.rst for syntax documentation.
+
+# ub_ummu.c
+ummu_mcmdq_reg_writel(uint32_t idx, uint32_t prod, uint32_t cons) "mcmdq process: idx(%u), prod(%u), cons(%u)"
+mcmdq_process_task(uint32_t mcmdq_idx, const char *cmd) "mcmdq_idx: %u, cmd: %s"
+mcmdq_cmd_sync_handler(uint32_t mcmdq_idx, uint64_t usi_addr, uint32_t usi_data) "CMD_SYNC: mcmdq_idx(%u) usi_addr(0x%lx) usi_data(0x%x)"
+mcmdq_cmd_plbi_x_process(uint32_t mcmdq_idx, const char *cmd) "CMD_PLBIx: mcmdq_idx(%u) cmd(%s)"
+mcmdq_cmd_tlbi_x_process(uint32_t mcmdq_idx, const char *cmd) "CMD_TLBIx: mcmdq_idx(%u) cmd(%s)"
+mcmdq_cmd_create_kvtbl(uint32_t mcmdq_idx, uint32_t dest_eid, uint32_t tecte_tag) "CMD_CREATE_KVTBL: mcmdq_idx(%u) dest_eid(%u) tecte_tag(%u)"
+mcmdq_cmd_delete_kvtbl(uint32_t mcmdq_idx, uint32_t dest_eid) "CMD_DELETE_KVTBL: mcmdq_idx(%u) dest_eid(%u)"
+mcmdq_cmd_null(uint32_t mcmdq_idx, uint64_t addr, void *hva, uint64_t size, uint64_t rb_size) "CMD_NULL: mcmdq_idx(%u) addr(0x%lx) hva(%p) size(0x%lx) rb_size(0x%lx)"
+ummu_mcmdq_base_reg_writell(uint8_t idx, uint64_t base, uint8_t log2size) "idx(%u) base(0x%lx) log2size(0x%x)"
+ummu_eventq_req_writell(uint64_t base, uint8_t log2size) "base(0x%lx) log2size(0x%x)"
+ummu_eventq_usi_reg_writell(uint64_t addr) "set eventq usi addr 0x%lx"
+ummu_glberr_usi_reg_writell(uint64_t addr) "set glb err usi addr 0x%lx"
+ummu_mapt_ctx_base_reg_writell(uint64_t addr) "config mapt ctx base 0x%lx"
+
# ub.c
ub_update_mappings(int i, uint64_t region_size, uint64_t old_addr, uint64_t new_addr) "region[%d], size: 0x%lx, old_addr: 0x%lx, new_addr: 0x%lx"
ub_update_mappings_add(uint64_t new_addr) "commit region addr to 0x%lx"
diff --git a/hw/ub/ub_ubc.c b/hw/ub/ub_ubc.c
index 6d2441f3800ba513860a3b93310a8d39923f737d..0d5a31a22ab2f0ab42ceac71a05ea7119f921418 100644
--- a/hw/ub/ub_ubc.c
+++ b/hw/ub/ub_ubc.c
@@ -26,6 +26,7 @@
#include "hw/ub/ub.h"
#include "hw/ub/ub_bus.h"
#include "hw/ub/ub_ubc.h"
+#include "hw/ub/ub_ummu.h"
#include "hw/ub/ub_config.h"
#include "hw/ub/hisi/ubc.h"
#include "hw/ub/hisi/ub_mem.h"
@@ -433,6 +434,9 @@ static void ub_bus_controller_dev_realize(UBDevice *dev, Error **errp)
dev->dev_type = UB_TYPE_IBUS_CONTROLLER;
ub_bus_controller_dev_config_space_init(dev);
+ if (0 > ummu_associating_with_ubc(ubc)) {
+ qemu_log("failed to associating ubc with ummu. %s\n", dev->name);
+ }
}
static Property ub_bus_controller_dev_properties[] = {
diff --git a/hw/ub/ub_ummu.c b/hw/ub/ub_ummu.c
new file mode 100644
index 0000000000000000000000000000000000000000..033fcb9a34e461117258808dfdf182d0e2243d55
--- /dev/null
+++ b/hw/ub/ub_ummu.c
@@ -0,0 +1,1201 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "hw/arm/virt.h"
+#include "hw/qdev-properties.h"
+#include "hw/ub/ub.h"
+#include "hw/ub/hisi/ummu.h"
+#include "hw/ub/ub_bus.h"
+#include "hw/ub/ub_ubc.h"
+#include "hw/ub/ub_ummu.h"
+#include "hw/ub/ub_config.h"
+#include "hw/ub/hisi/ubc.h"
+#include "migration/vmstate.h"
+#include "ub_ummu_internal.h"
+#include "sysemu/dma.h"
+#include "hw/arm/mmu-translate-common.h"
+#include "hw/ub/ub_ubc.h"
+#include "qemu/error-report.h"
+#include "trace.h"
+
+/* Human-readable names for MCMDQ command opcodes, used in traces and logs. */
+static const char *const mcmdq_cmd_strings[MCMDQ_CMD_MAX] = {
+    [CMD_SYNC] = "CMD_SYNC",
+    [CMD_STALL_RESUME] = "CMD_STALL_RESUME",
+    [CMD_PREFET_CFG] = "CMD_PREFET_CFG",
+    [CMD_CFGI_TECT] = "CMD_CFGI_TECT",
+    [CMD_CFGI_TECT_RANGE] = "CMD_CFGI_TECT_RANGE",
+    [CMD_CFGI_TCT] = "CMD_CFGI_TCT",
+    [CMD_CFGI_TCT_ALL] = "CMD_CFGI_TCT_ALL",
+    [CMD_CFGI_VMS_PIDM] = "CMD_CFGI_VMS_PIDM",
+    [CMD_PLBI_OS_EID] = "CMD_PLBI_OS_EID",
+    [CMD_PLBI_OS_EIDTID] = "CMD_PLBI_OS_EIDTID",
+    [CMD_PLBI_OS_VA] = "CMD_PLBI_OS_VA",
+    [CMD_TLBI_OS_ALL] = "CMD_TLBI_OS_ALL",
+    [CMD_TLBI_OS_TID] = "CMD_TLBI_OS_TID",
+    [CMD_TLBI_OS_VA] = "CMD_TLBI_OS_VA",
+    [CMD_TLBI_OS_VAA] = "CMD_TLBI_OS_VAA",
+    [CMD_TLBI_HYP_ALL] = "CMD_TLBI_HYP_ALL",
+    [CMD_TLBI_HYP_TID] = "CMD_TLBI_HYP_TID",
+    [CMD_TLBI_HYP_VA] = "CMD_TLBI_HYP_VA",
+    [CMD_TLBI_HYP_VAA] = "CMD_TLBI_HYP_VAA",
+    [CMD_TLBI_S1S2_VMALL] = "CMD_TLBI_S1S2_VMALL",
+    [CMD_TLBI_S2_IPA] = "CMD_TLBI_S2_IPA",
+    [CMD_TLBI_NS_OS_ALL] = "CMD_TLBI_NS_OS_ALL",
+    [CMD_RESUME] = "CMD_RESUME",
+    [CMD_CREATE_KVTBL] = "CMD_CREATE_KVTBL",
+    [CMD_DELETE_KVTBL] = "CMD_DELETE_KVTBL",
+    [CMD_TLBI_OS_ALL_U] = "CMD_TLBI_OS_ALL_U",
+    [CMD_TLBI_OS_ASID_U] = "CMD_TLBI_OS_ASID_U",
+    [CMD_TLBI_OS_VA_U] = "CMD_TLBI_OS_VA_U",
+    [CMD_TLBI_OS_VAA_U] = "CMD_TLBI_OS_VAA_U",
+    [CMD_TLBI_HYP_ASID_U] = "CMD_TLBI_HYP_ASID_U",
+    [CMD_TLBI_HYP_VA_U] = "CMD_TLBI_HYP_VA_U",
+    [CMD_TLBI_S1S2_VMALL_U] = "CMD_TLBI_S1S2_VMALL_U",
+    [CMD_TLBI_S2_IPA_U] = "CMD_TLBI_S2_IPA_U",
+};
+
+/* Global list of all realized UMMU instances. */
+QLIST_HEAD(, UMMUState) ub_umms;
+/*
+ * Look up a registered UMMU instance by its bus number.
+ * Returns NULL when no UMMU with the given bus number exists.
+ */
+UMMUState *ummu_find_by_bus_num(uint8_t bus_num)
+{
+    UMMUState *ummu;
+    QLIST_FOREACH(ummu, &ub_umms, node) {
+        if (ummu->bus_num == bus_num) {
+            return ummu;
+        }
+    }
+    return NULL;
+}
+
+/* Acknowledge a CTRL0 write by mirroring its value into CTRL0_ACK. */
+static void ummu_cr0_process_task(UMMUState *u)
+{
+    u->ctrl0_ack = u->ctrl[0];
+}
+
+/*
+ * 32-bit read handler for the per-queue MCMDQ register window.
+ * The queue index is decoded from the register offset; reads outside
+ * PROD/CONS return all-ones.
+ */
+static uint64_t ummu_mcmdq_reg_readl(UMMUState *u, hwaddr offset)
+{
+    uint8_t mcmdq_idx;
+    uint64_t val = UINT64_MAX;
+
+    /*
+     * Shift before narrowing: casting to uint8_t first would truncate
+     * the masked offset and lose any index bits above bit 7.
+     */
+    mcmdq_idx = (uint8_t)((offset & MCMDQ_IDX_MASK) >> __bf_shf(MCMDQ_IDX_MASK));
+    if (mcmdq_idx >= UMMU_MAX_MCMDQS) {
+        qemu_log("invalid idx %u, offset is 0x%lx\n", mcmdq_idx, offset);
+        return val;
+    }
+
+    switch (offset & MCMDQ_BASE_ADDR_MASK) {
+    case MCMDQ_PROD_BASE_ADDR:
+        val = u->mcmdqs[mcmdq_idx].queue.prod;
+        break;
+    case MCMDQ_CONS_BASE_ADDR:
+        val = u->mcmdqs[mcmdq_idx].queue.cons;
+        break;
+    default:
+        qemu_log("ummu cannot handle 32-bit mcmdq reg read access at 0x%lx\n", offset);
+        break;
+    }
+
+    return val;
+}
+
+/*
+ * Fetch the MAPT command-queue context entry for @qid from guest memory
+ * starting at @base_addr, converting each word to host endianness.
+ * Returns 0 on success, -EINVAL when the DMA read fails.
+ */
+static int ummu_mapt_get_cmdq_base(UMMUState *u, dma_addr_t base_addr, uint32_t qid, MAPTCmdqBase *base)
+{
+    int ret, i;
+    dma_addr_t addr = base_addr + qid * MAPT_CMDQ_CTXT_BASE_BYTES;
+
+    ret = dma_memory_read(&address_space_memory, addr, base, sizeof(*base),
+                          MEMTXATTRS_UNSPECIFIED);
+    if (ret != MEMTX_OK) {
+        qemu_log("Cannot fetch mapt cmdq ctx at address=0x%lx\n", addr);
+        return -EINVAL;
+    }
+
+    /* Context words are little-endian in guest memory. */
+    for (i = 0; i < ARRAY_SIZE(base->word); i++) {
+        le32_to_cpus(&base->word[i]);
+    }
+
+    return 0;
+}
+
+/*
+ * Write the MAPT command-queue context entry for @qid back to guest
+ * memory word by word, converting to little-endian on the way out.
+ * Returns 0 on success, -1 when any DMA write fails.
+ */
+static int ummu_mapt_update_cmdq_base(UMMUState *u, dma_addr_t base_addr, uint32_t qid, MAPTCmdqBase *base)
+{
+    int i;
+    dma_addr_t addr = base_addr + qid * MAPT_CMDQ_CTXT_BASE_BYTES;
+
+    for (i = 0; i < ARRAY_SIZE(base->word); i++, addr += sizeof(uint32_t)) {
+        uint32_t tmp = cpu_to_le32(base->word[i]);
+        if (dma_memory_write(&address_space_memory, addr, &tmp,
+                             sizeof(uint32_t), MEMTXATTRS_UNSPECIFIED)) {
+            qemu_log("dma failed to write to addr 0x%lx\n", addr);
+            return -1;
+        }
+    }
+
+    return 0;
+}
+
+/*
+ * Read handler for the MAPT controller page: returns the UCMDQ/UCPLQ
+ * producer/consumer index selected by @offset, fetched from the per-queue
+ * context in guest memory. Returns 0 before the context base is set, and
+ * all-ones on fetch or decode failure.
+ */
+static uint64_t ummu_mapt_ctrlr_page_read_process(UMMUState *u, hwaddr offset)
+{
+    MAPTCmdqBase base;
+    uint32_t qid = ummu_mapt_cmdq_get_qid(u, offset);
+    dma_addr_t addr = MAPT_CMDQ_CTXT_BASE_ADDR(u->mapt_cmdq_ctxt_base);
+    int ret;
+    uint64_t val = UINT64_MAX;
+
+    if (!addr) {
+        /* mapt ctrlr page not init, return default val 0 */
+        return 0;
+    }
+
+    ret = ummu_mapt_get_cmdq_base(u, addr, qid, &base);
+    if (ret) {
+        qemu_log("failed to get mapt cmdq base.\n");
+        return val;
+    }
+
+    switch (offset & UCMDQ_UCPLQ_CI_PI_MASK) {
+    case UCMDQ_PI:
+        val = ummu_mapt_cmdq_base_get_ucmdq_pi(&base);
+        break;
+    case UCMDQ_CI:
+        val = ummu_mapt_cmdq_base_get_ucmdq_ci(&base);
+        break;
+    case UCPLQ_PI:
+        val = ummu_mapt_cmdq_base_get_ucplq_pi(&base);
+        break;
+    case UCPLQ_CI:
+        val = ummu_mapt_cmdq_base_get_ucplq_ci(&base);
+        break;
+    default:
+        qemu_log("cannot process addr(0x%lx) mapt ctrlr page read.\n", offset);
+        return val;
+    }
+
+    return val;
+}
+
+/*
+ * 16-bit register read dispatcher; only the MAPT controller page supports
+ * 16-bit access. Unknown offsets return all-ones.
+ */
+static uint64_t ummu_reg_readw(UMMUState *u, hwaddr offset)
+{
+    uint64_t val = UINT64_MAX;
+
+    switch (offset) {
+    case A_UCMDQ_PI_START_REG...A_UCPLQ_CI_END_REG:
+        val = ummu_mapt_ctrlr_page_read_process(u, offset);
+        break;
+    default:
+        qemu_log("ummu cannot handle 16-bit read access at: 0x%lx\n", offset);
+        break;
+    }
+
+    return val;
+}
+
+/*
+ * 32-bit register read dispatcher for the UMMU MMIO window.
+ * Unknown offsets are logged and return all-ones; a set of registers that
+ * are accepted but unmodelled read back as 0.
+ */
+static uint64_t ummu_reg_readl(UMMUState *u, hwaddr offset)
+{
+    uint64_t val = UINT64_MAX;
+
+    switch (offset) {
+    case A_CAP0...A_CAP6:
+        /* capability registers are 32-bit wide and contiguous */
+        val = u->cap[(offset - A_CAP0) / 4];
+        break;
+    case A_CTRL0:
+        val = u->ctrl[0];
+        break;
+    case A_CTRL0_ACK:
+        val = u->ctrl0_ack;
+        break;
+    case A_CTRL1:
+        val = u->ctrl[1];
+        break;
+    case A_CTRL2:
+        val = u->ctrl[2];
+        break;
+    case A_CTRL3:
+        val = u->ctrl[3];
+        break;
+    case A_TECT_BASE_CFG:
+        val = u->tect_base_cfg;
+        break;
+    case A_MCMD_QUE_BASE...A_MCMD_QUE_LASTEST_CI:
+        val = ummu_mcmdq_reg_readl(u, offset);
+        break;
+    case A_EVENT_QUE_PI:
+        val = u->eventq.queue.prod;
+        break;
+    case A_EVENT_QUE_CI:
+        val = u->eventq.queue.cons;
+        break;
+    case A_EVENT_QUE_USI_DATA:
+        val = u->eventq.usi_data;
+        break;
+    case A_EVENT_QUE_USI_ATTR:
+        val = u->eventq.usi_attr;
+        break;
+    case A_GLB_INT_EN:
+        val = 0;
+        /* glb err interrupt bit enabled int bit 0 */
+        if (ummu_glb_err_int_en(u)) {
+            val |= 0x1;
+        }
+
+        /* event que interrupt bit enabled in bit 1 */
+        if (ummu_event_que_int_en(u)) {
+            val |= (1 << 1);
+        }
+        break;
+    case A_GLB_ERR:
+        val = u->glb_err.glb_err;
+        break;
+    case A_GLB_ERR_RESP:
+        val = u->glb_err.glb_err_resp;
+        break;
+    case A_GLB_ERR_INT_USI_DATA:
+        val = u->glb_err.usi_data;
+        break;
+    case A_GLB_ERR_INT_USI_ATTR:
+        val = u->glb_err.usi_attr;
+        break;
+    case A_RELEASE_UM_QUEUE_ID:
+        val = u->release_um_queue_id;
+        break;
+    case A_RELEASE_UM_QUEUE:
+        val = u->release_um_queue;
+        break;
+    case A_UCMDQ_PI_START_REG...A_UCPLQ_CI_END_REG:
+        /* MAPT controller page indices live in guest memory, not device state */
+        val = ummu_mapt_ctrlr_page_read_process(u, offset);
+        break;
+    case A_UMCMD_PAGE_SEL:
+        val = u->ucmdq_page_sel;
+        break;
+    case A_UMMU_USER_CONFIG0...A_UMMU_USER_CONFIG11:
+    case A_UMMU_MEM_USI_DATA:
+    case A_UMMU_MEM_USI_ATTR:
+    case A_UMMU_INT_MASK:
+    case A_UMMU_DSTEID_CAM_TABLE_BASE_CFG:
+        /* do nothing, reg return val 0 */
+        val = 0;
+        break;
+    default:
+        qemu_log("ummu cannot handle 32-bit read access at 0x%lx\n", offset);
+        break;
+    }
+
+    return val;
+}
+
+/*
+ * 64-bit read handler for the per-queue MCMDQ register window; only the
+ * queue base register supports 64-bit reads.
+ */
+static uint64_t ummu_mcmdq_reg_readll(UMMUState *u, hwaddr offset)
+{
+    uint8_t mcmdq_idx;
+    uint64_t val = UINT64_MAX;
+
+    /*
+     * Shift before narrowing: casting to uint8_t first would truncate
+     * the masked offset and lose any index bits above bit 7.
+     */
+    mcmdq_idx = (uint8_t)((offset & MCMDQ_IDX_MASK) >> __bf_shf(MCMDQ_IDX_MASK));
+    if (mcmdq_idx >= UMMU_MAX_MCMDQS) {
+        qemu_log("invalid idx %u, offset is 0x%lx\n", mcmdq_idx, offset);
+        return val;
+    }
+
+    switch (offset & MCMDQ_BASE_ADDR_MASK) {
+    case A_MCMD_QUE_BASE:
+        val = u->mcmdqs[mcmdq_idx].queue.base;
+        break;
+    default:
+        qemu_log("ummu cannot handle 64-bit mcmdq reg read access at 0x%lx\n", offset);
+        break;
+    }
+
+    return val;
+}
+
+/*
+ * 64-bit register read dispatcher for the UMMU MMIO window.
+ * Unknown offsets are logged and return all-ones.
+ */
+static uint64_t ummu_reg_readll(UMMUState *u, hwaddr offset)
+{
+    uint64_t val = UINT64_MAX;
+
+    switch (offset) {
+    case A_TECT_BASE0:
+        val = u->tect_base;
+        break;
+    case A_MCMD_QUE_BASE...A_MCMD_QUE_LASTEST_CI:
+        val = ummu_mcmdq_reg_readll(u, offset);
+        break;
+    case A_EVENT_QUE_BASE0:
+        val = u->eventq.queue.base;
+        break;
+    case A_EVENT_QUE_USI_ADDR0:
+        val = u->eventq.usi_addr;
+        break;
+    case A_GLB_ERR_INT_USI_ADDR0:
+        val = u->glb_err.usi_addr;
+        break;
+    case A_MAPT_CMDQ_CTXT_BADDR0:
+        val = u->mapt_cmdq_ctxt_base;
+        break;
+    case A_UMMU_MEM_USI_ADDR0:
+        /* do nothing, reg return val 0 */
+        val = 0;
+        break;
+    default:
+        qemu_log("ummu cannot handle 64-bit read access at 0x%lx\n", offset);
+        break;
+    }
+
+    return val;
+}
+
+/*
+ * MMIO read entry point: dispatch on access size (2/4/8 bytes).
+ * Unsupported sizes read back as all-ones.
+ */
+static uint64_t ummu_reg_read(void *opaque, hwaddr offset, unsigned size)
+{
+    UMMUState *u = opaque;
+    uint64_t val = UINT64_MAX;
+
+    switch (size) {
+    case 2:
+        val = ummu_reg_readw(u, offset);
+        break;
+    case 4:
+        val = ummu_reg_readl(u, offset);
+        break;
+    case 8:
+        val = ummu_reg_readll(u, offset);
+        break;
+    default:
+        break;
+    }
+
+    return val;
+}
+
+/*
+ * Deliver a CMD_SYNC completion USI by writing @data to physical @addr
+ * (presumably the interrupt doorbell address supplied in the command —
+ * NOTE(review): confirm against the UMMU spec).
+ */
+static void mcmdq_cmd_sync_usi_irq(uint64_t addr, uint32_t data)
+{
+    cpu_physical_memory_rw(addr, &data, sizeof(uint32_t), true);
+}
+
+/* CMD_SYNC SEV completion mode is not implemented; log and drop. */
+static void mcmdq_cmd_sync_sev_irq(void)
+{
+    qemu_log("cannot support CMD_SYNC SEV event.\n");
+}
+
+/*
+ * Handle CMD_SYNC: signal completion via USI write or SEV depending on
+ * the completion-mode (CM) field of the command.
+ */
+static void mcmdq_cmd_sync_handler(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx)
+{
+    uint32_t cm = CMD_SYNC_CM(cmd);
+
+    trace_mcmdq_cmd_sync_handler(mcmdq_idx, CMD_SYNC_USI_ADDR(cmd), CMD_SYNC_USI_DATA(cmd));
+    if (cm & CMD_SYNC_CM_USI) {
+        mcmdq_cmd_sync_usi_irq(CMD_SYNC_USI_ADDR(cmd), CMD_SYNC_USI_DATA(cmd));
+    } else if (cm & CMD_SYNC_CM_SEV) {
+        mcmdq_cmd_sync_sev_irq();
+    }
+}
+
+/*
+ * Handle CMD_CREATE_KVTBL: map @dst_eid to @tecte_tag in the key-value
+ * table. An existing entry for the same dst_eid is updated in place,
+ * otherwise a new entry is prepended to the list.
+ */
+static void mcmdq_cmd_create_kvtbl(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx)
+{
+    UMMUKVTblEntry *entry = NULL;
+    uint32_t dst_eid = CMD_CREATE_KVTBL_DEST_EID(cmd);
+    uint32_t tecte_tag = CMD_CREATE_KVTBL_TECTE_TAG(cmd);
+
+    trace_mcmdq_cmd_create_kvtbl(mcmdq_idx, dst_eid, tecte_tag);
+
+    QLIST_FOREACH(entry, &u->kvtbl, list) {
+        if (entry->dst_eid == dst_eid) {
+            qemu_log("update kvtlb dst_eid(0x%x) tecte_tag from 0x%x to 0x%x\n",
+                     dst_eid, entry->tecte_tag, tecte_tag);
+            entry->tecte_tag = tecte_tag;
+            return;
+        }
+    }
+
+    /* g_new() aborts on allocation failure, so no NULL check is needed. */
+    entry = g_new(UMMUKVTblEntry, 1);
+    entry->dst_eid = dst_eid;
+    entry->tecte_tag = tecte_tag;
+    QLIST_INSERT_HEAD(&u->kvtbl, entry, list);
+}
+
+/*
+ * Handle CMD_DELETE_KVTBL: remove the key-value table entry for the
+ * command's destination EID, if one exists.
+ */
+static void mcmdq_cmd_delete_kvtbl(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx)
+{
+    UMMUKVTblEntry *entry = NULL;
+    uint32_t dst_eid = CMD_DELETE_KVTBL_DEST_EID(cmd);
+
+    trace_mcmdq_cmd_delete_kvtbl(mcmdq_idx, dst_eid);
+
+    /* QLIST_FOREACH leaves entry == NULL when no match is found */
+    QLIST_FOREACH(entry, &u->kvtbl, list) {
+        if (entry->dst_eid == dst_eid) {
+            break;
+        }
+    }
+
+    if (entry) {
+        QLIST_REMOVE(entry, list);
+        g_free(entry);
+    } else {
+        qemu_log("cannot find dst_eid(0x%x) entry in kvtbl.\n", dst_eid);
+    }
+}
+
+/* CMD_PLBIx commands: no invalidation state to model, trace only. */
+static void mcmdq_cmd_plbi_x_process(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx)
+{
+    trace_mcmdq_cmd_plbi_x_process(mcmdq_idx, mcmdq_cmd_strings[CMD_TYPE(cmd)]);
+}
+
+/* CMD_TLBIx commands: no invalidation state to model, trace only. */
+static void mcmdq_cmd_tlbi_x_process(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx)
+{
+    trace_mcmdq_cmd_tlbi_x_process(mcmdq_idx, mcmdq_cmd_strings[CMD_TYPE(cmd)]);
+}
+
+/*
+ * Write the PA-continuity check result back into the command entry that
+ * is currently at the queue's read index, at a fixed byte offset inside
+ * the entry. The run-in-VM flag is always set in the result byte.
+ */
+static void mcmdq_check_pa_continuity_fill_result(UMMUMcmdQueue *mcmdq, bool continuity)
+{
+    uint8_t result = 0;
+    dma_addr_t addr;
+
+    result |= UMMU_RUN_IN_VM_FLAG;
+    if (continuity) {
+        result |= PA_CONTINUITY;
+    } else {
+        result |= PA_NOT_CONTINUITY;
+    }
+
+#define CHECK_PA_CONTINUITY_RESULT_OFFSET 0x2
+    addr = MCMD_QUE_BASE_ADDR(&mcmdq->queue) +
+           MCMD_QUE_RD_IDX(&mcmdq->queue) * mcmdq->queue.entry_size;
+    if (dma_memory_write(&address_space_memory, addr + CHECK_PA_CONTINUITY_RESULT_OFFSET,
+                         &result, sizeof(result), MEMTXATTRS_UNSPECIFIED)) {
+        qemu_log("dma failed to write result(0x%x) to addr 0x%lx\n", result, addr);
+        return;
+    }
+
+    qemu_log("mcmdq check pa continuity update result(0x%x) success.\n", result);
+}
+
+/*
+ * Handle the CMD_NULL PA-continuity sub-operation: map the guest range,
+ * look up the backing RAMBlock's host page size, and report the range as
+ * contiguous when it is backed by host pages of at least 2M.
+ */
+static void mcmdq_cmd_null(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx)
+{
+    uint64_t size;
+    uint64_t addr;
+    void *hva = NULL;
+    ram_addr_t rb_offset;
+    RAMBlock *rb = NULL;
+    size_t rb_page_size = 0;
+
+    if (CMD_NULL_SUBOP(cmd) != CMD_NULL_SUBOP_CHECK_PA_CONTINUITY) {
+        qemu_log("current cannot process CMD_NULL subop %u.\n", CMD_NULL_SUBOP(cmd));
+        return;
+    }
+
+    size = CMD_NULL_CHECK_PA_CONTI_SIZE(cmd);
+    addr = CMD_NULL_CHECK_PA_CONTI_ADDR(cmd);
+    hva = cpu_physical_memory_map(addr, &size, false);
+    rb = qemu_ram_block_from_host(hva, false, &rb_offset);
+    if (rb) {
+        rb_page_size = qemu_ram_pagesize(rb);
+    } else {
+        qemu_log("failed to get ram block from host(%p)\n", hva);
+    }
+
+    trace_mcmdq_cmd_null(mcmdq_idx, addr, hva, size, rb_page_size);
+
+    /* Drop the mapping reference taken above; it was needed only for lookup. */
+    if (hva) {
+        cpu_physical_memory_unmap(hva, size, false, 0);
+    }
+
+#define PAGESZ_2M 0x200000
+    if (rb_page_size < PAGESZ_2M) {
+        mcmdq_check_pa_continuity_fill_result(&u->mcmdqs[mcmdq_idx], false);
+    } else {
+        mcmdq_check_pa_continuity_fill_result(&u->mcmdqs[mcmdq_idx], true);
+    }
+}
+
+/* CMD_PREFET_CFG is accepted but has no effect in this model. */
+static void mcmdq_cmd_prefet_cfg(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx)
+{
+    /* do nothing */
+}
+
+/*
+ * MCMDQ command dispatch table, indexed by command opcode.
+ * NULL entries are valid opcodes that this model does not yet handle;
+ * mcmdq_process_task() logs and skips them.
+ */
+static void (*mcmdq_cmd_handlers[])(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx) = {
+    [CMD_SYNC] = mcmdq_cmd_sync_handler,
+    [CMD_STALL_RESUME] = NULL,
+    [CMD_PREFET_CFG] = mcmdq_cmd_prefet_cfg,
+    [CMD_CFGI_TECT] = NULL,
+    [CMD_CFGI_TECT_RANGE] = NULL,
+    [CMD_CFGI_TCT] = NULL,
+    [CMD_CFGI_TCT_ALL] = NULL,
+    [CMD_CFGI_VMS_PIDM] = NULL,
+    [CMD_PLBI_OS_EID] = mcmdq_cmd_plbi_x_process,
+    [CMD_PLBI_OS_EIDTID] = mcmdq_cmd_plbi_x_process,
+    [CMD_PLBI_OS_VA] = mcmdq_cmd_plbi_x_process,
+    [CMD_TLBI_OS_ALL] = mcmdq_cmd_tlbi_x_process,
+    [CMD_TLBI_OS_TID] = mcmdq_cmd_tlbi_x_process,
+    [CMD_TLBI_OS_VA] = mcmdq_cmd_tlbi_x_process,
+    [CMD_TLBI_OS_VAA] = mcmdq_cmd_tlbi_x_process,
+    [CMD_TLBI_HYP_ALL] = mcmdq_cmd_tlbi_x_process,
+    [CMD_TLBI_HYP_TID] = mcmdq_cmd_tlbi_x_process,
+    [CMD_TLBI_HYP_VA] = mcmdq_cmd_tlbi_x_process,
+    [CMD_TLBI_HYP_VAA] = mcmdq_cmd_tlbi_x_process,
+    [CMD_TLBI_S1S2_VMALL] = mcmdq_cmd_tlbi_x_process,
+    [CMD_TLBI_S2_IPA] = mcmdq_cmd_tlbi_x_process,
+    [CMD_TLBI_NS_OS_ALL] = mcmdq_cmd_tlbi_x_process,
+    [CMD_RESUME] = NULL,
+    [CMD_CREATE_KVTBL] = mcmdq_cmd_create_kvtbl,
+    [CMD_DELETE_KVTBL] = mcmdq_cmd_delete_kvtbl,
+    [CMD_NULL] = mcmdq_cmd_null,
+    [CMD_TLBI_OS_ALL_U] = NULL,
+    [CMD_TLBI_OS_ASID_U] = NULL,
+    [CMD_TLBI_OS_VA_U] = NULL,
+    [CMD_TLBI_OS_VAA_U] = NULL,
+    [CMD_TLBI_HYP_ASID_U] = NULL,
+    [CMD_TLBI_HYP_VA_U] = NULL,
+    [CMD_TLBI_S1S2_VMALL_U] = NULL,
+    [CMD_TLBI_S2_IPA_U] = NULL,
+};
+
+/*
+ * Fetch the command at the queue's current read index from guest memory
+ * into @cmd, converting each word to host endianness.
+ * Returns the MemTxResult of the DMA read.
+ */
+static MemTxResult ummu_cmdq_fetch_cmd(UMMUMcmdQueue *mcmdq, UMMUMcmdqCmd *cmd)
+{
+    uint64_t addr, mcmdq_base_addr;
+    MemTxResult ret;
+    int i;
+
+    mcmdq_base_addr = MCMD_QUE_BASE_ADDR(&mcmdq->queue);
+    addr = mcmdq_base_addr + MCMD_QUE_RD_IDX(&mcmdq->queue) * mcmdq->queue.entry_size;
+    ret = dma_memory_read(&address_space_memory, addr, cmd, sizeof(UMMUMcmdqCmd),
+                          MEMTXATTRS_UNSPECIFIED);
+    if (ret != MEMTX_OK) {
+        qemu_log("addr 0x%lx failed to fetch mcmdq cmd\n", addr);
+        return ret;
+    }
+
+    /* Command words are little-endian in guest memory. */
+    for (i = 0; i < ARRAY_SIZE(cmd->word); i++) {
+        le32_to_cpus(&cmd->word[i]);
+    }
+
+    return ret;
+}
+
+/*
+ * Drain one MCMDQ: fetch and dispatch every pending command, advancing
+ * the consumer index per command. Fetch failures and out-of-range opcodes
+ * stop processing (event-queue error reporting is left as a TODO).
+ * Finally report the queue's enable state back to the guest.
+ */
+static void mcmdq_process_task(UMMUState *u, uint8_t mcmdq_idx)
+{
+    UMMUMcmdQueue *mcmdq = &u->mcmdqs[mcmdq_idx];
+    UMMUMcmdqCmd cmd;
+    UmmuMcmdqCmdType cmd_type;
+
+    if (!ummu_mcmdq_enabled(mcmdq)) {
+        ummu_mcmdq_disable_resp(mcmdq);
+        return;
+    }
+
+    while (!ummu_mcmdq_empty(mcmdq)) {
+        if (ummu_cmdq_fetch_cmd(mcmdq, &cmd) != MEMTX_OK) {
+            /* eventq generate later */
+            break;
+        }
+
+        cmd_type = CMD_TYPE(&cmd);
+        if (cmd_type >= MCMDQ_CMD_MAX) {
+            /* eventq generate later */
+            break;
+        }
+
+        if (mcmdq_cmd_handlers[cmd_type]) {
+            trace_mcmdq_process_task(mcmdq_idx, mcmdq_cmd_strings[cmd_type]);
+            mcmdq_cmd_handlers[cmd_type](u, &cmd, mcmdq_idx);
+        } else {
+            qemu_log("current cannot process mcmdq cmd: %s.\n", mcmdq_cmd_strings[cmd_type]);
+        }
+
+        /* consume the entry even when the handler is a no-op */
+        ummu_mcmdq_cons_incr(mcmdq);
+    }
+
+    ummu_mcmdq_enable_resp(mcmdq);
+}
+
+/*
+ * 32-bit write handler for the per-queue MCMDQ register window.
+ * A producer-index update kicks command processing for that queue.
+ */
+static void ummu_mcmdq_reg_writel(UMMUState *u, hwaddr offset, uint64_t data)
+{
+    uint8_t mcmdq_idx;
+    UMMUMcmdQueue *q = NULL;
+
+    /*
+     * Shift before narrowing: casting to uint8_t first would truncate
+     * the masked offset and lose any index bits above bit 7.
+     */
+    mcmdq_idx = (uint8_t)((offset & MCMDQ_IDX_MASK) >> __bf_shf(MCMDQ_IDX_MASK));
+    if (mcmdq_idx >= UMMU_MAX_MCMDQS) {
+        qemu_log("invalid idx %u, offset is 0x%lx\n", mcmdq_idx, offset);
+        return;
+    }
+
+    switch (offset & MCMDQ_BASE_ADDR_MASK) {
+    case MCMDQ_PROD_BASE_ADDR:
+        update_reg32_by_wmask(&u->mcmdqs[mcmdq_idx].queue.prod, data, UMMU_MCMDQ_PI_WMASK);
+        mcmdq_process_task(u, mcmdq_idx);
+        break;
+    case MCMDQ_CONS_BASE_ADDR:
+        update_reg32_by_wmask(&u->mcmdqs[mcmdq_idx].queue.cons, data, UMMU_MCMDQ_CI_WMASK);
+        break;
+    default:
+        qemu_log("ummu cannot handle 32-bit mcmdq reg write access at 0x%lx\n", offset);
+        break;
+    }
+
+    q = &u->mcmdqs[mcmdq_idx];
+    trace_ummu_mcmdq_reg_writel(mcmdq_idx, MCMD_QUE_WD_IDX(&q->queue), MCMD_QUE_RD_IDX(&q->queue));
+}
+
+/* GLB_INT_EN writes are accepted but intentionally not modelled. */
+static void ummu_glb_int_en_process(UMMUState *u, uint64_t data)
+{
+}
+
+/*
+ * Fetch the MAPT UCMDQ command at the current consumer index from guest
+ * memory into @cmd, converting each word to host endianness.
+ * Returns the MemTxResult of the DMA read.
+ */
+static MemTxResult ummu_mapt_cmdq_fetch_cmd(MAPTCmdqBase *base, MAPTCmd *cmd)
+{
+    dma_addr_t base_addr = MAPT_UCMDQ_BASE_ADDR(base);
+    dma_addr_t addr = base_addr + MAPT_UCMDQ_CI(base) * sizeof(*cmd);
+    int ret, i;
+
+    ret = dma_memory_read(&address_space_memory, addr, cmd, sizeof(*cmd),
+                          MEMTXATTRS_UNSPECIFIED);
+    if (ret != MEMTX_OK) {
+        qemu_log("addr 0x%lx failed to fetch mapt ucmdq cmd.\n", addr);
+        return ret;
+    }
+
+    /* Command words are little-endian in guest memory. */
+    for (i = 0; i < ARRAY_SIZE(cmd->word); i++) {
+        le32_to_cpus(&cmd->word[i]);
+    }
+
+    return ret;
+}
+
+/*
+ * Append @cpl at the UCPLQ producer index in guest memory. The caller is
+ * responsible for the full-queue check and for advancing the producer.
+ */
+static void ummu_mapt_cplq_add_entry(MAPTCmdqBase *base, MAPTCmdCpl *cpl)
+{
+    dma_addr_t base_addr = MAPT_UCPLQ_BASE_ADDR(base);
+    dma_addr_t addr = base_addr + MAPT_UCPLQ_PI(base) * sizeof(*cpl);
+    uint32_t tmp = cpu_to_le32(*(uint32_t *)cpl);
+
+    if (dma_memory_write(&address_space_memory, addr, &tmp,
+                         sizeof(tmp), MEMTXATTRS_UNSPECIFIED)) {
+        qemu_log("dma failed to write cpl entry to addr 0x%lx\n", addr);
+    }
+}
+
+/*
+ * Process one MAPT UCMDQ command and, when the command produces a
+ * completion, append it to the UCPLQ. Commands whose completion status
+ * stays INVALID (the PLBI variants) generate no completion entry.
+ */
+static void ummu_process_mapt_cmd(UMMUState *u, MAPTCmdqBase *base, MAPTCmd *cmd, uint32_t ci)
+{
+    uint32_t type = MAPT_UCMD_TYPE(cmd);
+    MAPTCmdCpl cpl;
+    uint16_t tecte_tag;
+    uint32_t tid;
+
+    /* default set cpl status invalid */
+    ummu_mapt_ucplq_set_cpl(&cpl, MAPT_UCPL_STATUS_INVALID, 0);
+    tecte_tag = ummu_mapt_cmdq_base_get_tecte_tag(base);
+    tid = ummu_mapt_cmdq_base_get_token_id(base);
+    qemu_log("tid: %u, tecte_tag: %u\n", tid, tecte_tag);
+    switch (type) {
+    case MAPT_UCMD_TYPE_PSYNC:
+        qemu_log("start process mapt cmd: MAPT_UCMD_TYPE_PSYNC.\n");
+        ummu_mapt_ucplq_set_cpl(&cpl, MAPT_UCPL_STATUS_PSYNC_SUCCESS, ci);
+        break;
+    case MAPT_UCMD_TYPE_PLBI_USR_ALL:
+        qemu_log("start process mapt cmd: MAPT_UCMD_TYPE_PLBI_USR_ALL.\n");
+        break;
+    case MAPT_UCMD_TYPE_PLBI_USR_VA:
+        qemu_log("start process mapt cmd: MAPT_UCMD_TYPE_PLBI_USR_VA.\n");
+        break;
+    default:
+        qemu_log("unknown mapt cmd type: 0x%x\n", type);
+        ummu_mapt_ucplq_set_cpl(&cpl, MAPT_UCPL_STATUS_TYPE_ERROR, ci);
+        break;
+    }
+
+    /* no completion to report for this command */
+    if (cpl.cpl_status == MAPT_UCPL_STATUS_INVALID) {
+        return;
+    }
+
+    if (ummu_mapt_ucplq_full(base)) {
+        qemu_log("mapt ucplq full, failed to add cpl entry.\n");
+        return;
+    }
+    ummu_mapt_cplq_add_entry(base, &cpl);
+    ummu_mapt_ucqlq_prod_incr(base);
+    qemu_log("mapt cplq add entry success, cplpi: %u, cplci: %u.\n",
+             MAPT_UCPLQ_PI(base), MAPT_UCPLQ_CI(base));
+}
+
+/*
+ * Drain the MAPT UCMDQ described by @base: fetch and process commands
+ * until the queue is empty, advancing the consumer index per command.
+ * Processing stops on the first fetch failure.
+ */
+static void ummu_process_mapt_cmdq(UMMUState *u, MAPTCmdqBase *base)
+{
+    MAPTCmd cmd;
+    int ret;
+
+    while (!ummu_mapt_ucmdq_empty(base)) {
+        ret = ummu_mapt_cmdq_fetch_cmd(base, &cmd);
+        if (ret) {
+            qemu_log("failed to fetch mapt cmdq cmd.\n");
+            return;
+        }
+        ummu_process_mapt_cmd(u, base, &cmd, MAPT_UCMDQ_CI(base));
+        ummu_mapt_ucmdq_cons_incr(base);
+    }
+    qemu_log("after cmdq process, log2size: %u, cmdpi: %u, cmdci: %u, cplpi: %u, cplci: %u\n",
+             MAPT_UCMDQ_LOG2SIZE(base), MAPT_UCMDQ_PI(base), MAPT_UCMDQ_CI(base),
+             MAPT_UCPLQ_PI(base), MAPT_UCPLQ_CI(base));
+}
+
+/*
+ * Write handler for the MAPT controller page: update the selected
+ * UCMDQ/UCPLQ index in the per-queue context held in guest memory, then
+ * write the context back. A UCMDQ_PI update kicks command processing.
+ */
+static void ummu_mapt_ctrlr_page_write_process(UMMUState *u, hwaddr offset, uint64_t data)
+{
+    MAPTCmdqBase base;
+    uint32_t qid = ummu_mapt_cmdq_get_qid(u, offset);
+    dma_addr_t addr = MAPT_CMDQ_CTXT_BASE_ADDR(u->mapt_cmdq_ctxt_base);
+    int ret;
+
+    if (!addr) {
+        /* mirror the read path: context base not configured yet, drop write */
+        qemu_log("mapt cmdq ctxt base not init, drop write at 0x%lx\n", offset);
+        return;
+    }
+
+    qemu_log("qid: %u, mapt_ctxt_base: 0x%lx\n", qid, addr);
+    ret = ummu_mapt_get_cmdq_base(u, addr, qid, &base);
+    if (ret) {
+        qemu_log("failed to get mapt cmdq base.\n");
+        return;
+    }
+
+    switch (offset & UCMDQ_UCPLQ_CI_PI_MASK) {
+    case UCMDQ_PI:
+        ummu_mapt_cmdq_base_update_ucmdq_pi(&base, (uint16_t)data);
+        ummu_process_mapt_cmdq(u, &base);
+        break;
+    case UCMDQ_CI:
+        ummu_mapt_cmdq_base_update_ucmdq_ci(&base, (uint16_t)data);
+        break;
+    case UCPLQ_PI:
+        ummu_mapt_cmdq_base_update_ucplq_pi(&base, (uint16_t)data);
+        break;
+    case UCPLQ_CI:
+        ummu_mapt_cmdq_base_update_ucplq_ci(&base, (uint16_t)data);
+        break;
+    default:
+        qemu_log("cannot process addr(0x%lx) mapt ctrlr page write.\n", offset);
+        return;
+    }
+
+    ret = ummu_mapt_update_cmdq_base(u, addr, qid, &base);
+    if (ret) {
+        qemu_log("failed to update mapt cmdq ctx.\n");
+        return;
+    }
+}
+
+/*
+ * 16-bit register write dispatcher; only the MAPT controller page
+ * supports 16-bit access. Other offsets are logged and dropped.
+ */
+static void ummu_reg_writew(UMMUState *u, hwaddr offset, uint64_t data)
+{
+    switch (offset) {
+    case A_UCMDQ_PI_START_REG...A_UCPLQ_CI_END_REG:
+        ummu_mapt_ctrlr_page_write_process(u, offset, data);
+        break;
+    default:
+        qemu_log("ummu cannot handle 16-bit write access at: 0x%lx\n", offset);
+        break;
+    }
+}
+
+/*
+ * Release the UM queue named by the RELEASE_UM_QUEUE_ID register by
+ * zeroing its MAPT command-queue context in guest memory.
+ * Returns 0 on success, -1 on DMA failure.
+ */
+static int ummu_mapt_process_release_um_queue(UMMUState *u)
+{
+    MAPTCmdqBase base;
+    uint32_t qid = u->release_um_queue_id;
+    dma_addr_t addr = MAPT_CMDQ_CTXT_BASE_ADDR(u->mapt_cmdq_ctxt_base);
+
+    memset(&base, 0, sizeof(base));
+    if (ummu_mapt_update_cmdq_base(u, addr, qid, &base)) {
+        qemu_log("failed to release um queue(qid: %u)\n", qid);
+        return -1;
+    }
+
+    qemu_log("release um queue(qid: %u) success.\n", qid);
+    return 0;
+}
+
+/*
+ * 32-bit register write dispatcher for the UMMU MMIO window.
+ * Writes go through per-register write masks; several registers are
+ * accepted but unmodelled. Unknown offsets are logged and dropped.
+ */
+static void ummu_reg_writel(UMMUState *u, hwaddr offset, uint64_t data)
+{
+    switch (offset) {
+    case A_CTRL0:
+        update_reg32_by_wmask(&u->ctrl[0], data, UMMU_CTRL0_WMASK);
+        ummu_cr0_process_task(u);
+        break;
+    case A_CTRL1:
+        update_reg32_by_wmask(&u->ctrl[1], data, UMMU_CTRL1_WMASK);
+        break;
+    case A_CTRL2:
+        update_reg32_by_wmask(&u->ctrl[2], data, UMMU_CTRL2_WMASK);
+        break;
+    case A_CTRL3:
+        update_reg32_by_wmask(&u->ctrl[3], data, UMMU_CTRL3_WMASK);
+        break;
+    case A_TECT_BASE_CFG:
+        update_reg32_by_wmask(&u->tect_base_cfg, data, UMMU_TECT_BASE_CFG_WMASK);
+        break;
+    case A_MCMD_QUE_BASE...A_MCMD_QUE_LASTEST_CI:
+        ummu_mcmdq_reg_writel(u, offset, data);
+        break;
+    case A_EVENT_QUE_PI:
+        update_reg32_by_wmask(&u->eventq.queue.prod, data, UMMU_EVENTQ_PI_WMASK);
+        break;
+    case A_EVENT_QUE_CI:
+        update_reg32_by_wmask(&u->eventq.queue.cons, data, UMMU_EVENTQ_CI_WMASK);
+        break;
+    case A_EVENT_QUE_USI_DATA:
+        update_reg32_by_wmask(&u->eventq.usi_data, data, UMMU_EVENT_QUE_USI_DATA_WMASK);
+        break;
+    case A_EVENT_QUE_USI_ATTR:
+        update_reg32_by_wmask(&u->eventq.usi_attr, data, UMMU_EVENTQ_USI_ATTR_WMASK);
+        break;
+    case A_GLB_ERR_INT_USI_DATA:
+        update_reg32_by_wmask(&u->glb_err.usi_data, data, UMMU_GLB_ERR_INT_USI_DATA_WMASK);
+        break;
+    case A_GLB_ERR_INT_USI_ATTR:
+        update_reg32_by_wmask(&u->glb_err.usi_attr, data, UMMU_GLB_ERR_INT_USI_ATTR_WMASK);
+        break;
+    case A_GLB_INT_EN:
+        ummu_glb_int_en_process(u, data);
+        break;
+    case A_GLB_ERR_RESP:
+        update_reg32_by_wmask(&u->glb_err.glb_err_resp, data, UMMU_GLB_ERR_RESP_WMASK);
+        break;
+    case A_RELEASE_UM_QUEUE:
+        /* release_um_queue reg set 1 to release um_queue */
+        if ((data & RELEASE_UM_QUEUE_WMASK) != 1) {
+            break;
+        }
+        /* on failure the register stays 1 so the guest can observe it */
+        if (ummu_mapt_process_release_um_queue(u)) {
+            u->release_um_queue = 1;
+            break;
+        }
+        /* release success, set release_um_queue reg to 0, means release success */
+        u->release_um_queue = 0;
+        break;
+    case A_RELEASE_UM_QUEUE_ID:
+        update_reg32_by_wmask(&u->release_um_queue_id, data, RELEASE_UM_QUEUE_ID_WMASK);
+        break;
+    case A_UCMDQ_PI_START_REG...A_UCPLQ_CI_END_REG:
+        ummu_mapt_ctrlr_page_write_process(u, offset, data);
+        break;
+    case A_UMCMD_PAGE_SEL:
+        qemu_log("ucmdq set page sel to %s\n",
+                 data == MAPT_CMDQ_CTRLR_PAGE_SIZE_4K ? "4K" : "64K");
+        update_reg32_by_wmask(&u->ucmdq_page_sel, data, UMCMD_PAGE_SEL_WMASK);
+        break;
+    case A_DSTEID_KV_TABLE_BASE_CFG:
+    case A_UMMU_DSTEID_KV_TABLE_HASH_CFG0:
+    case A_UMMU_DSTEID_KV_TABLE_HASH_CFG1:
+    case A_UMMU_USER_CONFIG0...A_UMMU_USER_CONFIG11:
+    case A_UMMU_MEM_USI_DATA:
+    case A_UMMU_MEM_USI_ATTR:
+    case A_UMMU_INT_MASK:
+    case A_UMMU_DSTEID_CAM_TABLE_BASE_CFG:
+        /* do nothing */
+        break;
+    default:
+        qemu_log("ummu cannot handle 32-bit write access at 0x%lx\n", offset);
+        break;
+    }
+}
+
+/*
+ * 64-bit write handler for the per-queue MCMDQ register window; only the
+ * queue base register supports 64-bit writes. The queue log2size is
+ * cached from the written base value.
+ */
+static void ummu_mcmdq_reg_writell(UMMUState *u, hwaddr offset, uint64_t data)
+{
+    uint8_t mcmdq_idx;
+
+    /*
+     * Shift before narrowing: casting to uint8_t first would truncate
+     * the masked offset and lose any index bits above bit 7.
+     */
+    mcmdq_idx = (uint8_t)((offset & MCMDQ_IDX_MASK) >> __bf_shf(MCMDQ_IDX_MASK));
+    if (mcmdq_idx >= UMMU_MAX_MCMDQS) {
+        qemu_log("invalid idx %u, offset is 0x%lx\n", mcmdq_idx, offset);
+        return;
+    }
+
+    switch (offset & MCMDQ_BASE_ADDR_MASK) {
+    case A_MCMD_QUE_BASE:
+        update_reg64_by_wmask(&u->mcmdqs[mcmdq_idx].queue.base, data, UMMU_MCMDQ_BASE_WMASK);
+        u->mcmdqs[mcmdq_idx].queue.log2size = MCMD_QUE_LOG2SIZE(data);
+        trace_ummu_mcmdq_base_reg_writell(mcmdq_idx, u->mcmdqs[mcmdq_idx].queue.base,
+                                          u->mcmdqs[mcmdq_idx].queue.log2size);
+        break;
+    default:
+        qemu_log("ummu cannot handle 64-bit mcmdq reg write access at 0x%lx\n", offset);
+        break;
+    }
+}
+
+/*
+ * 64-bit register write dispatcher for the UMMU MMIO window.
+ * Writes go through per-register write masks; a few registers are
+ * accepted but unmodelled. Unknown offsets are logged and dropped.
+ */
+static void ummu_reg_writell(UMMUState *u, hwaddr offset, uint64_t data)
+{
+    switch (offset) {
+    case A_TECT_BASE0:
+        update_reg64_by_wmask(&u->tect_base, data, UMMU_TECT_BASE_WMASK);
+        break;
+    case A_MCMD_QUE_BASE...A_MCMD_QUE_LASTEST_CI:
+        ummu_mcmdq_reg_writell(u, offset, data);
+        break;
+    case A_EVENT_QUE_BASE0:
+        update_reg64_by_wmask(&u->eventq.queue.base, data, UMMU_EVENTQ_BASE_WMASK);
+        /* the queue size is encoded in the base register value */
+        u->eventq.queue.log2size = EVENT_QUE_LOG2SIZE(data);
+        trace_ummu_eventq_req_writell(u->eventq.queue.base, u->eventq.queue.log2size);
+        break;
+    case A_EVENT_QUE_USI_ADDR0:
+        update_reg64_by_wmask(&u->eventq.usi_addr, data, UMMU_EVENTQ_USI_ADDR_WMASK);
+        trace_ummu_eventq_usi_reg_writell(data);
+        break;
+    case A_GLB_ERR_INT_USI_ADDR0:
+        update_reg64_by_wmask(&u->glb_err.usi_addr, data, UMMU_GLB_ERR_INT_USI_ADDR_WMASK);
+        trace_ummu_glberr_usi_reg_writell(data);
+        break;
+    case A_MAPT_CMDQ_CTXT_BADDR0:
+        update_reg64_by_wmask(&u->mapt_cmdq_ctxt_base, data, MAPT_CMDQ_CTXT_BADDR_WMASK);
+        trace_ummu_mapt_ctx_base_reg_writell(u->mapt_cmdq_ctxt_base);
+        break;
+    case A_DSTEID_KV_TABLE_BASE0:
+    case A_UMMU_DSTEID_CAM_TABLE_BASE0:
+    case A_UMMU_MEM_USI_ADDR0:
+        /* do nothing */
+        break;
+    default:
+        qemu_log("ummu cannot handle 64-bit write access at 0x%lx\n", offset);
+        break;
+    }
+}
+
+/*
+ * MMIO write entry point: dispatch on access size (2/4/8 bytes).
+ * Writes of unsupported sizes are logged and dropped.
+ */
+static void ummu_reg_write(void *opaque, hwaddr offset, uint64_t data, unsigned size)
+{
+    UMMUState *u = opaque;
+
+    switch (size) {
+    case 2:
+        ummu_reg_writew(u, offset, data);
+        break;
+    case 4:
+        ummu_reg_writel(u, offset, data);
+        break;
+    case 8:
+        ummu_reg_writell(u, offset, data);
+        break;
+    default:
+        qemu_log("cannot process ummu reg write for size: %u\n", size);
+        break;
+    }
+}
+
+/*
+ * MMIO callbacks for the UMMU register window. Both the guest-visible
+ * (.valid) and implementation (.impl) access sizes are restricted to
+ * 2..8 bytes; the core memory layer rejects anything outside that range.
+ */
+static const MemoryRegionOps ummu_reg_ops = {
+    .read = ummu_reg_read,
+    .write = ummu_reg_write,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .valid = {
+        .min_access_size = 2,
+        .max_access_size = 8,
+    },
+    .impl = {
+        .min_access_size = 2,
+        .max_access_size = 8,
+    },
+};
+
+/*
+ * Program the reset/default values of every modelled UMMU register:
+ * capability words (what this emulated UMMU advertises to the guest),
+ * control words, table bases and the command/event queue state.
+ * Field meanings come from the REG32/FIELD tables in ub_ummu_internal.h.
+ */
+static void ummu_registers_init(UMMUState *u)
+{
+    int i;
+
+    memset(u->cap, 0, sizeof(u->cap));
+    /* cap 0 init */
+    u->cap[0] = FIELD_DP32(u->cap[0], CAP0, DSTEID_SIZE, 0x10);
+    u->cap[0] = FIELD_DP32(u->cap[0], CAP0, TOKENID_SIZE, 0x14);
+    u->cap[0] = FIELD_DP32(u->cap[0], CAP0, ATTR_PERMS_OVR, 0x1);
+    u->cap[0] = FIELD_DP32(u->cap[0], CAP0, ATTR_TYPES_OVR, 0x1);
+    u->cap[0] = FIELD_DP32(u->cap[0], CAP0, S2_ATTR_TYPE, 0x1);
+    u->cap[0] = FIELD_DP32(u->cap[0], CAP0, TCT_LEVEL, 0x1);
+    u->cap[0] = FIELD_DP32(u->cap[0], CAP0, TECT_MODE, 0x1);
+    u->cap[0] = FIELD_DP32(u->cap[0], CAP0, TECT_LEVEL, 0x1);
+    /* cap 1 init */
+    u->cap[1] = FIELD_DP32(u->cap[1], CAP1, EVENTQ_SIZE, 0x13);
+    u->cap[1] = FIELD_DP32(u->cap[1], CAP1, EVENTQ_NUMB, 0x0);
+    u->cap[1] = FIELD_DP32(u->cap[1], CAP1, EVENTQ_SUPPORT, 0x1);
+    u->cap[1] = FIELD_DP32(u->cap[1], CAP1, MCMDQ_SIZE, 0xF);
+    u->cap[1] = FIELD_DP32(u->cap[1], CAP1, MCMDQ_NUMB, 0x3);
+    u->cap[1] = FIELD_DP32(u->cap[1], CAP1, MCMDQ_SUPPORT, 0x1);
+    u->cap[1] = FIELD_DP32(u->cap[1], CAP1, EVENT_GEN, 0x1);
+    u->cap[1] = FIELD_DP32(u->cap[1], CAP1, STALL_MAX, 0x80);
+    /* cap 2 init */
+    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, VMID_TLBI, 0x0);
+    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, TLB_BOARDCAST, 0x1);
+    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, RANGE_TLBI, 0x1);
+    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, OA_SIZE, 0x5);
+    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, GRAN4K_T, 0x1);
+    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, GRAN16K_T, 0x1);
+    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, GRAN64K_T, 0x1);
+    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, VA_EXTEND, 0x0);
+    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, S2_TRANS, 0x1);
+    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, S1_TRANS, 0x1);
+    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, SMALL_TRANS, 0x1);
+    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, TRANS_FORM, 0x2);
+    /* cap 3 init */
+    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, HIER_ATTR_DISABLE, 0x1);
+    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, S2_EXEC_NEVER_CTRL, 0x1);
+    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, BBM_LEVEL, 0x2);
+    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, COHERENT_ACCESS, 0x1);
+    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, TTENDIAN_MODE, 0x0);
+    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, MTM_SUPPORT, 0x1);
+    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, HTTU_SUPPORT, 0x2);
+    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, HYP_S1CONTEXT, 0x1);
+    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, USI_SUPPORT, 0x1);
+    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, STALL_MODEL, 0x0);
+    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, TERM_MODEL, 0x0);
+    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, SATI_MAX, 0x1);
+    /* cap 4 init */
+    u->cap[4] = FIELD_DP32(u->cap[4], CAP4, UCMDQ_UCPLQ_NUMB, 0x10);
+    u->cap[4] = FIELD_DP32(u->cap[4], CAP4, UCMDQ_SIZE, 0xF);
+    u->cap[4] = FIELD_DP32(u->cap[4], CAP4, UCPLQ_SIZE, 0xF);
+    u->cap[4] = FIELD_DP32(u->cap[4], CAP4, UIEQ_SIZE, 0xF);
+    u->cap[4] = FIELD_DP32(u->cap[4], CAP4, UIEQ_NUMB, 0x5);
+    u->cap[4] = FIELD_DP32(u->cap[4], CAP4, UIEQ_SUPPORT, 0x1);
+    u->cap[4] = FIELD_DP32(u->cap[4], CAP4, PPLB_SUPPORT, 0x0);
+
+    /* cap 5 init */
+    u->cap[5] = FIELD_DP32(u->cap[5], CAP5, MAPT_SUPPORT, 0x1);
+    u->cap[5] = FIELD_DP32(u->cap[5], CAP5, MAPT_MODE, 0x3);
+    u->cap[5] = FIELD_DP32(u->cap[5], CAP5, GRAN2M_P, 0x0);
+    u->cap[5] = FIELD_DP32(u->cap[5], CAP5, GRAN4K_P, 0x1);
+    u->cap[5] = FIELD_DP32(u->cap[5], CAP5, TOKENVAL_CHK, 0x1);
+    u->cap[5] = FIELD_DP32(u->cap[5], CAP5, TOKENVAL_CHK_MODE, 0x1);
+    u->cap[5] = FIELD_DP32(u->cap[5], CAP5, RANGE_PLBI, 0x1);
+    u->cap[5] = FIELD_DP32(u->cap[5], CAP5, PLB_BORDCAST, 0x0);
+    /* cap 6 init */
+    u->cap[6] = FIELD_DP32(u->cap[6], CAP6, MTM_ID_MAX, 0x00FF);
+    u->cap[6] = FIELD_DP32(u->cap[6], CAP6, MTM_GP_MAX, 0x03);
+
+    /* ctrlr init */
+    memset(u->ctrl, 0, sizeof(u->ctrl));
+    u->ctrl[1] = FIELD_DP32(u->ctrl[1], CTRL1, TECT_MODE_SEL, 0x1);
+
+    /* tect init */
+    u->tect_base = 0;
+    u->tect_base_cfg = 0;
+
+    /* mcmdq init */
+    for (i = 0; i < UMMU_MAX_MCMDQS; i++) {
+        u->mcmdqs[i].queue.base = 0;
+        u->mcmdqs[i].queue.prod = 0;
+        u->mcmdqs[i].queue.cons = 0;
+        u->mcmdqs[i].queue.entry_size = sizeof(UMMUMcmdqCmd);
+    }
+
+    /* eventq init */
+    memset(&u->eventq, 0, sizeof(u->eventq));
+
+    /* glb err init */
+    memset(&u->glb_err, 0, sizeof(u->glb_err));
+
+    /* evt queue init
+     * NOTE(review): base/prod/cons are already zeroed by the memset above;
+     * only the entry_size assignment adds information here.
+     */
+    u->eventq.queue.base = 0;
+    u->eventq.queue.prod = 0;
+    u->eventq.queue.cons = 0;
+    u->eventq.queue.entry_size = sizeof(UMMUEvent);
+
+    /* mapt cmdq ctxt base addr init */
+    u->mapt_cmdq_ctxt_base = 0;
+
+    /* umcmdq default page set to 4K */
+    u->ucmdq_page_sel = MAPT_CMDQ_CTRLR_PAGE_SIZE_4K;
+}
+
+/*
+ * Validate that the bus controller @ubc can be matched to an UMMU
+ * instance. The UB bus name encodes the bus number as "ubus.<N>";
+ * that number is used to look up the UMMU.
+ *
+ * Returns 0 when a matching UMMU exists, -1 on parse or lookup failure.
+ * NOTE(review): no link between @ubc and the UMMU is stored here —
+ * this only checks that the association is possible; confirm intent.
+ */
+int ummu_associating_with_ubc(BusControllerState *ubc)
+{
+    unsigned int bus_num;
+    UMMUState *ummu;
+
+    if (sscanf(ubc->bus->qbus.name, "ubus.%u", &bus_num) != 1) {
+        qemu_log("failed to get bus num %s\n",
+                 ubc->bus->qbus.name);
+        return -1;
+    }
+
+    ummu = ummu_find_by_bus_num(bus_num);
+    if (!ummu) {
+        qemu_log("failed to get ummu %u\n", bus_num);
+        return -1;
+    }
+
+    return 0;
+}
+
+/* Add a freshly realized UMMU to the global ub_umms list. */
+static void ub_save_ummu_list(UMMUState *u)
+{
+    QLIST_INSERT_HEAD(&ub_umms, u, node);
+}
+
+/* Remove @u from the global ub_umms list (unrealize path). */
+static void ub_remove_ummu_list(UMMUState *u)
+{
+    QLIST_REMOVE(u, node);
+}
+
+/*
+ * Realize one UMMU sysbus device: assign it the next instance number,
+ * create its MMIO register window, program register defaults and add it
+ * to the global UMMU list.
+ */
+static void ummu_base_realize(DeviceState *dev, Error **errp)
+{
+    /*
+     * Monotonic per-process instance counter, used for both the QOM id
+     * ("ummu.<n>") and the bus number (renamed from 'NO': uppercase
+     * names are reserved for macros/enum constants).
+     */
+    static uint8_t ummu_count;
+    UMMUState *u = UB_UMMU(dev);
+    SysBusDevice *sysdev = SYS_BUS_DEVICE(dev);
+
+    /* a zero-sized MMIO region is never useful; fail realize early */
+    if (!u->ummu_reg_size) {
+        error_setg(errp, "ub-ummu-reg-size property must be non-zero");
+        return;
+    }
+
+    u->bus_num = ummu_count;
+    sysdev->parent_obj.id = g_strdup_printf("ummu.%u", ummu_count++);
+
+    memory_region_init_io(&u->ummu_reg_mem, OBJECT(u), &ummu_reg_ops,
+                          u, TYPE_UB_UMMU, u->ummu_reg_size);
+    sysbus_init_mmio(sysdev, &u->ummu_reg_mem);
+    ummu_registers_init(u);
+    ub_save_ummu_list(u);
+
+    QLIST_INIT(&u->kvtbl);
+}
+
+/*
+ * Tear down an UMMU device: drop it from the global list, release its
+ * QOM id string and free every cached KV-table entry.
+ */
+static void ummu_base_unrealize(DeviceState *dev)
+{
+    UMMUState *u = UB_UMMU(dev);
+    SysBusDevice *sysdev = SYS_BUS_DEVICE(dev);
+    UMMUKVTblEntry *entry = NULL;
+    UMMUKVTblEntry *next_entry = NULL;
+
+    ub_remove_ummu_list(u);
+    /*
+     * g_free(NULL) is a no-op, so the previous NULL guard was redundant;
+     * clearing the pointer prevents a dangling id on re-use.
+     */
+    g_free(sysdev->parent_obj.id);
+    sysdev->parent_obj.id = NULL;
+
+    QLIST_FOREACH_SAFE(entry, &u->kvtbl, list, next_entry) {
+        QLIST_REMOVE(entry, list);
+        g_free(entry);
+    }
+}
+
+/* DeviceClass reset hook; register-state reset is not implemented yet. */
+static void ummu_base_reset(DeviceState *dev)
+{
+    /* reset ummu relative struct later */
+}
+
+/*
+ * User-configurable properties:
+ *  - "ub-ummu-reg-size": size in bytes of the MMIO register window;
+ *  - "primary-bus": link to the UB bus this UMMU serves;
+ *  - "nested": advertise nested (two-stage) translation, off by default.
+ */
+static Property ummu_dev_properties[] = {
+    DEFINE_PROP_UINT64("ub-ummu-reg-size", UMMUState,
+                       ummu_reg_size, 0),
+    DEFINE_PROP_LINK("primary-bus", UMMUState, primary_bus,
+                     TYPE_UB_BUS, UBBus *),
+    DEFINE_PROP_BOOL("nested", UMMUState, nested, false),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+/* Wire the device callbacks and properties into the QOM class. */
+static void ummu_base_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    device_class_set_props(dc, ummu_dev_properties);
+    dc->realize = ummu_base_realize;
+    dc->unrealize = ummu_base_unrealize;
+    dc->reset = ummu_base_reset;
+}
+
+/* QOM type registration record: TYPE_UB_UMMU is a sysbus device. */
+static const TypeInfo ummu_base_info = {
+    .name = TYPE_UB_UMMU,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(UMMUState),
+    .class_data = NULL,
+    .class_size = sizeof(UMMUBaseClass),
+    .class_init = ummu_base_class_init,
+};
+
+/* Register TYPE_UB_UMMU with the QOM type system at module init time. */
+static void ummu_base_register_types(void)
+{
+    type_register_static(&ummu_base_info);
+}
+type_init(ummu_base_register_types)
diff --git a/hw/ub/ub_ummu_internal.h b/hw/ub/ub_ummu_internal.h
new file mode 100644
index 0000000000000000000000000000000000000000..68724e5ce1912ab551d2c45989598dcea1b1a8e3
--- /dev/null
+++ b/hw/ub/ub_ummu_internal.h
@@ -0,0 +1,958 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <https://www.gnu.org/licenses/>.
+ */
+#ifndef UB_UMMU_INTERNAL_H
+#define UB_UMMU_INTERNAL_H
+#include "hw/registerfields.h"
+#include "hw/ub/ub_usi.h"
+#include "sysemu/dma.h"
+#include "sysemu/iommufd.h"
+#include <stdint.h> /* FIXME(review): original angle-bracket include name was lost in patch transfer — restore the intended header */
+
+/* ummu spec register define */
+REG32(CAP0, 0x0010)
+ FIELD(CAP0, DSTEID_SIZE, 0, 8)
+ FIELD(CAP0, TOKENID_SIZE, 8, 5)
+ FIELD(CAP0, ATTR_PERMS_OVR, 13, 1)
+ FIELD(CAP0, ATTR_TYPES_OVR, 14, 1)
+ FIELD(CAP0, S2_ATTR_TYPE, 15, 1)
+ FIELD(CAP0, TCT_LEVEL, 16, 1)
+ FIELD(CAP0, TECT_MODE, 17, 2)
+ FIELD(CAP0, TECT_LEVEL, 19, 1)
+
+REG32(CAP1, 0x0014)
+ FIELD(CAP1, EVENTQ_SIZE, 0, 5)
+ FIELD(CAP1, EVENTQ_NUMB, 5, 4)
+ FIELD(CAP1, EVENTQ_SUPPORT, 9, 1)
+ FIELD(CAP1, MCMDQ_SIZE, 10, 4)
+ FIELD(CAP1, MCMDQ_NUMB, 14, 4)
+ FIELD(CAP1, MCMDQ_SUPPORT, 18, 1)
+ FIELD(CAP1, EVENT_GEN, 19, 1)
+ FIELD(CAP1, STALL_MAX, 20, 12)
+
+REG32(CAP2, 0x0018)
+ FIELD(CAP2, VMID_TLBI, 0, 1)
+ FIELD(CAP2, TLB_BOARDCAST, 1, 1)
+ FIELD(CAP2, RANGE_TLBI, 2, 1)
+ FIELD(CAP2, OA_SIZE, 3, 3)
+ FIELD(CAP2, GRAN4K_T, 6, 1)
+ FIELD(CAP2, GRAN16K_T, 7, 1)
+ FIELD(CAP2, GRAN64K_T, 8, 1)
+ FIELD(CAP2, VA_EXTEND, 9, 2)
+ FIELD(CAP2, S2_TRANS, 11, 1)
+ FIELD(CAP2, S1_TRANS, 12, 1)
+ FIELD(CAP2, SMALL_TRANS, 13, 1)
+ FIELD(CAP2, TRANS_FORM, 14, 2)
+
+REG32(CAP3, 0x001C)
+ FIELD(CAP3, HIER_ATTR_DISABLE, 0, 1)
+ FIELD(CAP3, S2_EXEC_NEVER_CTRL, 1, 1)
+ FIELD(CAP3, BBM_LEVEL, 2, 2)
+ FIELD(CAP3, COHERENT_ACCESS, 4, 1)
+ FIELD(CAP3, TTENDIAN_MODE, 5, 2)
+ FIELD(CAP3, MTM_SUPPORT, 7, 1)
+ FIELD(CAP3, HTTU_SUPPORT, 8, 2)
+ FIELD(CAP3, HYP_S1CONTEXT, 10, 1)
+ FIELD(CAP3, USI_SUPPORT, 11, 1)
+ FIELD(CAP3, STALL_MODEL, 12, 2)
+ FIELD(CAP3, TERM_MODEL, 14, 1)
+ FIELD(CAP3, SATI_MAX, 15, 6)
+
+REG32(CAP4, 0x0020)
+ FIELD(CAP4, UCMDQ_UCPLQ_NUMB, 0, 8)
+ FIELD(CAP4, UCMDQ_SIZE, 8, 4)
+ FIELD(CAP4, UCPLQ_SIZE, 12, 4)
+ FIELD(CAP4, UIEQ_SIZE, 16, 4)
+ FIELD(CAP4, UIEQ_NUMB, 20, 4)
+ FIELD(CAP4, UIEQ_SUPPORT, 24, 1)
+ FIELD(CAP4, PPLB_SUPPORT, 25, 1)
+
+REG32(CAP5, 0x0024)
+ FIELD(CAP5, MAPT_SUPPORT, 0, 1)
+ FIELD(CAP5, MAPT_MODE, 1, 2)
+ FIELD(CAP5, GRAN2M_P, 3, 1)
+ FIELD(CAP5, GRAN4K_P, 4, 1)
+ FIELD(CAP5, TOKENVAL_CHK, 5, 1)
+ FIELD(CAP5, TOKENVAL_CHK_MODE, 6, 2)
+ FIELD(CAP5, RANGE_PLBI, 8, 1)
+ FIELD(CAP5, PLB_BORDCAST, 9, 1)
+
+REG32(CAP6, 0x0028)
+ FIELD(CAP6, MTM_ID_MAX, 0, 16)
+ FIELD(CAP6, MTM_GP_MAX, 16, 8)
+
+#define UMMU_CTRL0_WMASK GENMASK(5, 0)
+REG32(CTRL0, 0x0030)
+ FIELD(CTRL0, UMMU_EN, 0, 1)
+ FIELD(CTRL0, EVENTQ_EN, 1, 1)
+ FIELD(CTRL0, VMID_WILDCARD_T, 2, 3)
+ FIELD(CTRL0, MAPT_EN, 5, 1)
+
+REG32(CTRL0_ACK, 0x0034)
+ FIELD(CTRL0_ACK, UMMU_EN, 0, 1)
+ FIELD(CTRL0_ACK, EVENTQ_EN, 1, 1)
+ FIELD(CTRL0_ACK, VMID_WILDCARD_T, 2, 3)
+ FIELD(CTRL0_ACK, MAPT_EN, 5, 1)
+
+#define UMMU_CTRL1_WMASK GENMASK(15, 0)
+REG32(CTRL1, 0x0038)
+ FIELD(CTRL1, QUEUE_IC_T, 0, 2)
+ FIELD(CTRL1, QUEUE_OC_T, 2, 2)
+ FIELD(CTRL1, QUEUE_SH_T, 4, 2)
+ FIELD(CTRL1, TABLE_IC_T, 6, 2)
+ FIELD(CTRL1, TABLE_OC_T, 8, 2)
+ FIELD(CTRL1, TABLE_SH_T, 10, 2)
+ FIELD(CTRL1, E2H, 12, 1)
+ FIELD(CTRL1, BAD_DSTEID_RECORD, 13, 1)
+ FIELD(CTRL1, PRIVATE_TLB, 14, 1)
+ FIELD(CTRL1, TECT_MODE_SEL, 15, 1)
+
+#define UMMU_CTRL2_WMASK GENMASK(6, 0)
+REG32(CTRL2, 0x003C)
+ FIELD(CTRL2, PRIVATE_PLB, 6, 1)
+ FIELD(CTRL2, UIE_QUEUE_SH_P, 4, 2)
+ FIELD(CTRL2, UIE_QUEUE_OC_P, 2, 2)
+ FIELD(CTRL2, UIE_QUEUE_IC_P, 0, 2)
+
+#define UMMU_CTRL3_WMASK (GENMASK(23, 0) | GENMASK(31, 31))
+REG32(CTRL3, 0x0040)
+ FIELD(CTRL3, UPDATE_FLG, 31, 1)
+ FIELD(CTRL3, UOTR_MTM_GP, 16, 8)
+ FIELD(CTRL3, UOTR_MTM_ID, 0, 16)
+
+#define UMMU_TECT_BASE_WMASK (GENMASK_ULL(51, 6) | GENMASK_ULL(63, 63))
+REG32(TECT_BASE0, 0x0070)
+ FIELD(TECT_BASE0, TECT_BASE_ADDR0, 6, 26)
+
+REG32(TECT_BASE1, 0x0074)
+ FIELD(TECT_BASE1, TECT_BASE_ADDR1, 0, 19)
+ FIELD(TECT_BASE1, TECT_RA_CFG, 31, 1)
+
+#define UMMU_TECT_BASE_CFG_WMASK GENMASK_ULL(12, 0)
+REG32(TECT_BASE_CFG, 0x0078)
+ FIELD(TECT_BASE_CFG, TECT_LOG2SIZE, 0, 6)
+ FIELD(TECT_BASE_CFG, TECT_SPLIT, 6, 5)
+ FIELD(TECT_BASE_CFG, TECT_FMT, 11, 2)
+
+#define UMMU_MCMDQ_BASE_WMASK (GENMASK_ULL(51, 0) | GENMASK_ULL(63, 63))
+#define UMMU_MCMDQ_PI_WMASK (GENMASK(19, 0) | GENMASK(23, 23) | GENMASK(31, 31))
+#define UMMU_MCMDQ_CI_WMASK (GENMASK(19, 0) | GENMASK(26, 23) | GENMASK(31, 31))
+#define A_MCMD_QUE_BASE 0x0100
+#define A_MCMD_QUE_LASTEST_CI 0x10FC
+
+#define UMMU_EVENTQ_BASE_WMASK (GENMASK_ULL(4, 0) | GENMASK_ULL(51, 6) | GENMASK_ULL(63, 63))
+REG32(EVENT_QUE_BASE0, 0x1100)
+ FIELD(EVENT_QUE_BASE0, EVENT_QUE_LOG2SIZE, 0, 5)
+ FIELD(EVENT_QUE_BASE0, EVENT_QUE_ADDR0, 6, 26)
+
+REG32(EVENT_QUE_BASE1, 0x1104)
+ FIELD(EVENT_QUE_BASE1, EVENT_QUE_ADDR1, 0, 20)
+ FIELD(EVENT_QUE_BASE1, EVENT_QUE_WA_CFG, 31, 1)
+
+#define UMMU_EVENTQ_PI_WMASK (GENMASK(19, 0) | GENMASK(31, 31))
+REG32(EVENT_QUE_PI, 0x1108)
+ FIELD(EVENT_QUE_PI, EVENT_QUE_WR_IDX, 0, 19)
+ FIELD(EVENT_QUE_PI, EVENT_QUE_WR_WRAP, 19, 1)
+ FIELD(EVENT_QUE_PI, EVENT_QUE_OVFLG, 31, 1)
+
+#define UMMU_EVENTQ_CI_WMASK (GENMASK(19, 0) | GENMASK(31, 31))
+REG32(EVENT_QUE_CI, 0x110C)
+ FIELD(EVENT_QUE_CI, EVENT_QUE_RD_IDX, 0, 19)
+ FIELD(EVENT_QUE_CI, EVENT_QUE_RD_WRAP, 19, 1)
+ FIELD(EVENT_QUE_CI, EVENT_QUE_OVFLG_RESP, 31, 1)
+
+#define UMMU_EVENTQ_USI_ADDR_WMASK GENMASK_ULL(51, 2)
+REG32(EVENT_QUE_USI_ADDR0, 0x1110)
+ FIELD(EVENT_QUE_USI_ADDR0, USI_ADDR0, 2, 30)
+
+REG32(EVENT_QUE_USI_ADDR1, 0x1114)
+ FIELD(EVENT_QUE_USI_ADDR1, USI_ADDR1, 0, 20)
+
+#define UMMU_EVENT_QUE_USI_DATA_WMASK GENMASK(31, 0)
+REG32(EVENT_QUE_USI_DATA, 0x1118)
+ FIELD(EVENT_QUE_USI_DATA, USI_DATA, 0, 32)
+
+#define UMMU_EVENTQ_USI_ATTR_WMASK GENMASK(5, 0)
+REG32(EVENT_QUE_USI_ATTR, 0x111C)
+ FIELD(EVENT_QUE_USI_ATTR, USI_MEM_ATTR_CFG, 0, 4)
+ FIELD(EVENT_QUE_USI_ATTR, USI_SH_CFG, 4, 2)
+
+REG32(GLB_INT_EN, 0x1130)
+ FIELD(GLB_INT_EN, GLB_ERR_INT_EN, 0, 1)
+ FIELD(GLB_INT_EN, EVENT_QUE_INT_EN, 1, 1)
+
+REG32(GLB_ERR, 0x1134)
+ FIELD(GLB_ERR, MCMD_QUE_ERR, 0, 1)
+ FIELD(GLB_ERR, EVENT_QUE_ABT_ERR, 1, 1)
+ FIELD(GLB_ERR, USI_MCMD_QUE_ABT_ERR, 2, 1)
+ FIELD(GLB_ERR, USI_EVENT_QUE_ABT_ERR, 3, 1)
+ FIELD(GLB_ERR, USI_UIEQ_QUE_ABT_ERR, 4, 1)
+ FIELD(GLB_ERR, USI_GLB_ERR_ABT_ERR, 7, 1)
+
+/*
+ * Write mask for GLB_ERR_RESP. The expansion is now fully parenthesized:
+ * without the outer parentheses, 'x & UMMU_GLB_ERR_RESP_WMASK' would
+ * parse as '(x & GENMASK(4, 0)) | GENMASK(7, 7)'.
+ */
+#define UMMU_GLB_ERR_RESP_WMASK (GENMASK(4, 0) | GENMASK(7, 7))
+REG32(GLB_ERR_RESP, 0x1138)
+    FIELD(GLB_ERR_RESP, MCMDQ_QUE_ERR, 0, 1)
+    FIELD(GLB_ERR_RESP, EVENT_QUE_ABT_ERR, 1, 1)
+    FIELD(GLB_ERR_RESP, USI_MCMDQ_QUE_ABT_ERR, 2, 1)
+    FIELD(GLB_ERR_RESP, USI_EVENT_QUE_ABT_ERR, 3, 1)
+    FIELD(GLB_ERR_RESP, USI_UIEQ_QUE_ABT_ERR, 4, 1)
+    FIELD(GLB_ERR_RESP, USI_GLB_ERR_ABT_ERR, 7, 1)
+
+#define UMMU_GLB_ERR_INT_USI_ADDR_WMASK GENMASK_ULL(51, 2)
+REG32(GLB_ERR_INT_USI_ADDR0, 0x1140)
+    /*
+     * Bits 2..31 of the 52-bit USI address live in this word — 30 bits,
+     * not 29. Widened to match UMMU_GLB_ERR_INT_USI_ADDR_WMASK (51:2)
+     * and the parallel EVENT_QUE_USI_ADDR0 layout.
+     */
+    FIELD(GLB_ERR_INT_USI_ADDR0, USI_ADDR0, 2, 30)
+
+REG32(GLB_ERR_INT_USI_ADDR1, 0x1144)
+    /* bits 32..51 of the USI address — 20 bits, not 19 (see above) */
+    FIELD(GLB_ERR_INT_USI_ADDR1, USI_ADDR1, 0, 20)
+
+#define UMMU_GLB_ERR_INT_USI_DATA_WMASK GENMASK(31, 0)
+REG32(GLB_ERR_INT_USI_DATA, 0x1148)
+    FIELD(GLB_ERR_INT_USI_DATA, USI_DATA, 0, 32)
+
+#define UMMU_GLB_ERR_INT_USI_ATTR_WMASK GENMASK(5, 0)
+REG32(GLB_ERR_INT_USI_ATTR, 0x114C)
+    FIELD(GLB_ERR_INT_USI_ATTR, USI_MEM_ATTR_CFG, 0, 4)
+    FIELD(GLB_ERR_INT_USI_ATTR, USI_SH_CFG, 4, 2)
+
+#define MAPT_CMDQ_CTXT_BADDR_WMASK (((GENMASK_ULL(31, 31) | GENMASK_ULL(19, 0)) << 32) | \
+ (GENMASK_ULL(4, 0) | GENMASK_ULL(31, 6)))
+REG32(MAPT_CMDQ_CTXT_BADDR0, 0x1160)
+ FIELD(MAPT_CMDQ_CTXT_BADDR0, MAPT_CMDQ_CTXT_LOG2SIZE, 0, 5)
+ FIELD(MAPT_CMDQ_CTXT_BADDR0, MAPT_CMDQ_CTXT_ADDR0, 6, 26)
+
+REG32(MAPT_CMDQ_CTXT_BADDR1, 0x1164)
+ FIELD(MAPT_CMDQ_CTXT_BADDR1, MAPT_CMDQ_CTXT_ADDR1, 0, 20)
+ FIELD(MAPT_CMDQ_CTXT_BADDR1, MAPT_CMDQ_CTXT_RA_CFG, 31, 1)
+
+#define RELEASE_UM_QUEUE_WMASK 0x1
+REG32(RELEASE_UM_QUEUE, 0x1178)
+ FIELD(RELEASE_UM_QUEUE, MAPT_RLSE_UM_CMDQ, 0, 1)
+
+#define RELEASE_UM_QUEUE_ID_WMASK GENMASK(30, 0)
+REG32(RELEASE_UM_QUEUE_ID, 0x117C)
+ FIELD(RELEASE_UM_QUEUE_ID, MAPT_RLSE_UM_CMDQ_ID, 0, 31)
+
+#define A_UCMDQ_PI_START_REG 0x20000
+/* MAPT Commd queue control page 4k: 0x2000C + 2^16 * 0x1000
+ * MAPT Commd queue control page 64k: 0x2000C + 2^12 * 0x10000 */
+#define A_UCPLQ_CI_END_REG 0x1002000C
+
+/* ummu user register define */
+REG32(UMMU_INT_MASK, 0x3404)
+ FIELD(UMMU_INT_MASK, UIEQ_USI_MASK, 0, 1)
+ FIELD(UMMU_INT_MASK, UBIF_USI_MASK, 1, 1)
+
+REG32(DSTEID_KV_TABLE_BASE0, 0x3800)
+ FIELD(DSTEID_KV_TABLE_BASE0, DSTEID_TV_TABLE_BASE_ADDR0, 5, 27)
+
+REG32(DSTEID_KV_TABLE_BASE1, 0x3804)
+ FIELD(DSTEID_KV_TABLE_BASE1, DSTEID_TV_TABLE_BASE_ADDR1, 0, 20)
+
+REG32(DSTEID_KV_TABLE_BASE_CFG, 0x3808)
+ FIELD(DSTEID_KV_TABLE_BASE_CFG, DSTEID_KV_TABLE_MEMATTR, 0, 4)
+ FIELD(DSTEID_KV_TABLE_BASE_CFG, DSTEID_KV_TABLE_SH, 4, 2)
+ FIELD(DSTEID_KV_TABLE_BASE_CFG, DSTEID_KV_TABLE_BANK_NUM, 8, 8)
+ FIELD(DSTEID_KV_TABLE_BASE_CFG, DSTEID_KV_TABLE_DEPTH, 16, 16)
+
+REG32(UMMU_DSTEID_KV_TABLE_HASH_CFG0, 0x380C)
+ FIELD(UMMU_DSTEID_KV_TABLE_HASH_CFG0, DSTEID_KV_TABLE_HASH_SEL, 0, 4)
+ FIELD(UMMU_DSTEID_KV_TABLE_HASH_CFG0, DSTEID_KV_TABLE_HASH_WIDTH, 4, 4)
+
+REG32(UMMU_DSTEID_KV_TABLE_HASH_CFG1, 0x3810)
+ FIELD(UMMU_DSTEID_KV_TABLE_HASH_CFG1, DSTEID_KV_TABLE_HASH_CRC32_SEED, 0, 32)
+
+REG32(UMMU_DSTEID_CAM_TABLE_BASE0, 0x3820)
+    FIELD(UMMU_DSTEID_CAM_TABLE_BASE0, DSTEID_CAM_TABLE_BASE_ADDR0, 5, 27)
+
+REG32(UMMU_DSTEID_CAM_TABLE_BASE1, 0x3824)
+    FIELD(UMMU_DSTEID_CAM_TABLE_BASE1, DSTEID_CAM_TABLE_BASE_ADDR1, 0, 20)
+
+REG32(UMMU_DSTEID_CAM_TABLE_BASE_CFG, 0x3828)
+    FIELD(UMMU_DSTEID_CAM_TABLE_BASE_CFG, DSTEID_CAM_TABLE_MEMATTR, 0, 4)
+    FIELD(UMMU_DSTEID_CAM_TABLE_BASE_CFG, DSTEID_CAM_TABLE_SH, 4, 2)
+    /*
+     * A field starting at bit 16 of a 32-bit register cannot be 32 bits
+     * wide (16 + 32 > 32, which trips the extract32/deposit32 asserts).
+     * Use 16 bits, mirroring DSTEID_KV_TABLE_BASE_CFG.DSTEID_KV_TABLE_DEPTH
+     * — TODO confirm against the UB spec.
+     */
+    FIELD(UMMU_DSTEID_CAM_TABLE_BASE_CFG, DSTEID_CAM_TABLE_DEPTH, 16, 16)
+
+#define MAPT_CMDQ_CTRLR_PAGE_SIZE_4K 1
+#define MAPT_CMDQ_CTRLR_PAGE_SIZE_64K 0
+#define UMCMD_PAGE_SEL_WMASK 0x1
+REG32(UMCMD_PAGE_SEL, 0x3834)
+ FIELD(UMCMD_PAGE_SEL, PAGE_MODEL_SEL_EN, 0, 1)
+
+
+/* ummu user logic register define */
+REG32(UMMU_USER_CONFIG0, 0x4C00)
+
+REG32(UMMU_USER_CONFIG1, 0x4C04)
+
+REG32(UMMU_USER_CONFIG2, 0x4C08)
+ FIELD(UMMU_USER_CONFIG2, INV_TLB_ALL_NS, 0, 1)
+ FIELD(UMMU_USER_CONFIG2, TBU_L2_MEM_INIT_EN, 1, 1)
+ FIELD(UMMU_USER_CONFIG2, TBU_L2_MEM_INITING, 2, 1)
+ FIELD(UMMU_USER_CONFIG2, MCMDQ_MEM_INIT_EN, 3, 1)
+ FIELD(UMMU_USER_CONFIG2, MCMDQ_MEM_INITING, 4, 1)
+
+REG32(UMMU_USER_CONFIG3, 0x4C0C)
+
+REG32(UMMU_USER_CONFIG4, 0x4C10)
+
+REG32(UMMU_USER_CONFIG5, 0x4C14)
+
+REG32(UMMU_USER_CONFIG6, 0x4C18)
+
+REG32(UMMU_USER_CONFIG7, 0x4C1C)
+
+REG32(UMMU_USER_CONFIG8, 0x4C20)
+
+REG32(UMMU_USER_CONFIG9, 0x4C24)
+
+REG32(UMMU_USER_CONFIG10, 0x4C28)
+
+REG32(UMMU_USER_CONFIG11, 0x4C2C)
+
+REG32(UMMU_MEM_USI_ADDR0, 0x4D90)
+ FIELD(UMMU_MEM_USI_ADDR0, UBIF_MEM_USI_ADDR0, 2, 30)
+
+REG32(UMMU_MEM_USI_ADDR1, 0x4D94)
+ FIELD(UMMU_MEM_USI_ADDR1, UBIF_MEM_USI_ADDR1, 0, 20)
+
+REG32(UMMU_MEM_USI_DATA, 0x4D98)
+ FIELD(UMMU_MEM_USI_DATA, UBIF_MEM_USI_DATA, 0, 32)
+
+REG32(UMMU_MEM_USI_ATTR, 0x4D9C)
+ FIELD(UMMU_MEM_USI_ATTR, UBIF_MEM_USI_MEM_ATTR_CFG, 0, 4)
+ FIELD(UMMU_MEM_USI_ATTR, UBIF_MEM_USI_SH_CFG, 4, 2)
+
+#define TYPE_UMMU_IOMMU_MEMORY_REGION "ummu-iommu-memory-region"
+
+#define CMD_TYPE(x) extract32((x)->word[0], 0, 8)
+#define CMD_SYNC_CM(x) extract32((x)->word[0], 12, 2)
+#define CMD_SYNC_CM_NONE 0x0
+#define CMD_SYNC_CM_USI 0x1
+#define CMD_SYNC_CM_SEV 0x2
+#define CMD_SYNC_USI_SH(x) extract32((x)->word[0], 14, 2)
+#define CMD_SYNC_USI_ATTR(x) extract32((x)->word[0], 16, 4)
+#define CMD_SYNC_USI_DATA(x) extract32((x)->word[1], 0, 32)
+#define CMD_SYNC_USI_ADDR(x) ((*(uint64_t *)&(x)->word[2]) & GENMASK_ULL(51, 2))
+#define CMD_CREATE_KVTBL_DEST_EID(x) extract32((x)->word[4], 0, 32)
+#define CMD_CREATE_KVTBL_BASE_ADDR(x) ((*(uint64_t *)&(x)->word[2]) & GENMASK_ULL(51, 6))
+#define CMD_CREATE_KVTBL_TECTE_TAG(x) extract32((x)->word[0], 16, 16)
+#define CMD_DELETE_KVTBL_DEST_EID(x) extract32((x)->word[4], 0, 32)
+#define CMD_DELETE_KVTBL_TECTE_TAG(x) extract32((x)->word[0], 16, 16)
+#define CMD_TECTE_TAG(x) extract32((x)->word[4], 0, 16)
+#define CMD_TECTE_RANGE(x) extract32((x)->word[1], 20, 5)
+/* according to UB SPEC, if range val is 31, invalid all tecte */
+#define CMD_TECTE_RANGE_INVILID_ALL(x) ((x) == 31)
+#define CMD_NULL_SUBOP_CHECK_PA_CONTINUITY 1
+#define CMD_NULL_SUBOP(x) extract32((x)->word[0], 8, 8)
+#define CMD_NULL_CHECK_PA_CONTI_SIZE(x) (1 << extract32((x)->word[0], 24, 6))
+#define CMD_NULL_CHECK_PA_CONTI_ADDR(x) ((*(uint64_t *)&(x)->word[2]) & GENMASK_ULL(47, 12))
+#define UMMU_RUN_IN_VM_FLAG 0x10
+#define PA_CONTINUITY 0x00
+#define PA_NOT_CONTINUITY 0x01
+
+/* Parenthesized: an unparenthesized '~0xf0UL' expansion is a hygiene hazard. */
+#define MCMDQ_BASE_ADDR_MASK (~0xf0UL)
+/* bits 7:4 of the offset select which of the MCMD queues is addressed */
+#define MCMDQ_IDX_MASK 0xf0
+#define MCMDQ_PROD_WMASK 0x808fffff
+#define MCMDQ_CONS_WMASK 0x878fffff
+#define MCMDQ_PROD_BASE_ADDR 0x108
+#define MCMDQ_CONS_BASE_ADDR 0x10C
+#define MCMD_QUE_LOG2SIZE(x) extract32(x, 0, 5)
+#define MCMD_QUE_BASE_ADDR(que) ((que)->base & GENMASK_ULL(51, 5))
+#define MCMD_QUE_RD_IDX(que) (extract32((que)->cons, 0, 19) & ((1 << (que)->log2size) - 1))
+#define MCMD_QUE_WD_IDX(que) (extract32((que)->prod, 0, 19) & ((1 << (que)->log2size) - 1))
+#define MCMD_QUE_RD_WRAP(que) extract32((que)->cons, (que)->log2size, 1)
+#define MCMD_QUE_WD_WRAP(que) extract32((que)->prod, (que)->log2size, 1)
+#define MCMD_QUE_EN_BIT(que) extract32((que)->prod, 31, 1)
+#define MCMD_QUE_EN_RESP_BIT 31
+
+#define EVENT_QUE_LOG2SIZE(x) extract32(x, 0, 5)
+#define EVENT_QUE_BASE_ADDR(que) ((que)->base & GENMASK_ULL(51, 6))
+#define EVENT_QUE_RD_IDX(que) (extract32((que)->cons, 0, 19) & ((1 << (que)->log2size) - 1))
+#define EVENT_QUE_WR_IDX(que) (extract32((que)->prod, 0, 19) & ((1 << (que)->log2size) - 1))
+#define EVENT_QUE_RD_WRAP(que) extract32((que)->cons, (que)->log2size, 1)
+#define EVENT_QUE_WR_WRAP(que) extract32((que)->prod, (que)->log2size, 1)
+
+#define TECT_BASE_ADDR(x) ((x) & GENMASK_ULL(51, 6))
+#define TECT_L2TECTE_PTR(x) ((*(uint64_t *)&(x)->word[0]) & GENMASK_ULL(51, 6))
+#define TECT_DESC_V(x) extract32((x)->word[0], 0, 1)
+#define TECTE_TCT_PTR(x) ((*(uint64_t *)&(x)->word[2]) & GENMASK_ULL(51, 6))
+#define TECTE_TCT_NUM(x) extract32((x)->word[2], 0, 5)
+#define TECTE_TCT_FMT(x) extract32((x)->word[3], 20, 2)
+#define TECTE_VALID(x) extract32((x)->word[0], 0, 1)
+#define TECTE_ST_MODE(x) extract32((x)->word[0], 1, 3)
+#define TECTE_ST_MODE_ABORT 0
+#define TECTE_ST_MODE_BYPASS 4
+#define TECTE_ST_MODE_S1 5
+#define TECTE_ST_MODE_S2 6
+#define TECTE_ST_MODE_NESTED 7
+
+#define TCT_FMT_LINEAR 0
+#define TCT_FMT_LVL2_4K 1
+#define TCT_FMT_LVL2_64K 2
+#define TCT_SPLIT_64K 10
+#define TCT_L2_ENTRIES (1UL << TCT_SPLIT_64K)
+#define TCT_L1TCTE_V(x) extract32((x)->word[0], 0, 1)
+#define TCT_L2TCTE_PTR(x) ((*(uint64_t *)&(x)->word[0]) & GENMASK_ULL(51, 12))
+#define TCTE_TTBA(x) ((*(uint64_t *)&(x)->word[4]) & GENMASK_ULL(51, 4))
+#define TCTE_TCT_V(x) extract32((x)->word[0], 0, 1)
+#define TCTE_SZ(x) extract32((x)->word[2], 0, 6)
+#define TCTE_TGS(x) extract32((x)->word[2], 6, 2)
+/* according ub spec Chapter 9, tct max num is 2 ^ tct_num */
+#define TCTE_MAX_NUM(x) (1 << (x))
+
+#define MAPT_CMDQ_CTXT_BASE_BYTES 64
+#define MAPT_CMDQ_CTXT_BASE_ADDR(x) ((x) & GENMASK_ULL(51, 6))
+#define UCMDQ_UCPLQ_CI_PI_MASK 0xFULL
+#define UCMDQ_PI 0x00
+#define UCMDQ_CI 0x04
+#define UCPLQ_PI 0x08
+#define UCPLQ_CI 0x0C
+#define MAPT_4K_CMDQ_CTXT_QID(offset) ((((offset) & (~0xFULL)) - A_UCMDQ_PI_START_REG) / 0x1000)
+#define MAPT_64K_CMDQ_CTXT_QID(offset) ((((offset) & (~0xFULL)) - A_UCMDQ_PI_START_REG) / 0x10000)
+#define MAPT_UCMDQ_LOG2SIZE(base) extract32((base)->word[0], 2, 4)
+#define MAPT_UCMDQ_PI(base) (extract32((base)->word[10], 0, 16) & \
+ ((1 << MAPT_UCMDQ_LOG2SIZE(base)) - 1))
+#define MAPT_UCMDQ_PI_WRAP(base) extract32((base)->word[10], MAPT_UCMDQ_LOG2SIZE(base), 1)
+#define MAPT_UCMDQ_CI(base) (extract32((base)->word[10], 16, 16) & \
+ ((1 << MAPT_UCMDQ_LOG2SIZE(base)) - 1))
+#define MAPT_UCMDQ_CI_WRAP(base) extract32((base)->word[10], 16 + MAPT_UCMDQ_LOG2SIZE(base), 1)
+#define MAPT_UCMDQ_BASE_ADDR(base) ((*(uint64_t *)&(base)->word[0]) & GENMASK_ULL(51, 12))
+
+#define MAPT_UCMD_TYPE_PSYNC 0x01
+#define MAPT_UCMD_TYPE_PLBI_USR_ALL 0x10
+#define MAPT_UCMD_TYPE_PLBI_USR_VA 0x11
+#define MAPT_UCMD_TYPE(cmd) ((cmd)->word[0] & GENMASK(7, 0))
+
+#define MAPT_UCPLQ_LOG2SIZE(base) extract32((base)->word[2], 2, 4)
+#define MAPT_UCPLQ_PI(base) (extract32((base)->word[11], 0, 16) & \
+ ((1 << MAPT_UCPLQ_LOG2SIZE(base)) - 1))
+#define MAPT_UCPLQ_PI_WRAP(base) extract32((base)->word[11], MAPT_UCPLQ_LOG2SIZE(base), 1)
+#define MAPT_UCPLQ_CI(base) (extract32((base)->word[11], 16, 16) & \
+ ((1 << MAPT_UCPLQ_LOG2SIZE(base)) - 1))
+#define MAPT_UCPLQ_CI_WRAP(base) extract32((base)->word[11], 16 + MAPT_UCPLQ_LOG2SIZE(base), 1)
+#define MAPT_UCPLQ_BASE_ADDR(base) ((*(uint64_t *)&(base)->word[2]) & GENMASK_ULL(51, 12))
+#define MAPT_UCPL_STATUS_INVALID 0x0
+#define MAPT_UCPL_STATUS_PSYNC_SUCCESS 0x1
+#define MAPT_UCPL_STATUS_TYPE_ERROR 0x2
+#define MAPT_UCPL_STATUS_PROCESS_ERROR 0x3
+
+/* One 32-byte MCMD queue command: 8 words, decoded via the CMD_* accessors. */
+typedef struct UMMUMcmdqCmd {
+    uint32_t word[8];
+} UMMUMcmdqCmd;
+
+/* One 64-byte event queue record: 16 words, built via the EVT_SET_* helpers. */
+typedef struct UMMUEvent {
+    uint32_t word[16];
+} UMMUEvent;
+
+/*
+ * MCMD queue command opcodes (word[0] bits 7:0, see CMD_TYPE()).
+ * Hex literals normalized to two uppercase digits ('0x2a'/'0xAa' had
+ * mixed casing); all values are unchanged.
+ */
+typedef enum UmmuMcmdqCmdType {
+    CMD_SYNC = 0x01,
+    CMD_STALL_RESUME = 0x02,
+    CMD_PREFET_CFG = 0x04,
+    CMD_CFGI_TECT = 0x08,
+    CMD_CFGI_TECT_RANGE = 0x09,
+    CMD_CFGI_TCT = 0x0A,
+    CMD_CFGI_TCT_ALL = 0x0B,
+    CMD_CFGI_VMS_PIDM = 0x0C,
+    CMD_PLBI_OS_EID = 0x14,
+    CMD_PLBI_OS_EIDTID = 0x15,
+    CMD_PLBI_OS_VA = 0x16,
+    CMD_TLBI_OS_ALL = 0x10,
+    CMD_TLBI_OS_TID = 0x11,
+    CMD_TLBI_OS_VA = 0x12,
+    CMD_TLBI_OS_VAA = 0x13,
+    CMD_TLBI_HYP_ALL = 0x18,
+    CMD_TLBI_HYP_TID = 0x19,
+    CMD_TLBI_HYP_VA = 0x1A,
+    CMD_TLBI_HYP_VAA = 0x1B,
+    CMD_TLBI_S1S2_VMALL = 0x28,
+    CMD_TLBI_S2_IPA = 0x2A,
+    CMD_TLBI_NS_OS_ALL = 0x2C,
+    CMD_RESUME = 0x44,
+    CMD_CREATE_KVTBL = 0x60,
+    CMD_DELETE_KVTBL = 0x61,
+    CMD_NULL = 0x62,
+    CMD_TLBI_OS_ALL_U = 0x90,
+    CMD_TLBI_OS_ASID_U = 0x91,
+    CMD_TLBI_OS_VA_U = 0x92,
+    CMD_TLBI_OS_VAA_U = 0x93,
+    CMD_TLBI_HYP_ASID_U = 0x99,
+    CMD_TLBI_HYP_VA_U = 0x9A,
+    CMD_TLBI_S1S2_VMALL_U = 0xA8,
+    CMD_TLBI_S2_IPA_U = 0xAA,
+    MCMDQ_CMD_MAX,
+} UmmuMcmdqCmdType;
+
+/* Stage-2 hardware page table handle allocated through iommufd. */
+typedef struct UMMUS2Hwpt {
+    IOMMUFDBackend *iommufd;
+    uint32_t hwpt_id;
+    uint32_t ioas_id;
+} UMMUS2Hwpt;
+
+/* Virtual IOMMU object shared by the devices behind one UMMU instance. */
+typedef struct UMMUViommu {
+    UMMUState *ummu;
+    IOMMUFDBackend *iommufd;
+    IOMMUFDViommu *core;
+    UMMUS2Hwpt *s2_hwpt;
+    QLIST_HEAD(, UMMUDevice) device_list;
+    QLIST_ENTRY(UMMUViommu) next;
+} UMMUViommu;
+
+/* Stage-1 hardware page table handle and the devices attached to it. */
+typedef struct UMMUS1Hwpt {
+    void *ummu;
+    IOMMUFDBackend *iommufd;
+    UMMUViommu *viommu;
+    uint32_t hwpt_id;
+    QLIST_HEAD(, UMMUDevice) device_list;
+    /*
+     * List link for UMMUS1Hwpt elements. The original declared
+     * QLIST_ENTRY(UMMUViommu) — a copy-paste from UMMUViommu that makes
+     * the le_next/le_prev pointers refer to the wrong element type.
+     */
+    QLIST_ENTRY(UMMUS1Hwpt) next;
+} UMMUS1Hwpt;
+
+/* Virtual device object binding a vSID to a viommu instance. */
+typedef struct UMMUVdev {
+    UMMUViommu *vummu;
+    IOMMUFDVdev *core;
+    uint32_t sid;
+} UMMUVdev;
+
+/* Per-UB-device translation context: IOMMU region, address spaces and
+ * the iommufd objects (viommu/hwpt/vdev) it is attached to. */
+typedef struct UMMUDevice {
+    UMMUState *ummu;
+    IOMMUMemoryRegion iommu;
+    AddressSpace as;
+    AddressSpace as_sysmem;
+    HostIOMMUDeviceIOMMUFD *idev;
+    UMMUViommu *viommu;
+    UMMUS1Hwpt *s1_hwpt;
+    UBDevice *udev;
+    UMMUVdev *vdev;
+    QLIST_ENTRY(UMMUDevice) next;
+} UMMUDevice;
+
+/* Translation configuration gathered while walking TECT/TCT tables. */
+typedef struct UMMUTransCfg {
+    dma_addr_t tct_ptr;
+    uint64_t tct_num;
+    uint64_t tct_fmt;
+    dma_addr_t tct_ttba;
+    uint32_t tct_sz;
+    uint32_t tct_tgs;
+    uint32_t tecte_tag;
+    uint32_t tid;
+} UMMUTransCfg;
+
+/*
+ * Event codes reported through the UMMU event queue.
+ * NOTE(review): the comments for the EVT_RESERVE_* placeholders and
+ * their neighbours (EVT_BAD_TOKENID, EVT_EBIT_DENY) were attached to
+ * the wrong enumerators in the original; they are re-paired below.
+ * Enumerator names and values are unchanged.
+ */
+typedef enum UMMUEventType {
+    EVT_NONE = 0,
+    /* unsupport translation type */
+    EVT_UT,
+    /* dstEid overflow */
+    EVT_BAD_DSTEID,
+    /* abort when visit tect, or addr overflow */
+    EVT_TECT_FETCH,
+    /* TECT not valid, (V=0) */
+    EVT_BAD_TECT,
+    /* reserved, no content */
+    EVT_RESERVE_0 = 5,
+    /* tect ent lack tokenid */
+    EVT_BAD_TOKENID,
+    /* 1. TECT.TCT_MAXNUM = 0, tokenid disable,
+     * 2. TECT.ST_MODE[0] = 0, stage 1 translation close.
+     * 3. tokenid > TECT.TCT_MAXNUM
+     * 4. lvl1 tct invalid in two-level tct
+     */
+    EVT_TCT_FETCH,
+    /* invalid tct */
+    EVT_BAD_TCT,
+    /* error when Address Table walk */
+    EVT_A_PTW_EABT,
+    /* translation input bigger than max valid value,
+     * or no valid translation table descriptor
+     */
+    EVT_A_TRANSLATION = 10,
+    /* address translation out put bigger than max valid value */
+    EVT_A_ADDR_SIZE,
+    /* Access flag fault because of AF=0 */
+    EVT_ACCESS,
+    /* address translation permission error */
+    EVT_A_PERMISSION,
+    /* TLB or PLB conflicted in translation */
+    EVT_TBU_CONFLICT,
+    /* config cache conflicted in translation */
+    EVT_CFG_CONFLICT = 15,
+    /* error occured when getting VMS */
+    EVT_VMS_FETCH,
+    /* error when Permission Table walk */
+    EVT_P_PTW_EABT,
+    /* abnormal software configuration in PTW */
+    EVT_P_CFG_ERROR,
+    /* permission exception in PTW process */
+    EVT_P_PERMISSION,
+    /* reserved, no content */
+    EVT_RESERVE_1 = 20,
+    /* E-Bit verification failed */
+    EVT_EBIT_DENY,
+    /* the UMMU hardware reports the execution result
+     * of the CMD_CREAT_DSTEID_TECT_RELATION command
+     * to the software.
+     */
+    EVT_CREATE_DSTEID_TECT_RELATION_RESULT = 60,
+    /* the UMMU hardware reports the execution result
+     * of the CMD_DELETE_DSTEID_TECT_RELATION command
+     * to the software.
+     */
+    EVT_DELETE_DSTEID_TECT_RELATION_RESULT,
+    EVT_MAX
+} UMMUEventType;
+
+/* Parameters of one event prior to encoding it into a UMMUEvent record. */
+typedef struct UMMUEventInfo {
+    UMMUEventType type;
+    uint32_t tecte_tag;
+    uint32_t tid;
+    union {
+        struct {
+            bool stall;
+        } f_translation;
+    } u;
+    /* TODO */
+} UMMUEventInfo;
+
+/* Outcome of a page table walk. */
+typedef enum {
+    UMMU_PTW_ERR_NONE,
+    UMMU_PTW_ERR_TRANSLATION,
+    UMMU_PTW_ERR_PERMISSION
+} UMMUPTWEventType;
+
+typedef struct UMMUPTWEventInfo {
+    UMMUPTWEventType type;
+} UMMUPTWEventInfo;
+
+/* Encode event fields into the corresponding UMMUEvent words. */
+#define EVT_SET_TYPE(x, v) ((x)->word[0] = deposit32((x)->word[0], 0, 8, v))
+#define EVT_SET_TECTE_TAG(x, v) ((x)->word[8] = deposit32((x)->word[8], 0, 16, v))
+#define EVT_SET_TID(x, v) ((x)->word[1] = deposit32((x)->word[1], 0, 20, v))
+
+/* TECTE Level 1 Description */
+typedef struct TECTEDesc {
+    uint32_t word[2];
+} TECTEDesc;
+
+/* TCTE Level1 Description */
+typedef struct TCTEDesc {
+    uint32_t word[2];
+} TCTEDesc;
+
+/* Target Entity Config Table Entry (TECTE) */
+typedef struct TECTE {
+    uint32_t word[16];
+} TECTE;
+
+/* Target Context Table Entry (TCTE) */
+typedef struct TCTE {
+    uint32_t word[16];
+} TCTE;
+
+/* MAPT user command queue context descriptor (see MAPT_UCMDQ_* accessors). */
+typedef struct MAPTCmdqBase {
+    uint32_t word[16];
+} MAPTCmdqBase;
+
+/* One MAPT user command (see MAPT_UCMD_TYPE). */
+typedef struct MAPTCmd {
+    uint32_t word[4];
+} MAPTCmd;
+
+/* MAPT completion record: status plus the consumer index it acknowledges. */
+typedef struct MAPTCmdCpl {
+    uint32_t cpl_status : 4;
+    uint32_t rsv : 12;
+    uint32_t cmdq_ci : 16;
+} MAPTCmdCpl;
+
+/* Range of TECT entries targeted by a CFGI command. */
+typedef struct UMMUTecteRange {
+    bool invalid_all;
+    uint32_t start;
+    uint32_t end;
+} UMMUTecteRange;
+
+/*
+ * Merge @val into *@old, modifying only the bits set in @wmask.
+ * The second parameter was renamed from 'new' so this header remains
+ * includable from C++ translation units ('new' is a C++ keyword).
+ */
+static inline void update_reg32_by_wmask(uint32_t *old, uint32_t val, uint32_t wmask)
+{
+    *old = (*old & ~wmask) | (val & wmask);
+}
+
+/* 64-bit variant of update_reg32_by_wmask(). */
+static inline void update_reg64_by_wmask(uint64_t *old, uint64_t val, uint64_t wmask)
+{
+    *old = (*old & ~wmask) | (val & wmask);
+}
+
+/* True when the guest has set the MCMDQ enable bit (prod bit 31). */
+static inline bool ummu_mcmdq_enabled(UMMUMcmdQueue *mcmdq)
+{
+    return MCMD_QUE_EN_BIT(&mcmdq->queue);
+}
+
+/* Acknowledge queue enable to the guest by mirroring bit 31 into cons. */
+static inline void ummu_mcmdq_enable_resp(UMMUMcmdQueue *mcmdq)
+{
+    mcmdq->queue.cons |= GENMASK(MCMD_QUE_EN_RESP_BIT, MCMD_QUE_EN_RESP_BIT);
+}
+
+/* Acknowledge queue disable by clearing the response bit in cons. */
+static inline void ummu_mcmdq_disable_resp(UMMUMcmdQueue *mcmdq)
+{
+    mcmdq->queue.cons &= ~(GENMASK(MCMD_QUE_EN_RESP_BIT, MCMD_QUE_EN_RESP_BIT));
+}
+
+/* Empty when read and write indexes AND their wrap bits coincide. */
+static inline bool ummu_mcmdq_empty(UMMUMcmdQueue *mcmdq)
+{
+    UMMUQueue *q = &mcmdq->queue;
+
+    return MCMD_QUE_WD_IDX(q) == MCMD_QUE_RD_IDX(q) &&
+           MCMD_QUE_WD_WRAP(q) == MCMD_QUE_RD_WRAP(q);
+}
+
+/* Advance cons by one; log2size + 1 bits so the wrap bit toggles too. */
+static inline void ummu_mcmdq_cons_incr(UMMUMcmdQueue *mcmdq)
+{
+    mcmdq->queue.cons =
+        deposit32(mcmdq->queue.cons, 0, mcmdq->queue.log2size + 1, mcmdq->queue.cons + 1);
+}
+
+/* Latch the event queue interrupt enable bit from a GLB_INT_EN write. */
+static inline void ummu_set_event_que_int_en(UMMUState *u, uint64_t data)
+{
+    u->eventq.event_que_int_en = FIELD_EX32(data, GLB_INT_EN, EVENT_QUE_INT_EN);
+}
+
+/* Latch the global error interrupt enable bit from a GLB_INT_EN write. */
+static inline void ummu_set_glb_err_int_en(UMMUState *u, uint64_t data)
+{
+    u->glb_err.glb_err_int_en = FIELD_EX32(data, GLB_INT_EN, GLB_ERR_INT_EN);
+}
+
+/* True when event queue interrupts are enabled. */
+static inline bool ummu_event_que_int_en(UMMUState *u)
+{
+    return u->eventq.event_que_int_en;
+}
+
+/* True when global error interrupts are enabled. */
+static inline bool ummu_glb_err_int_en(UMMUState *u)
+{
+    return u->glb_err.glb_err_int_en;
+}
+
+static inline USIMessage ummu_get_eventq_usi_message(UMMUState *u)
+{
+ USIMessage msg;
+
+ msg.address = u->eventq.usi_addr;
+ msg.data = u->eventq.usi_data;
+
+ return msg;
+}
+
+static inline USIMessage ummu_get_gerror_usi_message(UMMUState *u)
+{
+ USIMessage msg;
+
+ msg.address = u->glb_err.usi_addr;
+ msg.data = u->glb_err.usi_data;
+
+ return msg;
+}
+
+#define UMMU_TECT_MODE_SPARSE_TABLE 0x1
+/* Non-zero when CTRL1.TECT_MODE_SEL selects the sparse-table TECT mode. */
+static inline uint32_t ummu_tect_mode_sparse_table(UMMUState *u)
+{
+    return FIELD_EX32(u->ctrl[1], CTRL1, TECT_MODE_SEL) & UMMU_TECT_MODE_SPARSE_TABLE;
+}
+
+#define UMMU_FEAT_2_LVL_TECT 0x1
+/* Non-zero when TECT_BASE_CFG.TECT_FMT selects the two-level table format. */
+static inline uint32_t ummu_tect_fmt_2level(UMMUState *u)
+{
+    return FIELD_EX32(u->tect_base_cfg, TECT_BASE_CFG, TECT_FMT) & UMMU_FEAT_2_LVL_TECT;
+}
+
+/* Raw TECT_SPLIT field: split point of a two-level TECT. */
+static inline uint32_t ummu_tect_split(UMMUState *u)
+{
+    return FIELD_EX32(u->tect_base_cfg, TECT_BASE_CFG, TECT_SPLIT);
+}
+
+/*
+ * Convert a TGS (translation granule size) field encoding into
+ * log2(granule size); returns 0 for reserved encodings.
+ */
+static inline int tgs2granule(int bits)
+{
+    switch (bits) {
+    case 0:
+        /* Translation Granule Size: 2 ^ 12 = 4K */
+        return 12;
+    case 1:
+        /* Translation Granule Size: 2 ^ 16 = 64K */
+        return 16;
+    case 2:
+        /* Translation Granule Size: 2 ^ 14 = 16K */
+        return 14;
+    default:
+        return 0;
+    }
+}
+
+/* Event queue processing is gated by CTRL0.EVENTQ_EN. */
+static inline bool ummu_eventq_enabled(UMMUState *u)
+{
+    return !!FIELD_EX32(u->ctrl[0], CTRL0, EVENTQ_EN);
+}
+
+/* Full: write/read indices equal but their wrap bits differ. */
+static inline bool ummu_eventq_full(UMMUEventQueue *eventq)
+{
+    UMMUQueue *q = &eventq->queue;
+
+    return EVENT_QUE_WR_IDX(q) == EVENT_QUE_RD_IDX(q) &&
+           EVENT_QUE_WR_WRAP(q) != EVENT_QUE_RD_WRAP(q);
+}
+
+/* Empty: write/read indices equal and their wrap bits equal. */
+static inline bool ummu_eventq_empty(UMMUEventQueue *eventq)
+{
+    UMMUQueue *q = &eventq->queue;
+
+    return EVENT_QUE_WR_IDX(q) == EVENT_QUE_RD_IDX(q) &&
+           EVENT_QUE_WR_WRAP(q) == EVENT_QUE_RD_WRAP(q);
+}
+
+/* Advance the producer index by one, including the wrap bit. */
+static inline void ummu_eventq_prod_incr(UMMUEventQueue *eventq)
+{
+    UMMUQueue *q = &eventq->queue;
+
+    /*
+     * log2size + 1: one extra bit for the queue wrap flag.
+     * When cons == prod the queue is either full or empty; the wrap
+     * bits disambiguate: cons.wrap == prod.wrap means empty,
+     * cons.wrap != prod.wrap means full.
+     */
+    q->prod = deposit32(q->prod, 0, q->log2size + 1, q->prod + 1);
+}
+
+/*
+ * MAPT Cmd Queue Base Struct
+ * ┌──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┐
+ * │31│30│29│28│27│26│25│24│23│22│21│20│19│18│17│16│15│14│13│12│11│10│ 9│ 8│ 7│ 6│ 5│ 4│ 3│ 2│ 1│ 0│
+ * 0 │ UCMD QUEUE BASE ADDRESS[31:12] │ │
+ * 1 │ │ UCMD QUEUE BASE ADDRESS[51:32] │
+ * 2 │ UCPL QUEUE BASE ADDRESS[31:12] │ │
+ * 3 │ │ UCPL QUEUE BASE ADDRESS[51:32] │
+ * 4 │ │ TECTE_TAG │
+ * 5 │ │
+ * 6 │ │
+ * 7 │ │
+ * 8 │ │ TokenID │
+ * 9 │ │
+ * 10 │ UCMQ_QUEUE CI │ UCMQ_QUEUE PI │
+ * 11 │ UCPL_QUEUE CI │ UCPL_QUEUE PI │
+ * 12 │ │
+ * 13 │ │
+ * 14 │ │
+ * 15 │ │
+ * └───────────────────────────────────────────────────────────────────────────────────────────────┘
+ */
+/* word[10] bits [15:0]: ucmd queue producer index. */
+static inline void ummu_mapt_cmdq_base_update_ucmdq_pi(MAPTCmdqBase *base, uint16_t data)
+{
+    base->word[10] = deposit32(base->word[10], 0, 16, data);
+}
+
+/* word[10] bits [31:16]: ucmd queue consumer index. */
+static inline void ummu_mapt_cmdq_base_update_ucmdq_ci(MAPTCmdqBase *base, uint16_t data)
+{
+    base->word[10] = deposit32(base->word[10], 16, 16, data);
+}
+
+/* word[11] bits [15:0]: ucpl queue producer index. */
+static inline void ummu_mapt_cmdq_base_update_ucplq_pi(MAPTCmdqBase *base, uint16_t data)
+{
+    base->word[11] = deposit32(base->word[11], 0, 16, data);
+}
+
+/* word[11] bits [31:16]: ucpl queue consumer index. */
+static inline void ummu_mapt_cmdq_base_update_ucplq_ci(MAPTCmdqBase *base, uint16_t data)
+{
+    base->word[11] = deposit32(base->word[11], 16, 16, data);
+}
+
+/* Getters mirroring the update helpers above. */
+static inline uint16_t ummu_mapt_cmdq_base_get_ucmdq_pi(MAPTCmdqBase *base)
+{
+    return extract32(base->word[10], 0, 16);
+}
+
+static inline uint16_t ummu_mapt_cmdq_base_get_ucmdq_ci(MAPTCmdqBase *base)
+{
+    return extract32(base->word[10], 16, 16);
+}
+
+static inline uint16_t ummu_mapt_cmdq_base_get_ucplq_pi(MAPTCmdqBase *base)
+{
+    return extract32(base->word[11], 0, 16);
+}
+
+static inline uint16_t ummu_mapt_cmdq_base_get_ucplq_ci(MAPTCmdqBase *base)
+{
+    return extract32(base->word[11], 16, 16);
+}
+
+/* word[4] bits [15:0]: TECTE_TAG associated with this queue context. */
+static inline uint16_t ummu_mapt_cmdq_base_get_tecte_tag(MAPTCmdqBase *base)
+{
+    return extract32(base->word[4], 0, 16);
+}
+
+/* word[8] bits [19:0]: TokenID associated with this queue context. */
+static inline uint32_t ummu_mapt_cmdq_base_get_token_id(MAPTCmdqBase *base)
+{
+    return extract32(base->word[8], 0, 20);
+}
+
+/* ucmd queue empty: PI == CI and their wrap bits match. */
+static inline bool ummu_mapt_ucmdq_empty(MAPTCmdqBase *base)
+{
+    return MAPT_UCMDQ_PI(base) == MAPT_UCMDQ_CI(base) &&
+           MAPT_UCMDQ_PI_WRAP(base) == MAPT_UCMDQ_CI_WRAP(base);
+}
+
+/* Advance the ucmd CI (word[10] high half) within log2size + 1 bits (wrap bit). */
+static inline void ummu_mapt_ucmdq_cons_incr(MAPTCmdqBase *base)
+{
+    base->word[10] = deposit32(base->word[10], 16,
+                               MAPT_UCMDQ_LOG2SIZE(base) + 1,
+                               ummu_mapt_cmdq_base_get_ucmdq_ci(base) + 1);
+}
+
+/* ucpl queue full: PI == CI but their wrap bits differ. */
+static inline bool ummu_mapt_ucplq_full(MAPTCmdqBase *base)
+{
+    return MAPT_UCPLQ_PI(base) == MAPT_UCPLQ_CI(base) &&
+           MAPT_UCPLQ_PI_WRAP(base) != MAPT_UCPLQ_CI_WRAP(base);
+}
+
+/*
+ * Advance the ucpl PI (word[11] low half) within log2size + 1 bits.
+ * NOTE(review): "ucqlq" in the name looks like a typo for "ucplq";
+ * kept as-is since callers already use this name.
+ */
+static inline void ummu_mapt_ucqlq_prod_incr(MAPTCmdqBase *base)
+{
+    base->word[11] = deposit32(base->word[11], 0,
+                               MAPT_UCPLQ_LOG2SIZE(base) + 1,
+                               ummu_mapt_cmdq_base_get_ucplq_pi(base) + 1);
+}
+
+/* Fill a completion record with @status and the consumed command index @ci. */
+static inline void ummu_mapt_ucplq_set_cpl(MAPTCmdCpl *cpl, uint16_t status, uint16_t ci)
+{
+    cpl->cpl_status = status;
+    cpl->cmdq_ci = ci;
+}
+
+/* Decode the queue id from an MMIO offset; decoding depends on the 4K/64K page select. */
+static inline uint32_t ummu_mapt_cmdq_get_qid(UMMUState *u, uint64_t offset)
+{
+    if (u->ucmdq_page_sel == MAPT_CMDQ_CTRLR_PAGE_SIZE_4K) {
+        return MAPT_4K_CMDQ_CTXT_QID(offset);
+    } else {
+        return MAPT_64K_CMDQ_CTXT_QID(offset);
+    }
+}
+
+/* Build a PLBI_OS_EIDTID mcmdq command from opcode, TokenID @tid and TECTE tag @tag. */
+static inline void ummu_mcmdq_construct_plbi_os_eidtid(UMMUMcmdqCmd *mcmd_cmd, uint32_t tid, uint16_t tag)
+{
+    mcmd_cmd->word[0] = deposit32(mcmd_cmd->word[0], 0, 8, CMD_PLBI_OS_EIDTID);
+    mcmd_cmd->word[0] = deposit32(mcmd_cmd->word[0], 12, 20, tid);
+    mcmd_cmd->word[4] = deposit32(mcmd_cmd->word[4], 0, 16, tag);
+}
+
+/*
+ * Translate a MAPT PLBI user-VA command into an mcmdq PLBI_OS_VA command:
+ * copies the low 6 bits of word[1], word[2] with its low 12 bits cleared
+ * (4K alignment), and word[3], then stamps TokenID and TECTE tag.
+ */
+static inline void ummu_plib_usr_va_to_pibi_os_va(MAPTCmd *mapt_cmd, UMMUMcmdqCmd *mcmd_cmd,
+                                                  uint32_t tid, uint16_t tag)
+{
+    mcmd_cmd->word[0] = deposit32(mcmd_cmd->word[0], 0, 8, CMD_PLBI_OS_VA);
+    mcmd_cmd->word[0] = deposit32(mcmd_cmd->word[0], 12, 20, tid);
+    mcmd_cmd->word[1] = deposit32(mcmd_cmd->word[1], 0, 6, extract32(mapt_cmd->word[1], 0, 6));
+    mcmd_cmd->word[2] = mapt_cmd->word[2] & 0xFFFFF000;
+    mcmd_cmd->word[3] = mapt_cmd->word[3];
+    mcmd_cmd->word[4] = deposit32(mcmd_cmd->word[4], 0, 16, tag);
+}
+
+#endif
diff --git a/include/hw/ub/hisi/ummu.h b/include/hw/ub/hisi/ummu.h
new file mode 100644
index 0000000000000000000000000000000000000000..192f45e7e679229ac914e66ffee244fa71a000f0
--- /dev/null
+++ b/include/hw/ub/hisi/ummu.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HISI_UMMU_H
+#define HISI_UMMU_H
+#include "hw/ub/hisi/ubc.h"
+
+#endif
diff --git a/include/hw/ub/ub_ummu.h b/include/hw/ub/ub_ummu.h
index f8b65a0bbe861af11f709e1b790c9fd5b4fdb30a..fc33fd55494cd4a29ed38fc9e3e42f1bf5de1fbb 100644
--- a/include/hw/ub/ub_ummu.h
+++ b/include/hw/ub/ub_ummu.h
@@ -26,4 +26,99 @@
#define UMMU_INTERRUPT_ID 0x8989 // UMMU DEVICE ID need allocate later
+/*
+ * Bit position of the lowest set bit of @x (as in the kernel's bitfield
+ * helpers). NOTE(review): the double-underscore prefix is a reserved
+ * identifier; kept to match the kernel naming.
+ */
+#define __bf_shf(x) (__builtin_ffsll(x) - 1)
+
+#define TYPE_UB_UMMU "ub-ummu"
+OBJECT_DECLARE_TYPE(UMMUState, UMMUBaseClass, UB_UMMU)
+
+/* Generic circular queue: guest base address plus producer/consumer registers. */
+typedef struct UMMUQueue {
+    uint64_t base; /* base register */
+    uint32_t prod;       /* producer index register (top bit of index is the wrap flag) */
+    uint32_t cons;       /* consumer index register (top bit of index is the wrap flag) */
+    uint64_t entry_size; /* size of one queue entry */
+    uint8_t log2size;    /* log2 of the number of queue entries */
+} UMMUQueue;
+
+/* Management command queue (mcmdq): raw queue state only. */
+typedef struct UMMUMcmdQueue {
+    UMMUQueue queue;
+} UMMUMcmdQueue;
+
+/* Event queue plus its USI (interrupt message) configuration. */
+typedef struct UMMUEventQueue {
+    UMMUQueue queue;
+    uint64_t usi_addr;     /* USI target address */
+    uint32_t usi_data;     /* USI payload */
+    uint32_t usi_attr;
+    bool event_que_int_en; /* latched from GLB_INT_EN.EVENT_QUE_INT_EN */
+} UMMUEventQueue;
+
+/* Global error state and its USI configuration. */
+typedef struct UMMUGlbErr {
+    uint64_t usi_addr;
+    uint32_t usi_data;
+    uint32_t usi_attr;
+    bool glb_err_int_en;   /* latched from GLB_INT_EN.GLB_ERR_INT_EN */
+    uint32_t glb_err;
+    uint32_t glb_err_resp;
+} UMMUGlbErr;
+
+/* USI vector slots used by one UMMU instance. */
+typedef enum UMMUUSIVectorType {
+    UMMU_USI_VECTOR_EVETQ,  /* event queue vector (sic: "EVETQ") */
+    UMMU_USI_VECTOR_GERROR, /* global error vector */
+    UMMU_USI_VECTOR_MAX,
+} UMMUUSIVectorType;
+
+/* dst_eid -> tecte_tag mapping entry, linked on UMMUState.kvtbl. */
+typedef struct UMMUKVTblEntry {
+    uint32_t dst_eid;
+    uint32_t tecte_tag;
+    QLIST_ENTRY(UMMUKVTblEntry) list;
+} UMMUKVTblEntry;
+
+#define UMMU_MAX_MCMDQS 32
+#define UMMU_TECTE_TAG_MAX_NUM 32
+/* Device state of the emulated UB UMMU (a system-bus device). */
+struct UMMUState {
+    /* <private> */
+    SysBusDevice dev;
+    const char *mrtypename;
+    MemoryRegion ummu_reg_mem; /* MMIO register window */
+    uint64_t ummu_reg_size;    /* "ub-ummu-reg-size" property */
+    MemoryRegion root;
+    MemoryRegion stage2;
+    MemoryRegion sysmem;
+
+    /* Nested translation mode, set via the "nested" property. */
+    bool nested;
+    UMMUViommu *viommu;
+
+    /* spec register define */
+    uint32_t cap[7];        /* capability registers */
+    uint32_t ctrl[4];       /* CTRL0..CTRL3 */
+    uint32_t ctrl0_ack;
+    uint64_t tect_base;     /* TECT base address */
+    uint32_t tect_base_cfg; /* TECT format/split configuration */
+    UMMUMcmdQueue mcmdqs[UMMU_MAX_MCMDQS];
+    UMMUEventQueue eventq;
+    UMMUGlbErr glb_err;
+    uint64_t mapt_cmdq_ctxt_base;
+    uint32_t release_um_queue;
+    uint32_t release_um_queue_id;
+    uint32_t ucmdq_page_sel; /* MAPT cmdq controller page size select (4K/64K) */
+
+    int usi_virq[UMMU_USI_VECTOR_MAX];
+    uint8_t bus_num;
+    QLIST_ENTRY(UMMUState) node;
+    uint32_t tecte_tag_cache[UMMU_TECTE_TAG_MAX_NUM];
+    uint32_t tecte_tag_num; /* number of valid entries in tecte_tag_cache */
+
+    UBBus *primary_bus;     /* "primary-bus" link property */
+    GHashTable *ummu_devs;
+    GHashTable *configs;
+    QLIST_HEAD(, UMMUKVTblEntry) kvtbl; /* dst_eid -> tecte_tag entries */
+};
+
+struct UMMUBaseClass {
+    /* <private> */
+    SysBusDeviceClass parent_class;
+};
+
+/* Find the UMMU instance associated with @bus_num. */
+UMMUState *ummu_find_by_bus_num(uint8_t bus_num);
+/* Associate an UMMU with @ubc's bus (see ub_ummu.c for semantics). */
+int ummu_associating_with_ubc(BusControllerState *ubc);
#endif
diff --git a/include/hw/ub/ub_usi.h b/include/hw/ub/ub_usi.h
new file mode 100644
index 0000000000000000000000000000000000000000..a9df04e686c77e0a53f353040ac565d45a054587
--- /dev/null
+++ b/include/hw/ub/ub_usi.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef UB_USI_H
+#define UB_USI_H
+#include "qemu/typedefs.h"
+#include "hw/ub/ub.h"
+
+/* USI message: a @data payload to be written to @address. */
+struct USIMessage {
+    uint64_t address;
+    uint32_t data;
+};
+
+#endif
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
index a1b15dd21959cbc28484e807b64f594cb82a41aa..f52ceea7a00d883490278c4d24044b847c798e05 100644
--- a/include/qemu/typedefs.h
+++ b/include/qemu/typedefs.h
@@ -140,7 +140,9 @@ typedef struct VMStateDescription VMStateDescription;
/* UB typedef */
typedef struct UBDevice UBDevice;
+typedef struct USIMessage USIMessage;
typedef struct UBBus UBBus;
+typedef struct UMMUViommu UMMUViommu;
/*
* Pointer types