From 6060f8cad07a3d2a49795fef19d585a9d205ecef Mon Sep 17 00:00:00 2001
From: Jia Qingtong
Date: Tue, 24 Sep 2024 18:24:33 +0800
Subject: [PATCH] hw/arm/virt: Keep Guest L1 cache type consistent with KVM

Linux KVM normalizes the cache configuration and exposes a fabricated
CLIDR_EL1 value to the guest, where the L1 cache type may be either a
unified cache or separate instruction and data caches. Let's keep the
guest L1 cache type consistent with KVM by checking the guest-visible
CLIDR_EL1, which avoids abnormal issues in the guest when it probes
cache info by combining CLIDR_EL1 with the ACPI PPTT and DT.

Signed-off-by: Yanan Wang
Signed-off-by: lishusen
---
 hw/acpi/aml-build.c         | 165 ++---------------------------------
 hw/arm/virt-acpi-build.c    | 167 ++++++++++++++++++++++++++++++++++++
 hw/arm/virt.c               |  86 +++++++++++++++----
 include/hw/acpi/aml-build.h |  54 ++----------
 include/hw/arm/virt.h       |  60 +++++++++++++
 5 files changed, 306 insertions(+), 226 deletions(-)

diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
index bf9c59f5448..0d4994bafe4 100644
--- a/hw/acpi/aml-build.c
+++ b/hw/acpi/aml-build.c
@@ -47,7 +47,7 @@ static void build_prepend_byte(GArray *array, uint8_t val)
     g_array_prepend_val(array, val);
 }
 
-static void build_append_byte(GArray *array, uint8_t val)
+void build_append_byte(GArray *array, uint8_t val)
 {
     g_array_append_val(array, val);
 }
@@ -1990,10 +1990,10 @@ void build_slit(GArray *table_data, BIOSLinker *linker, MachineState *ms,
  * ACPI spec, Revision 6.3
  * 5.2.29.1 Processor hierarchy node structure (Type 0)
  */
-static void build_processor_hierarchy_node(GArray *tbl, uint32_t flags,
-                                           uint32_t parent, uint32_t id,
-                                           uint32_t *priv_rsrc,
-                                           uint32_t priv_num)
+void build_processor_hierarchy_node(GArray *tbl, uint32_t flags,
+                                    uint32_t parent, uint32_t id,
+                                    uint32_t *priv_rsrc,
+                                    uint32_t priv_num)
 {
     int i;
 
@@ -2016,161 +2016,6 @@ static void build_processor_hierarchy_node(GArray *tbl, uint32_t flags,
     }
 }
 
-/*
- * ACPI spec, Revision 6.3
- * 5.2.29.2 Cache Type Structure (Type 1)
- */
-static void build_cache_hierarchy_node(GArray *tbl, uint32_t next_level,
-                                       uint32_t cache_type)
-{
-    build_append_byte(tbl, 1);
-    build_append_byte(tbl, 24);
-    build_append_int_noprefix(tbl, 0, 2);
-    build_append_int_noprefix(tbl, 127, 4);
-    build_append_int_noprefix(tbl, next_level, 4);
-
-    switch (cache_type) {
-    case ARM_L1D_CACHE: /* L1 dcache info */
-        build_append_int_noprefix(tbl, ARM_L1DCACHE_SIZE, 4);
-        build_append_int_noprefix(tbl, ARM_L1DCACHE_SETS, 4);
-        build_append_byte(tbl, ARM_L1DCACHE_ASSOCIATIVITY);
-        build_append_byte(tbl, ARM_L1DCACHE_ATTRIBUTES);
-        build_append_int_noprefix(tbl, ARM_L1DCACHE_LINE_SIZE, 2);
-        break;
-    case ARM_L1I_CACHE: /* L1 icache info */
-        build_append_int_noprefix(tbl, ARM_L1ICACHE_SIZE, 4);
-        build_append_int_noprefix(tbl, ARM_L1ICACHE_SETS, 4);
-        build_append_byte(tbl, ARM_L1ICACHE_ASSOCIATIVITY);
-        build_append_byte(tbl, ARM_L1ICACHE_ATTRIBUTES);
-        build_append_int_noprefix(tbl, ARM_L1ICACHE_LINE_SIZE, 2);
-        break;
-    case ARM_L2_CACHE: /* L2 cache info */
-        build_append_int_noprefix(tbl, ARM_L2CACHE_SIZE, 4);
-        build_append_int_noprefix(tbl, ARM_L2CACHE_SETS, 4);
-        build_append_byte(tbl, ARM_L2CACHE_ASSOCIATIVITY);
-        build_append_byte(tbl, ARM_L2CACHE_ATTRIBUTES);
-        build_append_int_noprefix(tbl, ARM_L2CACHE_LINE_SIZE, 2);
-        break;
-    case ARM_L3_CACHE: /* L3 cache info */
-        build_append_int_noprefix(tbl, ARM_L3CACHE_SIZE, 4);
-        build_append_int_noprefix(tbl, ARM_L3CACHE_SETS, 4);
-        build_append_byte(tbl, ARM_L3CACHE_ASSOCIATIVITY);
-        build_append_byte(tbl, ARM_L3CACHE_ATTRIBUTES);
-        build_append_int_noprefix(tbl, ARM_L3CACHE_LINE_SIZE, 2);
-        break;
-    default:
-        build_append_int_noprefix(tbl, 0, 4);
-        build_append_int_noprefix(tbl, 0, 4);
-        build_append_byte(tbl, 0);
-        build_append_byte(tbl, 0);
-        build_append_int_noprefix(tbl, 0, 2);
-    }
-}
-
-/*
- * ACPI spec, Revision 6.3
- * 5.2.29 Processor Properties Topology Table (PPTT)
- */
-void build_pptt_arm(GArray *table_data, BIOSLinker *linker, MachineState *ms,
-                    const char *oem_id, const char *oem_table_id)
-{
-    MachineClass *mc = MACHINE_GET_CLASS(ms);
-    GQueue *list = g_queue_new();
-    guint pptt_start = table_data->len;
-    guint parent_offset;
-    guint length, i;
-    int uid = 0;
-    int socket;
-    AcpiTable table = { .sig = "PPTT", .rev = 2,
-                        .oem_id = oem_id, .oem_table_id = oem_table_id };
-
-    acpi_table_begin(&table, table_data);
-
-    for (socket = 0; socket < ms->smp.sockets; socket++) {
-        uint32_t l3_cache_offset = table_data->len - pptt_start;
-        build_cache_hierarchy_node(table_data, 0, ARM_L3_CACHE);
-
-        g_queue_push_tail(list,
-            GUINT_TO_POINTER(table_data->len - pptt_start));
-        build_processor_hierarchy_node(
-            table_data,
-            /*
-             * Physical package - represents the boundary
-             * of a physical package
-             */
-            (1 << 0),
-            0, socket, &l3_cache_offset, 1);
-    }
-
-    if (mc->smp_props.clusters_supported) {
-        length = g_queue_get_length(list);
-        for (i = 0; i < length; i++) {
-            int cluster;
-
-            parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
-            for (cluster = 0; cluster < ms->smp.clusters; cluster++) {
-                g_queue_push_tail(list,
-                    GUINT_TO_POINTER(table_data->len - pptt_start));
-                build_processor_hierarchy_node(
-                    table_data,
-                    (0 << 0), /* not a physical package */
-                    parent_offset, cluster, NULL, 0);
-            }
-        }
-    }
-
-    length = g_queue_get_length(list);
-    for (i = 0; i < length; i++) {
-        int core;
-
-        parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
-        for (core = 0; core < ms->smp.cores; core++) {
-            uint32_t priv_rsrc[3] = {};
-            priv_rsrc[0] = table_data->len - pptt_start; /* L2 cache offset */
-            build_cache_hierarchy_node(table_data, 0, ARM_L2_CACHE);
-
-            priv_rsrc[1] = table_data->len - pptt_start; /* L1 dcache offset */
-            build_cache_hierarchy_node(table_data, priv_rsrc[0], ARM_L1D_CACHE);
-
-            priv_rsrc[2] = table_data->len - pptt_start; /* L1 icache offset */
-            build_cache_hierarchy_node(table_data, priv_rsrc[0], ARM_L1I_CACHE);
-
-            if (ms->smp.threads > 1) {
-                g_queue_push_tail(list,
-                    GUINT_TO_POINTER(table_data->len - pptt_start));
-                build_processor_hierarchy_node(
-                    table_data,
-                    (0 << 0), /* not a physical package */
-                    parent_offset, core, priv_rsrc, 3);
-            } else {
-                build_processor_hierarchy_node(
-                    table_data,
-                    (1 << 1) | /* ACPI Processor ID valid */
-                    (1 << 3),  /* Node is a Leaf */
-                    parent_offset, uid++, priv_rsrc, 3);
-            }
-        }
-    }
-
-    length = g_queue_get_length(list);
-    for (i = 0; i < length; i++) {
-        int thread;
-
-        parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
-        for (thread = 0; thread < ms->smp.threads; thread++) {
-            build_processor_hierarchy_node(
-                table_data,
-                (1 << 1) | /* ACPI Processor ID valid */
-                (1 << 2) | /* Processor is a Thread */
-                (1 << 3),  /* Node is a Leaf */
-                parent_offset, uid++, NULL, 0);
-        }
-    }
-
-    g_queue_free(list);
-    acpi_table_end(linker, &table);
-}
-
 /*
  * ACPI spec, Revision 6.3
  * 5.2.29 Processor Properties Topology Table (PPTT)
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index 179600d4fe7..86984b71675 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -63,6 +63,173 @@
 
 #define ACPI_BUILD_TABLE_SIZE             0x20000
 
+/*
+ * ACPI spec, Revision 6.3
+ * 5.2.29.2 Cache Type Structure (Type 1)
+ */
+static void build_cache_hierarchy_node(GArray *tbl, uint32_t next_level,
+                                       uint32_t cache_type)
+{
+    build_append_byte(tbl, 1);
+    build_append_byte(tbl, 24);
+    build_append_int_noprefix(tbl, 0, 2);
+    build_append_int_noprefix(tbl, 127, 4);
+    build_append_int_noprefix(tbl, next_level, 4);
+
+    switch (cache_type) {
+    case ARM_L1D_CACHE: /* L1 dcache info */
+        build_append_int_noprefix(tbl, ARM_L1DCACHE_SIZE, 4);
+        build_append_int_noprefix(tbl, ARM_L1DCACHE_SETS, 4);
+        build_append_byte(tbl, ARM_L1DCACHE_ASSOCIATIVITY);
+        build_append_byte(tbl, ARM_L1DCACHE_ATTRIBUTES);
+        build_append_int_noprefix(tbl, ARM_L1DCACHE_LINE_SIZE, 2);
+        break;
+    case ARM_L1I_CACHE: /* L1 icache info */
+        build_append_int_noprefix(tbl, ARM_L1ICACHE_SIZE, 4);
+        build_append_int_noprefix(tbl, ARM_L1ICACHE_SETS, 4);
+        build_append_byte(tbl, ARM_L1ICACHE_ASSOCIATIVITY);
+        build_append_byte(tbl, ARM_L1ICACHE_ATTRIBUTES);
+        build_append_int_noprefix(tbl, ARM_L1ICACHE_LINE_SIZE, 2);
+        break;
+    case ARM_L1_CACHE: /* L1 cache info */
+        build_append_int_noprefix(tbl, ARM_L1CACHE_SIZE, 4);
+        build_append_int_noprefix(tbl, ARM_L1CACHE_SETS, 4);
+        build_append_byte(tbl, ARM_L1CACHE_ASSOCIATIVITY);
+        build_append_byte(tbl, ARM_L1CACHE_ATTRIBUTES);
+        build_append_int_noprefix(tbl, ARM_L1CACHE_LINE_SIZE, 2);
+        break;
+    case ARM_L2_CACHE: /* L2 cache info */
+        build_append_int_noprefix(tbl, ARM_L2CACHE_SIZE, 4);
+        build_append_int_noprefix(tbl, ARM_L2CACHE_SETS, 4);
+        build_append_byte(tbl, ARM_L2CACHE_ASSOCIATIVITY);
+        build_append_byte(tbl, ARM_L2CACHE_ATTRIBUTES);
+        build_append_int_noprefix(tbl, ARM_L2CACHE_LINE_SIZE, 2);
+        break;
+    case ARM_L3_CACHE: /* L3 cache info */
+        build_append_int_noprefix(tbl, ARM_L3CACHE_SIZE, 4);
+        build_append_int_noprefix(tbl, ARM_L3CACHE_SETS, 4);
+        build_append_byte(tbl, ARM_L3CACHE_ASSOCIATIVITY);
+        build_append_byte(tbl, ARM_L3CACHE_ATTRIBUTES);
+        build_append_int_noprefix(tbl, ARM_L3CACHE_LINE_SIZE, 2);
+        break;
+    default:
+        build_append_int_noprefix(tbl, 0, 4);
+        build_append_int_noprefix(tbl, 0, 4);
+        build_append_byte(tbl, 0);
+        build_append_byte(tbl, 0);
+        build_append_int_noprefix(tbl, 0, 2);
+    }
+}
+
+/*
+ * ACPI spec, Revision 6.3
+ * 5.2.29 Processor Properties Topology Table (PPTT)
+ */
+static void build_pptt_arm(GArray *table_data, BIOSLinker *linker, MachineState *ms,
+                           const char *oem_id, const char *oem_table_id)
+{
+    MachineClass *mc = MACHINE_GET_CLASS(ms);
+    GQueue *list = g_queue_new();
+    guint pptt_start = table_data->len;
+    guint parent_offset;
+    guint length, i;
+    int uid = 0;
+    int socket;
+    AcpiTable table = { .sig = "PPTT", .rev = 2,
+                        .oem_id = oem_id, .oem_table_id = oem_table_id };
+    bool unified_l1 = cpu_l1_cache_unified(0);
+
+    acpi_table_begin(&table, table_data);
+
+    for (socket = 0; socket < ms->smp.sockets; socket++) {
+        uint32_t l3_cache_offset = table_data->len - pptt_start;
+        build_cache_hierarchy_node(table_data, 0, ARM_L3_CACHE);
+
+        g_queue_push_tail(list,
+            GUINT_TO_POINTER(table_data->len - pptt_start));
+        build_processor_hierarchy_node(
+            table_data,
+            /*
+             * Physical package - represents the boundary
+             * of a physical package
+             */
+            (1 << 0),
+            0, socket, &l3_cache_offset, 1);
+    }
+
+    if (mc->smp_props.clusters_supported) {
+        length = g_queue_get_length(list);
+        for (i = 0; i < length; i++) {
+            int cluster;
+
+            parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
+            for (cluster = 0; cluster < ms->smp.clusters; cluster++) {
+                g_queue_push_tail(list,
+                    GUINT_TO_POINTER(table_data->len - pptt_start));
+                build_processor_hierarchy_node(
+                    table_data,
+                    (0 << 0), /* not a physical package */
+                    parent_offset, cluster, NULL, 0);
+            }
+        }
+    }
+
+    length = g_queue_get_length(list);
+    for (i = 0; i < length; i++) {
+        int core;
+
+        parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
+        for (core = 0; core < ms->smp.cores; core++) {
+            uint32_t priv_rsrc[3] = {};
+            priv_rsrc[0] = table_data->len - pptt_start; /* L2 cache offset */
+            build_cache_hierarchy_node(table_data, 0, ARM_L2_CACHE);
+
+            if (unified_l1) {
+                priv_rsrc[1] = table_data->len - pptt_start; /* L1 cache offset */
+                build_cache_hierarchy_node(table_data, priv_rsrc[0], ARM_L1_CACHE);
+            } else {
+                priv_rsrc[1] = table_data->len - pptt_start; /* L1 dcache offset */
+                build_cache_hierarchy_node(table_data, priv_rsrc[0], ARM_L1D_CACHE);
+                priv_rsrc[2] = table_data->len - pptt_start; /* L1 icache offset */
+                build_cache_hierarchy_node(table_data, priv_rsrc[0], ARM_L1I_CACHE);
+            }
+
+            if (ms->smp.threads > 1) {
+                g_queue_push_tail(list,
+                    GUINT_TO_POINTER(table_data->len - pptt_start));
+                build_processor_hierarchy_node(
+                    table_data,
+                    (0 << 0), /* not a physical package */
+                    parent_offset, core, priv_rsrc, 3);
+            } else {
+                build_processor_hierarchy_node(
+                    table_data,
+                    (1 << 1) | /* ACPI Processor ID valid */
+                    (1 << 3),  /* Node is a Leaf */
+                    parent_offset, uid++, priv_rsrc, 3);
+            }
+        }
+    }
+
+    length = g_queue_get_length(list);
+    for (i = 0; i < length; i++) {
+        int thread;
+
+        parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
+        for (thread = 0; thread < ms->smp.threads; thread++) {
+            build_processor_hierarchy_node(
+                table_data,
+                (1 << 1) | /* ACPI Processor ID valid */
+                (1 << 2) | /* Processor is a Thread */
+                (1 << 3),  /* Node is a Leaf */
+                parent_offset, uid++, NULL, 0);
+        }
+    }
+
+    g_queue_free(list);
+    acpi_table_end(linker, &table);
+}
+
 static void acpi_dsdt_add_psd(Aml *dev, int cpus)
 {
     Aml *pkg;
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index e31c2899684..a9efcec85e1 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -401,6 +401,39 @@ static void fdt_add_timer_nodes(const VirtMachineState *vms)
                          INTID_TO_PPI(ARCH_TIMER_NS_EL2_IRQ), irqflags);
 }
 
+/*
+ * In the CLIDR_EL1 exposed to the guest by the hypervisor, the L1 cache
+ * type may be unified or separate instruction and data caches. We need
+ * to read the guest-visible CLIDR_EL1 and check the L1 cache type.
+ */
+bool cpu_l1_cache_unified(int cpu)
+{
+    bool unified = false;
+    uint64_t clidr;
+    ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu));
+    CPUState *cs = CPU(armcpu);
+    int ret;
+
+    if (kvm_enabled()) {
+        struct kvm_one_reg reg = {
+            .id = ARM64_REG_CLIDR_EL1,
+            .addr = (uintptr_t)&clidr
+        };
+
+        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+        if (ret) {
+            error_setg(&error_fatal, "Get vCPU clidr from KVM failed:%d", ret);
+            return unified;
+        }
+
+        if (CLIDR_CTYPE(clidr, 1) == CTYPE_UNIFIED) {
+            unified = true;
+        }
+    }
+
+    return unified;
+}
+
 static void fdt_add_l3cache_nodes(const VirtMachineState *vms)
 {
     int i;
@@ -415,9 +448,10 @@ static void fdt_add_l3cache_nodes(const VirtMachineState *vms)
         qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "cache");
         qemu_fdt_setprop_string(ms->fdt, nodename, "cache-unified", "true");
         qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-level", 3);
-        qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-size", 0x2000000);
-        qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-line-size", 128);
-        qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-sets", 2048);
+        qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-size", ARM_L3CACHE_SIZE);
+        qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-line-size",
+                              ARM_L3CACHE_LINE_SIZE);
+        qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-sets", ARM_L3CACHE_SETS);
         qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle",
                               qemu_fdt_alloc_phandle(ms->fdt));
         g_free(nodename);
@@ -436,10 +470,12 @@ static void fdt_add_l2cache_nodes(const VirtMachineState *vms)
         char *nodename = g_strdup_printf("/cpus/l2-cache%d", cpu);
 
         qemu_fdt_add_subnode(ms->fdt, nodename);
+        qemu_fdt_setprop_string(ms->fdt, nodename, "cache-unified", "true");
         qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "cache");
-        qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-size", 0x80000);
-        qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-line-size", 64);
-        qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-sets", 1024);
+        qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-size", ARM_L2CACHE_SIZE);
+        qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-line-size",
+                              ARM_L2CACHE_LINE_SIZE);
+        qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-sets", ARM_L2CACHE_SETS);
         qemu_fdt_setprop_phandle(ms->fdt, nodename, "next-level-cache",
                                  next_path);
         qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle",
@@ -453,18 +489,32 @@
 static void fdt_add_l1cache_prop(const VirtMachineState *vms,
                                  char *nodename, int cpu)
 {
-    const MachineState *ms = MACHINE(vms);
-    char *cachename = g_strdup_printf("/cpus/l2-cache%d", cpu);
-
-    qemu_fdt_setprop_cell(ms->fdt, nodename, "d-cache-size", 0x10000);
-    qemu_fdt_setprop_cell(ms->fdt, nodename, "d-cache-line-size", 64);
-    qemu_fdt_setprop_cell(ms->fdt, nodename, "d-cache-sets", 256);
-    qemu_fdt_setprop_cell(ms->fdt, nodename, "i-cache-size", 0x10000);
-    qemu_fdt_setprop_cell(ms->fdt, nodename, "i-cache-line-size", 64);
-    qemu_fdt_setprop_cell(ms->fdt, nodename, "i-cache-sets", 256);
-    qemu_fdt_setprop_phandle(ms->fdt, nodename, "next-level-cache",
-                             cachename);
-    g_free(cachename);
+    const MachineState *ms = MACHINE(vms);
+    char *next_path = g_strdup_printf("/cpus/l2-cache%d", cpu);
+    bool unified_l1 = cpu_l1_cache_unified(0);
+
+    if (unified_l1) {
+        qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-size", ARM_L1CACHE_SIZE);
+        qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-line-size",
+                              ARM_L1CACHE_LINE_SIZE);
+        qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-sets", ARM_L1CACHE_SETS);
+    } else {
+        qemu_fdt_setprop_cell(ms->fdt, nodename, "d-cache-size",
+                              ARM_L1DCACHE_SIZE);
+        qemu_fdt_setprop_cell(ms->fdt, nodename, "d-cache-line-size",
+                              ARM_L1DCACHE_LINE_SIZE);
+        qemu_fdt_setprop_cell(ms->fdt, nodename, "d-cache-sets",
+                              ARM_L1DCACHE_SETS);
+        qemu_fdt_setprop_cell(ms->fdt, nodename, "i-cache-size",
+                              ARM_L1ICACHE_SIZE);
+        qemu_fdt_setprop_cell(ms->fdt, nodename, "i-cache-line-size",
+                              ARM_L1ICACHE_LINE_SIZE);
+        qemu_fdt_setprop_cell(ms->fdt, nodename, "i-cache-sets",
+                              ARM_L1ICACHE_SETS);
+    }
+    qemu_fdt_setprop_phandle(ms->fdt, nodename, "next-level-cache", next_path);
+
+    g_free(next_path);
 }
 
 static void fdt_add_cpu_nodes(const VirtMachineState *vms)
diff --git a/include/hw/acpi/aml-build.h b/include/hw/acpi/aml-build.h
index 7281c281f6d..91f9cbf4f10 100644
--- a/include/hw/acpi/aml-build.h
+++ b/include/hw/acpi/aml-build.h
@@ -221,51 +221,6 @@ struct AcpiBuildTables {
     BIOSLinker *linker;
 } AcpiBuildTables;
 
-/* Definitions of the hardcoded cache info*/
-
-typedef enum {
-    ARM_L1D_CACHE,
-    ARM_L1I_CACHE,
-    ARM_L2_CACHE,
-    ARM_L3_CACHE
-} ArmCacheType;
-
-/* L1 data cache: */
-#define ARM_L1DCACHE_SIZE 65536
-#define ARM_L1DCACHE_SETS 256
-#define ARM_L1DCACHE_ASSOCIATIVITY 4
-#define ARM_L1DCACHE_ATTRIBUTES 2
-#define ARM_L1DCACHE_LINE_SIZE 64
-
-/* L1 instruction cache: */
-#define ARM_L1ICACHE_SIZE 65536
-#define ARM_L1ICACHE_SETS 256
-#define ARM_L1ICACHE_ASSOCIATIVITY 4
-#define ARM_L1ICACHE_ATTRIBUTES 4
-#define ARM_L1ICACHE_LINE_SIZE 64
-
-/* Level 2 unified cache: */
-#define ARM_L2CACHE_SIZE 524288
-#define ARM_L2CACHE_SETS 1024
-#define ARM_L2CACHE_ASSOCIATIVITY 8
-#define ARM_L2CACHE_ATTRIBUTES 10
-#define ARM_L2CACHE_LINE_SIZE 64
-
-/* Level 3 unified cache: */
-#define ARM_L3CACHE_SIZE 33554432
-#define ARM_L3CACHE_SETS 2048
-#define ARM_L3CACHE_ASSOCIATIVITY 15
-#define ARM_L3CACHE_ATTRIBUTES 10
-#define ARM_L3CACHE_LINE_SIZE 128
-
-struct offset_status {
-    uint32_t parent;
-    uint32_t l2_offset;
-    uint32_t l1d_offset;
-    uint32_t l1i_offset;
-};
-
-
 typedef struct CrsRangeEntry {
     uint64_t base;
@@ -460,6 +415,7 @@ Aml *aml_sizeof(Aml *arg);
 Aml *aml_concatenate(Aml *source1, Aml *source2, Aml *target);
 Aml *aml_object_type(Aml *object);
 
+void build_append_byte(GArray *array, uint8_t val);
 void build_append_int_noprefix(GArray *table, uint64_t value, int size);
 
 typedef struct AcpiTable {
@@ -537,10 +493,12 @@ void build_srat_memory(GArray *table_data, uint64_t base,
 void build_slit(GArray *table_data, BIOSLinker *linker, MachineState *ms,
                 const char *oem_id, const char *oem_table_id);
 
-void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
-                const char *oem_id, const char *oem_table_id);
+void build_processor_hierarchy_node(GArray *tbl, uint32_t flags,
+                                    uint32_t parent, uint32_t id,
+                                    uint32_t *priv_rsrc,
+                                    uint32_t priv_num);
 
-void build_pptt_arm(GArray *table_data, BIOSLinker *linker, MachineState *ms,
+void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
                 const char *oem_id, const char *oem_table_id);
 
 void build_fadt(GArray *tbl, BIOSLinker *linker, const AcpiFadtData *f,
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
index 76a0d3fa5b8..4b7dc61c24f 100644
--- a/include/hw/arm/virt.h
+++ b/include/hw/arm/virt.h
@@ -47,6 +47,65 @@
 /* See Linux kernel arch/arm64/include/asm/pvclock-abi.h */
 #define PVTIME_SIZE_PER_CPU 64
 
+/* ARM CLIDR_EL1 related definitions */
+/* Ctypen, bits[3(n - 1) + 2 : 3(n - 1)], for n = 1 to 7 */
+#define CTYPE_NONE      0b000
+#define CTYPE_INS       0b001
+#define CTYPE_DATA      0b010
+#define CTYPE_INS_DATA  0b011
+#define CTYPE_UNIFIED   0b100
+
+#define ARM64_REG_CLIDR_EL1 ARM64_SYS_REG(3, 1, 0, 0, 1)
+
+#define CLIDR_CTYPE_SHIFT(level) (3 * (level - 1))
+#define CLIDR_CTYPE_MASK(level) (7 << CLIDR_CTYPE_SHIFT(level))
+#define CLIDR_CTYPE(clidr, level) \
+    (((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level))
+
+/* L1 data cache */
+#define ARM_L1DCACHE_SIZE 65536
+#define ARM_L1DCACHE_SETS 256
+#define ARM_L1DCACHE_ASSOCIATIVITY 4
+#define ARM_L1DCACHE_ATTRIBUTES 2
+#define ARM_L1DCACHE_LINE_SIZE 64
+
+/* L1 instruction cache */
+#define ARM_L1ICACHE_SIZE 65536
+#define ARM_L1ICACHE_SETS 256
+#define ARM_L1ICACHE_ASSOCIATIVITY 4
+#define ARM_L1ICACHE_ATTRIBUTES 4
+#define ARM_L1ICACHE_LINE_SIZE 64
+
+/* L1 unified cache */
+#define ARM_L1CACHE_SIZE 131072
+#define ARM_L1CACHE_SETS 256
+#define ARM_L1CACHE_ASSOCIATIVITY 4
+#define ARM_L1CACHE_ATTRIBUTES 10
+#define ARM_L1CACHE_LINE_SIZE 128
+
+/* L2 unified cache */
+#define ARM_L2CACHE_SIZE 524288
+#define ARM_L2CACHE_SETS 1024
+#define ARM_L2CACHE_ASSOCIATIVITY 8
+#define ARM_L2CACHE_ATTRIBUTES 10
+#define ARM_L2CACHE_LINE_SIZE 64
+
+/* L3 unified cache */
+#define ARM_L3CACHE_SIZE 33554432
+#define ARM_L3CACHE_SETS 2048
+#define ARM_L3CACHE_ASSOCIATIVITY 15
+#define ARM_L3CACHE_ATTRIBUTES 10
+#define ARM_L3CACHE_LINE_SIZE 128
+
+/* Definitions of the hardcoded cache info */
+typedef enum {
+    ARM_L1D_CACHE,
+    ARM_L1I_CACHE,
+    ARM_L1_CACHE,
+    ARM_L2_CACHE,
+    ARM_L3_CACHE
+} ArmCacheType;
+
 enum {
     VIRT_FLASH,
     VIRT_MEM,
@@ -194,6 +253,7 @@ OBJECT_DECLARE_TYPE(VirtMachineState, VirtMachineClass, VIRT_MACHINE)
 
 void virt_acpi_setup(VirtMachineState *vms);
 bool virt_is_acpi_enabled(VirtMachineState *vms);
+bool cpu_l1_cache_unified(int cpu);
 
 /* Return number of redistributors that fit in the specified region */
 static uint32_t virt_redist_capacity(VirtMachineState *vms, int region)
-- 
Gitee
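
For readers following the CLIDR_EL1 handling above, here is a minimal, self-contained sketch (not part of the patch) of how the Ctype1 field distinguishes a unified L1 cache from split instruction/data caches. The macros mirror the CTYPE_* and CLIDR_CTYPE() helpers the patch adds to include/hw/arm/virt.h; the sample CLIDR_EL1 values fed to main() are hypothetical and only exercise the Ctype1 bits. Reading the real guest-visible register is what the new cpu_l1_cache_unified() helper does via KVM_GET_ONE_REG.

/*
 * Sketch only: decode the Ctype1 field of a CLIDR_EL1 value.
 * The CLIDR values used below are made up for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Ctypen, bits[3(n - 1) + 2 : 3(n - 1)], for n = 1 to 7 */
#define CTYPE_INS_DATA  0x3   /* separate instruction and data caches */
#define CTYPE_UNIFIED   0x4   /* unified cache */

#define CLIDR_CTYPE_SHIFT(level)  (3 * ((level) - 1))
#define CLIDR_CTYPE_MASK(level)   (7 << CLIDR_CTYPE_SHIFT(level))
#define CLIDR_CTYPE(clidr, level) \
    (((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level))

/* Same test cpu_l1_cache_unified() applies after reading the register */
static bool l1_cache_unified(uint64_t clidr)
{
    return CLIDR_CTYPE(clidr, 1) == CTYPE_UNIFIED;
}

int main(void)
{
    uint64_t clidr_split   = 0x3;   /* Ctype1 = 0b011: split I/D L1 */
    uint64_t clidr_unified = 0x4;   /* Ctype1 = 0b100: unified L1 */

    printf("split L1 reported unified?   %d\n", l1_cache_unified(clidr_split));
    printf("unified L1 reported unified? %d\n", l1_cache_unified(clidr_unified));
    return 0;
}

When Ctype1 reads as unified, the guest is given a single L1 description in both the PPTT (ARM_L1_CACHE) and the DT ("cache-size"/"cache-line-size"/"cache-sets"); otherwise it gets the separate d-cache/i-cache properties, matching what the fabricated CLIDR_EL1 advertises.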