xiaoyuliang/qemu (forked from src-openEuler/qemu)
hw-arm-virt-Keep-Guest-L1-cache-type-consistent-with.patch 25.38 KB
Jiabo Feng, committed 2024-11-30 08:43 +08:00: QEMU update to version 8.2.0-25
From 6060f8cad07a3d2a49795fef19d585a9d205ecef Mon Sep 17 00:00:00 2001
From: Jia Qingtong <jiaqingtong97@gmail.com>
Date: Tue, 24 Sep 2024 18:24:33 +0800
Subject: [PATCH] hw/arm/virt: Keep Guest L1 cache type consistent with KVM
Linux KVM normalizes the cache configuration and exposes a
fabricated CLIDR_EL1 value to the guest, where the L1 cache type
may be either unified or separate instruction and data caches.
Keep the guest L1 cache type consistent with KVM by checking the
guest-visible CLIDR_EL1, which avoids abnormal issues when the
guest probes cache info by combining CLIDR_EL1 with the ACPI PPTT
or the DT.
Signed-off-by: Yanan Wang <wangyanan55@huawei.com>
Signed-off-by: lishusen <lishusen2@huawei.com>
---
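[Reviewer note, not part of the patch] As a quick reference, below is a minimal sketch of how the L1 Ctype field of CLIDR_EL1 is decoded, using the same shift/mask scheme as the CLIDR_CTYPE* macros this patch adds to include/hw/arm/virt.h. The CLIDR_EL1 value used here is purely hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Ctype<n> occupies bits [3(n-1)+2 : 3(n-1)]; 0b100 (CTYPE_UNIFIED) is a unified cache. */
static unsigned clidr_ctype(uint64_t clidr, unsigned level)
{
    unsigned shift = 3 * (level - 1);   /* same as CLIDR_CTYPE_SHIFT(level) */
    return (clidr >> shift) & 0x7;
}

int main(void)
{
    uint64_t clidr = 0x82000023;        /* hypothetical guest-visible CLIDR_EL1 */
    unsigned l1 = clidr_ctype(clidr, 1);

    /* 0b011 = separate I and D caches, 0b100 = unified L1 */
    printf("L1 ctype = %u (%s)\n", l1, l1 == 0x4 ? "unified" : "separate I/D");
    return 0;
}
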
hw/acpi/aml-build.c | 165 ++---------------------------------
hw/arm/virt-acpi-build.c | 167 ++++++++++++++++++++++++++++++++++++
hw/arm/virt.c | 86 +++++++++++++++----
include/hw/acpi/aml-build.h | 54 ++----------
include/hw/arm/virt.h | 60 +++++++++++++
5 files changed, 306 insertions(+), 226 deletions(-)
diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
index bf9c59f544..0d4994bafe 100644
--- a/hw/acpi/aml-build.c
+++ b/hw/acpi/aml-build.c
@@ -47,7 +47,7 @@ static void build_prepend_byte(GArray *array, uint8_t val)
g_array_prepend_val(array, val);
}
-static void build_append_byte(GArray *array, uint8_t val)
+void build_append_byte(GArray *array, uint8_t val)
{
g_array_append_val(array, val);
}
@@ -1990,10 +1990,10 @@ void build_slit(GArray *table_data, BIOSLinker *linker, MachineState *ms,
* ACPI spec, Revision 6.3
* 5.2.29.1 Processor hierarchy node structure (Type 0)
*/
-static void build_processor_hierarchy_node(GArray *tbl, uint32_t flags,
- uint32_t parent, uint32_t id,
- uint32_t *priv_rsrc,
- uint32_t priv_num)
+void build_processor_hierarchy_node(GArray *tbl, uint32_t flags,
+ uint32_t parent, uint32_t id,
+ uint32_t *priv_rsrc,
+ uint32_t priv_num)
{
int i;
@@ -2016,161 +2016,6 @@ static void build_processor_hierarchy_node(GArray *tbl, uint32_t flags,
}
}
-/*
- * ACPI spec, Revision 6.3
- * 5.2.29.2 Cache Type Structure (Type 1)
- */
-static void build_cache_hierarchy_node(GArray *tbl, uint32_t next_level,
- uint32_t cache_type)
-{
- build_append_byte(tbl, 1);
- build_append_byte(tbl, 24);
- build_append_int_noprefix(tbl, 0, 2);
- build_append_int_noprefix(tbl, 127, 4);
- build_append_int_noprefix(tbl, next_level, 4);
-
- switch (cache_type) {
- case ARM_L1D_CACHE: /* L1 dcache info */
- build_append_int_noprefix(tbl, ARM_L1DCACHE_SIZE, 4);
- build_append_int_noprefix(tbl, ARM_L1DCACHE_SETS, 4);
- build_append_byte(tbl, ARM_L1DCACHE_ASSOCIATIVITY);
- build_append_byte(tbl, ARM_L1DCACHE_ATTRIBUTES);
- build_append_int_noprefix(tbl, ARM_L1DCACHE_LINE_SIZE, 2);
- break;
- case ARM_L1I_CACHE: /* L1 icache info */
- build_append_int_noprefix(tbl, ARM_L1ICACHE_SIZE, 4);
- build_append_int_noprefix(tbl, ARM_L1ICACHE_SETS, 4);
- build_append_byte(tbl, ARM_L1ICACHE_ASSOCIATIVITY);
- build_append_byte(tbl, ARM_L1ICACHE_ATTRIBUTES);
- build_append_int_noprefix(tbl, ARM_L1ICACHE_LINE_SIZE, 2);
- break;
- case ARM_L2_CACHE: /* L2 cache info */
- build_append_int_noprefix(tbl, ARM_L2CACHE_SIZE, 4);
- build_append_int_noprefix(tbl, ARM_L2CACHE_SETS, 4);
- build_append_byte(tbl, ARM_L2CACHE_ASSOCIATIVITY);
- build_append_byte(tbl, ARM_L2CACHE_ATTRIBUTES);
- build_append_int_noprefix(tbl, ARM_L2CACHE_LINE_SIZE, 2);
- break;
- case ARM_L3_CACHE: /* L3 cache info */
- build_append_int_noprefix(tbl, ARM_L3CACHE_SIZE, 4);
- build_append_int_noprefix(tbl, ARM_L3CACHE_SETS, 4);
- build_append_byte(tbl, ARM_L3CACHE_ASSOCIATIVITY);
- build_append_byte(tbl, ARM_L3CACHE_ATTRIBUTES);
- build_append_int_noprefix(tbl, ARM_L3CACHE_LINE_SIZE, 2);
- break;
- default:
- build_append_int_noprefix(tbl, 0, 4);
- build_append_int_noprefix(tbl, 0, 4);
- build_append_byte(tbl, 0);
- build_append_byte(tbl, 0);
- build_append_int_noprefix(tbl, 0, 2);
- }
-}
-
-/*
- * ACPI spec, Revision 6.3
- * 5.2.29 Processor Properties Topology Table (PPTT)
- */
-void build_pptt_arm(GArray *table_data, BIOSLinker *linker, MachineState *ms,
- const char *oem_id, const char *oem_table_id)
-{
- MachineClass *mc = MACHINE_GET_CLASS(ms);
- GQueue *list = g_queue_new();
- guint pptt_start = table_data->len;
- guint parent_offset;
- guint length, i;
- int uid = 0;
- int socket;
- AcpiTable table = { .sig = "PPTT", .rev = 2,
- .oem_id = oem_id, .oem_table_id = oem_table_id };
-
- acpi_table_begin(&table, table_data);
-
- for (socket = 0; socket < ms->smp.sockets; socket++) {
- uint32_t l3_cache_offset = table_data->len - pptt_start;
- build_cache_hierarchy_node(table_data, 0, ARM_L3_CACHE);
-
- g_queue_push_tail(list,
- GUINT_TO_POINTER(table_data->len - pptt_start));
- build_processor_hierarchy_node(
- table_data,
- /*
- * Physical package - represents the boundary
- * of a physical package
- */
- (1 << 0),
- 0, socket, &l3_cache_offset, 1);
- }
-
- if (mc->smp_props.clusters_supported) {
- length = g_queue_get_length(list);
- for (i = 0; i < length; i++) {
- int cluster;
-
- parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
- for (cluster = 0; cluster < ms->smp.clusters; cluster++) {
- g_queue_push_tail(list,
- GUINT_TO_POINTER(table_data->len - pptt_start));
- build_processor_hierarchy_node(
- table_data,
- (0 << 0), /* not a physical package */
- parent_offset, cluster, NULL, 0);
- }
- }
- }
-
- length = g_queue_get_length(list);
- for (i = 0; i < length; i++) {
- int core;
-
- parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
- for (core = 0; core < ms->smp.cores; core++) {
- uint32_t priv_rsrc[3] = {};
- priv_rsrc[0] = table_data->len - pptt_start; /* L2 cache offset */
- build_cache_hierarchy_node(table_data, 0, ARM_L2_CACHE);
-
- priv_rsrc[1] = table_data->len - pptt_start; /* L1 dcache offset */
- build_cache_hierarchy_node(table_data, priv_rsrc[0], ARM_L1D_CACHE);
-
- priv_rsrc[2] = table_data->len - pptt_start; /* L1 icache offset */
- build_cache_hierarchy_node(table_data, priv_rsrc[0], ARM_L1I_CACHE);
-
- if (ms->smp.threads > 1) {
- g_queue_push_tail(list,
- GUINT_TO_POINTER(table_data->len - pptt_start));
- build_processor_hierarchy_node(
- table_data,
- (0 << 0), /* not a physical package */
- parent_offset, core, priv_rsrc, 3);
- } else {
- build_processor_hierarchy_node(
- table_data,
- (1 << 1) | /* ACPI Processor ID valid */
- (1 << 3), /* Node is a Leaf */
- parent_offset, uid++, priv_rsrc, 3);
- }
- }
- }
-
- length = g_queue_get_length(list);
- for (i = 0; i < length; i++) {
- int thread;
-
- parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
- for (thread = 0; thread < ms->smp.threads; thread++) {
- build_processor_hierarchy_node(
- table_data,
- (1 << 1) | /* ACPI Processor ID valid */
- (1 << 2) | /* Processor is a Thread */
- (1 << 3), /* Node is a Leaf */
- parent_offset, uid++, NULL, 0);
- }
- }
-
- g_queue_free(list);
- acpi_table_end(linker, &table);
-}
-
/*
* ACPI spec, Revision 6.3
* 5.2.29 Processor Properties Topology Table (PPTT)
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index 179600d4fe..86984b7167 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -63,6 +63,173 @@
#define ACPI_BUILD_TABLE_SIZE 0x20000
+/*
+ * ACPI spec, Revision 6.3
+ * 5.2.29.2 Cache Type Structure (Type 1)
+ */
+static void build_cache_hierarchy_node(GArray *tbl, uint32_t next_level,
+ uint32_t cache_type)
+{
+ build_append_byte(tbl, 1);
+ build_append_byte(tbl, 24);
+ build_append_int_noprefix(tbl, 0, 2);
+ build_append_int_noprefix(tbl, 127, 4);
+ build_append_int_noprefix(tbl, next_level, 4);
+
+ switch (cache_type) {
+ case ARM_L1D_CACHE: /* L1 dcache info */
+ build_append_int_noprefix(tbl, ARM_L1DCACHE_SIZE, 4);
+ build_append_int_noprefix(tbl, ARM_L1DCACHE_SETS, 4);
+ build_append_byte(tbl, ARM_L1DCACHE_ASSOCIATIVITY);
+ build_append_byte(tbl, ARM_L1DCACHE_ATTRIBUTES);
+ build_append_int_noprefix(tbl, ARM_L1DCACHE_LINE_SIZE, 2);
+ break;
+ case ARM_L1I_CACHE: /* L1 icache info */
+ build_append_int_noprefix(tbl, ARM_L1ICACHE_SIZE, 4);
+ build_append_int_noprefix(tbl, ARM_L1ICACHE_SETS, 4);
+ build_append_byte(tbl, ARM_L1ICACHE_ASSOCIATIVITY);
+ build_append_byte(tbl, ARM_L1ICACHE_ATTRIBUTES);
+ build_append_int_noprefix(tbl, ARM_L1ICACHE_LINE_SIZE, 2);
+ break;
+ case ARM_L1_CACHE: /* L1 cache info */
+ build_append_int_noprefix(tbl, ARM_L1CACHE_SIZE, 4);
+ build_append_int_noprefix(tbl, ARM_L1CACHE_SETS, 4);
+ build_append_byte(tbl, ARM_L1CACHE_ASSOCIATIVITY);
+ build_append_byte(tbl, ARM_L1CACHE_ATTRIBUTES);
+ build_append_int_noprefix(tbl, ARM_L1CACHE_LINE_SIZE, 2);
+ break;
+ case ARM_L2_CACHE: /* L2 cache info */
+ build_append_int_noprefix(tbl, ARM_L2CACHE_SIZE, 4);
+ build_append_int_noprefix(tbl, ARM_L2CACHE_SETS, 4);
+ build_append_byte(tbl, ARM_L2CACHE_ASSOCIATIVITY);
+ build_append_byte(tbl, ARM_L2CACHE_ATTRIBUTES);
+ build_append_int_noprefix(tbl, ARM_L2CACHE_LINE_SIZE, 2);
+ break;
+ case ARM_L3_CACHE: /* L3 cache info */
+ build_append_int_noprefix(tbl, ARM_L3CACHE_SIZE, 4);
+ build_append_int_noprefix(tbl, ARM_L3CACHE_SETS, 4);
+ build_append_byte(tbl, ARM_L3CACHE_ASSOCIATIVITY);
+ build_append_byte(tbl, ARM_L3CACHE_ATTRIBUTES);
+ build_append_int_noprefix(tbl, ARM_L3CACHE_LINE_SIZE, 2);
+ break;
+ default:
+ build_append_int_noprefix(tbl, 0, 4);
+ build_append_int_noprefix(tbl, 0, 4);
+ build_append_byte(tbl, 0);
+ build_append_byte(tbl, 0);
+ build_append_int_noprefix(tbl, 0, 2);
+ }
+}
+
+/*
+ * ACPI spec, Revision 6.3
+ * 5.2.29 Processor Properties Topology Table (PPTT)
+ */
+static void build_pptt_arm(GArray *table_data, BIOSLinker *linker, MachineState *ms,
+ const char *oem_id, const char *oem_table_id)
+{
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
+ GQueue *list = g_queue_new();
+ guint pptt_start = table_data->len;
+ guint parent_offset;
+ guint length, i;
+ int uid = 0;
+ int socket;
+ AcpiTable table = { .sig = "PPTT", .rev = 2,
+ .oem_id = oem_id, .oem_table_id = oem_table_id };
+ bool unified_l1 = cpu_l1_cache_unified(0);
+
+ acpi_table_begin(&table, table_data);
+
+ for (socket = 0; socket < ms->smp.sockets; socket++) {
+ uint32_t l3_cache_offset = table_data->len - pptt_start;
+ build_cache_hierarchy_node(table_data, 0, ARM_L3_CACHE);
+
+ g_queue_push_tail(list,
+ GUINT_TO_POINTER(table_data->len - pptt_start));
+ build_processor_hierarchy_node(
+ table_data,
+ /*
+ * Physical package - represents the boundary
+ * of a physical package
+ */
+ (1 << 0),
+ 0, socket, &l3_cache_offset, 1);
+ }
+
+ if (mc->smp_props.clusters_supported) {
+ length = g_queue_get_length(list);
+ for (i = 0; i < length; i++) {
+ int cluster;
+
+ parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
+ for (cluster = 0; cluster < ms->smp.clusters; cluster++) {
+ g_queue_push_tail(list,
+ GUINT_TO_POINTER(table_data->len - pptt_start));
+ build_processor_hierarchy_node(
+ table_data,
+ (0 << 0), /* not a physical package */
+ parent_offset, cluster, NULL, 0);
+ }
+ }
+ }
+
+ length = g_queue_get_length(list);
+ for (i = 0; i < length; i++) {
+ int core;
+
+ parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
+ for (core = 0; core < ms->smp.cores; core++) {
+ uint32_t priv_rsrc[3] = {};
+ priv_rsrc[0] = table_data->len - pptt_start; /* L2 cache offset */
+ build_cache_hierarchy_node(table_data, 0, ARM_L2_CACHE);
+
+ if (unified_l1) {
+ priv_rsrc[1] = table_data->len - pptt_start; /* L1 cache offset */
+ build_cache_hierarchy_node(table_data, priv_rsrc[0], ARM_L1_CACHE);
+ } else {
+ priv_rsrc[1] = table_data->len - pptt_start; /* L1 dcache offset */
+ build_cache_hierarchy_node(table_data, priv_rsrc[0], ARM_L1D_CACHE);
+ priv_rsrc[2] = table_data->len - pptt_start; /* L1 icache offset */
+ build_cache_hierarchy_node(table_data, priv_rsrc[0], ARM_L1I_CACHE);
+ }
+
+ if (ms->smp.threads > 1) {
+ g_queue_push_tail(list,
+ GUINT_TO_POINTER(table_data->len - pptt_start));
+ build_processor_hierarchy_node(
+ table_data,
+ (0 << 0), /* not a physical package */
+ parent_offset, core, priv_rsrc, 3);
+ } else {
+ build_processor_hierarchy_node(
+ table_data,
+ (1 << 1) | /* ACPI Processor ID valid */
+ (1 << 3), /* Node is a Leaf */
+ parent_offset, uid++, priv_rsrc, 3);
+ }
+ }
+ }
+
+ length = g_queue_get_length(list);
+ for (i = 0; i < length; i++) {
+ int thread;
+
+ parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
+ for (thread = 0; thread < ms->smp.threads; thread++) {
+ build_processor_hierarchy_node(
+ table_data,
+ (1 << 1) | /* ACPI Processor ID valid */
+ (1 << 2) | /* Processor is a Thread */
+ (1 << 3), /* Node is a Leaf */
+ parent_offset, uid++, NULL, 0);
+ }
+ }
+
+ g_queue_free(list);
+ acpi_table_end(linker, &table);
+}
+
static void acpi_dsdt_add_psd(Aml *dev, int cpus)
{
Aml *pkg;
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index e31c289968..a9efcec85e 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -401,6 +401,39 @@ static void fdt_add_timer_nodes(const VirtMachineState *vms)
INTID_TO_PPI(ARCH_TIMER_NS_EL2_IRQ), irqflags);
}
+/*
+ * In the CLIDR_EL1 exposed to the guest by the hypervisor, the L1 cache
+ * type may be unified or separate instruction and data caches. We need
+ * to read the guest-visible CLIDR_EL1 and check the L1 cache type.
+ */
+bool cpu_l1_cache_unified(int cpu)
+{
+ bool unified = false;
+ uint64_t clidr;
+ ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu));
+ CPUState *cs = CPU(armcpu);
+ int ret;
+
+ if (kvm_enabled()) {
+ struct kvm_one_reg reg = {
+ .id = ARM64_REG_CLIDR_EL1,
+ .addr = (uintptr_t)&clidr
+ };
+
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ error_setg(&error_fatal, "Get vCPU clidr from KVM failed:%d", ret);
+ return unified;
+ }
+
+ if (CLIDR_CTYPE(clidr, 1) == CTYPE_UNIFIED) {
+ unified = true;
+ }
+ }
+
+ return unified;
+}
+
static void fdt_add_l3cache_nodes(const VirtMachineState *vms)
{
int i;
@@ -415,9 +448,10 @@ static void fdt_add_l3cache_nodes(const VirtMachineState *vms)
qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "cache");
qemu_fdt_setprop_string(ms->fdt, nodename, "cache-unified", "true");
qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-level", 3);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-size", 0x2000000);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-line-size", 128);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-sets", 2048);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-size", ARM_L3CACHE_SIZE);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-line-size",
+ ARM_L3CACHE_LINE_SIZE);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-sets", ARM_L3CACHE_SETS);
qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle",
qemu_fdt_alloc_phandle(ms->fdt));
g_free(nodename);
@@ -436,10 +470,12 @@ static void fdt_add_l2cache_nodes(const VirtMachineState *vms)
char *nodename = g_strdup_printf("/cpus/l2-cache%d", cpu);
qemu_fdt_add_subnode(ms->fdt, nodename);
+ qemu_fdt_setprop_string(ms->fdt, nodename, "cache-unified", "true");
qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "cache");
- qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-size", 0x80000);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-line-size", 64);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-sets", 1024);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-size", ARM_L2CACHE_SIZE);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-line-size",
+ ARM_L2CACHE_LINE_SIZE);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-sets", ARM_L2CACHE_SETS);
qemu_fdt_setprop_phandle(ms->fdt, nodename, "next-level-cache",
next_path);
qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle",
@@ -453,18 +489,32 @@ static void fdt_add_l2cache_nodes(const VirtMachineState *vms)
static void fdt_add_l1cache_prop(const VirtMachineState *vms,
char *nodename, int cpu)
{
- const MachineState *ms = MACHINE(vms);
- char *cachename = g_strdup_printf("/cpus/l2-cache%d", cpu);
-
- qemu_fdt_setprop_cell(ms->fdt, nodename, "d-cache-size", 0x10000);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "d-cache-line-size", 64);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "d-cache-sets", 256);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "i-cache-size", 0x10000);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "i-cache-line-size", 64);
- qemu_fdt_setprop_cell(ms->fdt, nodename, "i-cache-sets", 256);
- qemu_fdt_setprop_phandle(ms->fdt, nodename, "next-level-cache",
- cachename);
- g_free(cachename);
+ const MachineState *ms = MACHINE(vms);
+ char *next_path = g_strdup_printf("/cpus/l2-cache%d", cpu);
+ bool unified_l1 = cpu_l1_cache_unified(0);
+
+ if (unified_l1) {
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-size", ARM_L1CACHE_SIZE);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-line-size",
+ ARM_L1CACHE_LINE_SIZE);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-sets", ARM_L1CACHE_SETS);
+ } else {
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "d-cache-size",
+ ARM_L1DCACHE_SIZE);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "d-cache-line-size",
+ ARM_L1DCACHE_LINE_SIZE);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "d-cache-sets",
+ ARM_L1DCACHE_SETS);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "i-cache-size",
+ ARM_L1ICACHE_SIZE);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "i-cache-line-size",
+ ARM_L1ICACHE_LINE_SIZE);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "i-cache-sets",
+ ARM_L1ICACHE_SETS);
+ }
+ qemu_fdt_setprop_phandle(ms->fdt, nodename, "next-level-cache", next_path);
+
+ g_free(next_path);
}
static void fdt_add_cpu_nodes(const VirtMachineState *vms)
diff --git a/include/hw/acpi/aml-build.h b/include/hw/acpi/aml-build.h
index 7281c281f6..91f9cbf4f1 100644
--- a/include/hw/acpi/aml-build.h
+++ b/include/hw/acpi/aml-build.h
@@ -221,51 +221,6 @@ struct AcpiBuildTables {
BIOSLinker *linker;
} AcpiBuildTables;
-/* Definitions of the hardcoded cache info*/
-
-typedef enum {
- ARM_L1D_CACHE,
- ARM_L1I_CACHE,
- ARM_L2_CACHE,
- ARM_L3_CACHE
-} ArmCacheType;
-
-/* L1 data cache: */
-#define ARM_L1DCACHE_SIZE 65536
-#define ARM_L1DCACHE_SETS 256
-#define ARM_L1DCACHE_ASSOCIATIVITY 4
-#define ARM_L1DCACHE_ATTRIBUTES 2
-#define ARM_L1DCACHE_LINE_SIZE 64
-
-/* L1 instruction cache: */
-#define ARM_L1ICACHE_SIZE 65536
-#define ARM_L1ICACHE_SETS 256
-#define ARM_L1ICACHE_ASSOCIATIVITY 4
-#define ARM_L1ICACHE_ATTRIBUTES 4
-#define ARM_L1ICACHE_LINE_SIZE 64
-
-/* Level 2 unified cache: */
-#define ARM_L2CACHE_SIZE 524288
-#define ARM_L2CACHE_SETS 1024
-#define ARM_L2CACHE_ASSOCIATIVITY 8
-#define ARM_L2CACHE_ATTRIBUTES 10
-#define ARM_L2CACHE_LINE_SIZE 64
-
-/* Level 3 unified cache: */
-#define ARM_L3CACHE_SIZE 33554432
-#define ARM_L3CACHE_SETS 2048
-#define ARM_L3CACHE_ASSOCIATIVITY 15
-#define ARM_L3CACHE_ATTRIBUTES 10
-#define ARM_L3CACHE_LINE_SIZE 128
-
-struct offset_status {
- uint32_t parent;
- uint32_t l2_offset;
- uint32_t l1d_offset;
- uint32_t l1i_offset;
-};
-
-
typedef
struct CrsRangeEntry {
uint64_t base;
@@ -460,6 +415,7 @@ Aml *aml_sizeof(Aml *arg);
Aml *aml_concatenate(Aml *source1, Aml *source2, Aml *target);
Aml *aml_object_type(Aml *object);
+void build_append_byte(GArray *array, uint8_t val);
void build_append_int_noprefix(GArray *table, uint64_t value, int size);
typedef struct AcpiTable {
@@ -537,10 +493,12 @@ void build_srat_memory(GArray *table_data, uint64_t base,
void build_slit(GArray *table_data, BIOSLinker *linker, MachineState *ms,
const char *oem_id, const char *oem_table_id);
-void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
- const char *oem_id, const char *oem_table_id);
+void build_processor_hierarchy_node(GArray *tbl, uint32_t flags,
+ uint32_t parent, uint32_t id,
+ uint32_t *priv_rsrc,
+ uint32_t priv_num);
-void build_pptt_arm(GArray *table_data, BIOSLinker *linker, MachineState *ms,
+void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
const char *oem_id, const char *oem_table_id);
void build_fadt(GArray *tbl, BIOSLinker *linker, const AcpiFadtData *f,
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
index 76a0d3fa5b..4b7dc61c24 100644
--- a/include/hw/arm/virt.h
+++ b/include/hw/arm/virt.h
@@ -47,6 +47,65 @@
/* See Linux kernel arch/arm64/include/asm/pvclock-abi.h */
#define PVTIME_SIZE_PER_CPU 64
+/* ARM CLIDR_EL1 related definitions */
+/* Ctypen, bits[3(n - 1) + 2 : 3(n - 1)], for n = 1 to 7 */
+#define CTYPE_NONE 0b000
+#define CTYPE_INS 0b001
+#define CTYPE_DATA 0b010
+#define CTYPE_INS_DATA 0b011
+#define CTYPE_UNIFIED 0b100
+
+#define ARM64_REG_CLIDR_EL1 ARM64_SYS_REG(3, 1, 0, 0, 1)
+
+#define CLIDR_CTYPE_SHIFT(level) (3 * (level - 1))
+#define CLIDR_CTYPE_MASK(level) (7 << CLIDR_CTYPE_SHIFT(level))
+#define CLIDR_CTYPE(clidr, level) \
+ (((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level))
+
+/* L1 data cache */
+#define ARM_L1DCACHE_SIZE 65536
+#define ARM_L1DCACHE_SETS 256
+#define ARM_L1DCACHE_ASSOCIATIVITY 4
+#define ARM_L1DCACHE_ATTRIBUTES 2
+#define ARM_L1DCACHE_LINE_SIZE 64
+
+/* L1 instruction cache */
+#define ARM_L1ICACHE_SIZE 65536
+#define ARM_L1ICACHE_SETS 256
+#define ARM_L1ICACHE_ASSOCIATIVITY 4
+#define ARM_L1ICACHE_ATTRIBUTES 4
+#define ARM_L1ICACHE_LINE_SIZE 64
+
+/* L1 unified cache */
+#define ARM_L1CACHE_SIZE 131072
+#define ARM_L1CACHE_SETS 256
+#define ARM_L1CACHE_ASSOCIATIVITY 4
+#define ARM_L1CACHE_ATTRIBUTES 10
+#define ARM_L1CACHE_LINE_SIZE 128
+
+/* L2 unified cache */
+#define ARM_L2CACHE_SIZE 524288
+#define ARM_L2CACHE_SETS 1024
+#define ARM_L2CACHE_ASSOCIATIVITY 8
+#define ARM_L2CACHE_ATTRIBUTES 10
+#define ARM_L2CACHE_LINE_SIZE 64
+
+/* L3 unified cache */
+#define ARM_L3CACHE_SIZE 33554432
+#define ARM_L3CACHE_SETS 2048
+#define ARM_L3CACHE_ASSOCIATIVITY 15
+#define ARM_L3CACHE_ATTRIBUTES 10
+#define ARM_L3CACHE_LINE_SIZE 128
+
+/* Definitions of the hardcoded cache info */
+typedef enum {
+ ARM_L1D_CACHE,
+ ARM_L1I_CACHE,
+ ARM_L1_CACHE,
+ ARM_L2_CACHE,
+ ARM_L3_CACHE
+} ArmCacheType;
+
enum {
VIRT_FLASH,
VIRT_MEM,
@@ -194,6 +253,7 @@ OBJECT_DECLARE_TYPE(VirtMachineState, VirtMachineClass, VIRT_MACHINE)
void virt_acpi_setup(VirtMachineState *vms);
bool virt_is_acpi_enabled(VirtMachineState *vms);
+bool cpu_l1_cache_unified(int cpu);
/* Return number of redistributors that fit in the specified region */
static uint32_t virt_redist_capacity(VirtMachineState *vms, int region)
--
2.41.0.windows.1
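
[Appended note, not part of the patch] For comparison, a standalone sketch of the same KVM_GET_ONE_REG read that cpu_l1_cache_unified() performs, written against the raw KVM ioctl interface instead of QEMU's kvm_vcpu_ioctl() wrapper. It assumes an arm64 host (so <linux/kvm.h> provides ARM64_SYS_REG) and an already-created vCPU file descriptor; error handling is minimal.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>   /* struct kvm_one_reg, KVM_GET_ONE_REG, ARM64_SYS_REG */

/* CLIDR_EL1: op0=3, op1=1, CRn=0, CRm=0, op2=1, as in the patch. */
#define REG_CLIDR_EL1 ARM64_SYS_REG(3, 1, 0, 0, 1)

/* Returns 1 if the guest-visible L1 cache is unified, 0 if split, -1 on error. */
int l1_cache_unified(int vcpu_fd)
{
    uint64_t clidr = 0;
    struct kvm_one_reg reg = {
        .id   = REG_CLIDR_EL1,
        .addr = (uintptr_t)&clidr,
    };

    if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0) {
        return -1;
    }
    /* Ctype1 is bits [2:0] of CLIDR_EL1; 0b100 means a unified L1 cache. */
    return (clidr & 0x7) == 0x4;
}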