diff --git a/Makefile.target b/Makefile.target index 933b27453a1dbb9273e0a9ff2d586c84ddf38fae..5ea840964c0b0e1aab2ef2473443e964b2d0b499 100644 --- a/Makefile.target +++ b/Makefile.target @@ -161,6 +161,7 @@ obj-y += qapi/ obj-y += memory.o obj-y += memory_mapping.o obj-y += migration/ram.o +obj-y += migration/dirtyrate.o LIBS := $(libs_softmmu) $(LIBS) # Hardware support diff --git a/backends/hostmem.c b/backends/hostmem.c index 463102aa15df7d21fd034c02dfb68770d9342431..9e1b3a0afc34c1931d19fb4b91fbe13b44d33d42 100644 --- a/backends/hostmem.c +++ b/backends/hostmem.c @@ -108,6 +108,7 @@ host_memory_backend_get_host_nodes(Object *obj, Visitor *v, const char *name, ret: visit_type_uint16List(v, name, &host_nodes, errp); + qapi_free_uint16List(host_nodes); } static void diff --git a/docs/arm-cpu-features.rst b/docs/arm-cpu-features.rst new file mode 100644 index 0000000000000000000000000000000000000000..c79dcffb55562c9c2cebb80ac6fc5cfe324f6a8b --- /dev/null +++ b/docs/arm-cpu-features.rst @@ -0,0 +1,137 @@ +================ +ARM CPU Features +================ + +Examples of probing and using ARM CPU features + +Introduction +============ + +CPU features are optional features that a CPU of a supporting type may +choose to implement or not. In QEMU, optional CPU features have +corresponding boolean CPU properties that, when enabled, indicate +that the feature is implemented, and, conversely, when disabled, +indicate that it is not implemented. An example of an ARM CPU feature +is the Performance Monitoring Unit (PMU). CPU types such as the +Cortex-A15 and the Cortex-A57, which respectively implement ARM +architecture reference manuals ARMv7-A and ARMv8-A, may both optionally +implement PMUs. For example, if a user wants to use a Cortex-A15 without +a PMU, then the `-cpu` parameter should contain `pmu=off` on the QEMU +command line, i.e. `-cpu cortex-a15,pmu=off`. + +As not all CPU types support all optional CPU features, whether or +not a CPU property exists depends on the CPU type. For example, CPUs +that implement the ARMv8-A architecture reference manual may optionally +support the AArch32 CPU feature, which may be enabled by disabling the +`aarch64` CPU property. A CPU type such as the Cortex-A15, which does +not implement ARMv8-A, will not have the `aarch64` CPU property. + +QEMU's support may be limited for some CPU features, only partially +supporting the feature or only supporting the feature under certain +configurations. For example, the `aarch64` CPU feature, which, when +disabled, enables the optional AArch32 CPU feature, is only supported +when using the KVM accelerator and when running on a host CPU type that +supports the feature. + +CPU Feature Probing +=================== + +Determining which CPU features are available and functional for a given +CPU type is possible with the `query-cpu-model-expansion` QMP command. +Below are some examples where `scripts/qmp/qmp-shell` (see the top comment +block in the script for usage) is used to issue the QMP commands. + +(1) Determine which CPU features are available for the `max` CPU type + (Note, we started QEMU with qemu-system-aarch64, so `max` is + implementing the ARMv8-A reference manual in this case):: + + (QEMU) query-cpu-model-expansion type=full model={"name":"max"} + { "return": { + "model": { "name": "max", "props": { + "pmu": true, "aarch64": true + }}}} + +We see that the `max` CPU type has the `pmu` and `aarch64` CPU features. +We also see that the CPU features are enabled, as they are all `true`.
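+
+The `qmp-shell` examples above map directly onto the raw QMP wire
+protocol. As a rough sketch (assuming a QMP monitor is available, and
+eliding the reply, which has the same shape as the `qmp-shell` output
+above), the first probe could also be issued as::
+
+  { "execute": "query-cpu-model-expansion",
+    "arguments": { "type": "full", "model": { "name": "max" } } }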
+ +(2) Let's try to disable the PMU:: + + (QEMU) query-cpu-model-expansion type=full model={"name":"max","props":{"pmu":false}} + { "return": { + "model": { "name": "max", "props": { + "pmu": false, "aarch64": true + }}}} + +We see it worked, as `pmu` is now `false`. + +(3) Let's try to disable `aarch64`, which enables the AArch32 CPU feature:: + + (QEMU) query-cpu-model-expansion type=full model={"name":"max","props":{"aarch64":false}} + {"error": { + "class": "GenericError", "desc": + "'aarch64' feature cannot be disabled unless KVM is enabled and 32-bit EL1 is supported" + }} + +It looks like this feature is limited to a configuration we do not +currently have. + +(4) Let's try probing CPU features for the Cortex-A15 CPU type:: + + (QEMU) query-cpu-model-expansion type=full model={"name":"cortex-a15"} + {"return": {"model": {"name": "cortex-a15", "props": {"pmu": true}}}} + +Only the `pmu` CPU feature is available. + +A note about CPU feature dependencies +------------------------------------- + +It's possible for features to have dependencies on other features. That is, +it may be possible to change one feature at a time without error, but +when attempting to change all features at once an error could occur +depending on the order they are processed. It's also possible that changing +all at once doesn't generate an error, because a feature's dependencies +are satisfied with other features, but the same feature cannot be changed +independently without error. For these reasons callers should always +attempt to make their desired changes all at once in order to ensure the +collection is valid. + +A note about CPU models and KVM +------------------------------- + +Named CPU models generally do not work with KVM. There are a few cases +that do work, e.g. using the named CPU model `cortex-a57` with KVM on a +seattle host, but mostly if KVM is enabled the `host` CPU type must be +used. This means the guest is provided all the same CPU features as the +host CPU type has. And, for this reason, the `host` CPU type should +enable all CPU features that the host has by default. Indeed it's even +a bit strange to allow disabling CPU features that the host has when using +the `host` CPU type, but in the absence of CPU models it's the best we can +do if we want to launch guests without all the host's CPU features enabled. + +Enabling KVM also affects the `query-cpu-model-expansion` QMP command. The +effect is not limited only to specific features, as pointed out in example +(3) of "CPU Feature Probing", but also to which CPU types may be expanded. +When KVM is enabled, only the `max`, `host`, and current CPU type may be +expanded. This restriction is necessary as it's not possible to know all +CPU types that may work with KVM, but it does impose a small risk of users +experiencing unexpected errors. For example on a seattle, as mentioned +above, the `cortex-a57` CPU type is also valid when KVM is enabled. +Therefore a user could use the `host` CPU type for the current type, but +then attempt to query `cortex-a57`; however, that query will fail with our +restrictions. This shouldn't be an issue though as management layers and +users have been preferring the `host` CPU type for use with KVM for quite +some time. Additionally, if the KVM-enabled QEMU instance running on a +seattle host is using the `cortex-a57` CPU type, then querying `cortex-a57` +will work.
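+
+To illustrate the restriction above, probing the `host` CPU type on a
+KVM-enabled instance uses the same command as in "CPU Feature Probing".
+The reply below is only a sketch; the property values reported depend on
+the host hardware::
+
+  (QEMU) query-cpu-model-expansion type=full model={"name":"host"}
+  { "return": {
+      "model": { "name": "host", "props": {
+          "pmu": true, "aarch64": true
+      }}}}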
+ +Using CPU Features +================== + +After determining which CPU features are available and supported for a +given CPU type, then they may be selectively enabled or disabled on the +QEMU command line with that CPU type:: + + $ qemu-system-aarch64 -M virt -cpu max,pmu=off + +The example above disables the PMU for the `max` CPU type. + diff --git a/exec.c b/exec.c index 85c6d803530d8b3c67e6b15f5d2e43c751284bbe..8822c241d85976de7b8138b3d84ab2d58f2d4350 100644 --- a/exec.c +++ b/exec.c @@ -3834,6 +3834,7 @@ int64_t address_space_cache_init(MemoryRegionCache *cache, AddressSpaceDispatch *d; hwaddr l; MemoryRegion *mr; + Int128 diff; assert(len > 0); @@ -3842,6 +3843,16 @@ int64_t address_space_cache_init(MemoryRegionCache *cache, d = flatview_to_dispatch(cache->fv); cache->mrs = *address_space_translate_internal(d, addr, &cache->xlat, &l, true); + /* + * cache->xlat is now relative to cache->mrs.mr, not to the section itself. + * Take that into account to compute how many bytes are there between + * cache->xlat and the end of the section. + */ + + diff = int128_sub(cache->mrs.size, + int128_make64(cache->xlat - cache->mrs.offset_within_region)); + l = int128_get64(int128_min(diff, int128_make64(l))); + mr = cache->mrs.mr; memory_region_ref(mr); if (memory_access_is_direct(mr, is_write)) { diff --git a/hw/display/ati.c b/hw/display/ati.c index 5943040416ed700c2f34a93881c8c7c546212ddf..b17569874e462bbbb8f809500de3220ab8fba310 100644 --- a/hw/display/ati.c +++ b/hw/display/ati.c @@ -19,6 +19,7 @@ #include "qemu/osdep.h" #include "ati_int.h" #include "ati_regs.h" +#include "vga-access.h" #include "vga_regs.h" #include "qemu/log.h" #include "qemu/module.h" @@ -125,20 +126,19 @@ static void ati_vga_switch_mode(ATIVGAState *s) static void ati_cursor_define(ATIVGAState *s) { uint8_t data[1024]; - uint8_t *src; + uint32_t srcoff; int i, j, idx = 0; if ((s->regs.cur_offset & BIT(31)) || s->cursor_guest_mode) { return; /* Do not update cursor if locked or rendered by guest */ } /* FIXME handle cur_hv_offs correctly */ - src = s->vga.vram_ptr + (s->regs.crtc_offset & 0x07ffffff) + - s->regs.cur_offset - (s->regs.cur_hv_offs >> 16) - - (s->regs.cur_hv_offs & 0xffff) * 16; + srcoff = s->regs.cur_offset - + (s->regs.cur_hv_offs >> 16) - (s->regs.cur_hv_offs & 0xffff) * 16; for (i = 0; i < 64; i++) { for (j = 0; j < 8; j++, idx++) { - data[idx] = src[i * 16 + j]; - data[512 + idx] = src[i * 16 + j + 8]; + data[idx] = vga_read_byte(&s->vga, srcoff + i * 16 + j); + data[512 + idx] = vga_read_byte(&s->vga, srcoff + i * 16 + j + 8); } } if (!s->cursor) { @@ -180,7 +180,7 @@ static void ati_cursor_invalidate(VGACommonState *vga) static void ati_cursor_draw_line(VGACommonState *vga, uint8_t *d, int scr_y) { ATIVGAState *s = container_of(vga, ATIVGAState, vga); - uint8_t *src; + uint32_t srcoff; uint32_t *dp = (uint32_t *)d; int i, j, h; @@ -190,14 +190,13 @@ static void ati_cursor_draw_line(VGACommonState *vga, uint8_t *d, int scr_y) return; } /* FIXME handle cur_hv_offs correctly */ - src = s->vga.vram_ptr + (s->regs.crtc_offset & 0x07ffffff) + - s->cursor_offset + (scr_y - vga->hw_cursor_y) * 16; + srcoff = s->cursor_offset + (scr_y - vga->hw_cursor_y) * 16; dp = &dp[vga->hw_cursor_x]; h = ((s->regs.crtc_h_total_disp >> 16) + 1) * 8; for (i = 0; i < 8; i++) { uint32_t color; - uint8_t abits = src[i]; - uint8_t xbits = src[i + 8]; + uint8_t abits = vga_read_byte(vga, srcoff + i); + uint8_t xbits = vga_read_byte(vga, srcoff + i + 8); for (j = 0; j < 8; j++, abits <<= 1, xbits <<= 1) { if (abits & BIT(7)) { 
if (xbits & BIT(7)) { diff --git a/hw/display/ati_2d.c b/hw/display/ati_2d.c index 23a8ae0cd8ceb7b59408c0709e2f13b3531d994e..4dc10ea79529b354f6bdeb92e0056def6ed69f30 100644 --- a/hw/display/ati_2d.c +++ b/hw/display/ati_2d.c @@ -75,8 +75,9 @@ void ati_2d_blt(ATIVGAState *s) dst_stride *= bpp; } uint8_t *end = s->vga.vram_ptr + s->vga.vram_size; - if (dst_bits >= end || dst_bits + dst_x + (dst_y + s->regs.dst_height) * - dst_stride >= end) { + if (dst_x > 0x3fff || dst_y > 0x3fff || dst_bits >= end + || dst_bits + dst_x + + (dst_y + s->regs.dst_height) * dst_stride >= end) { qemu_log_mask(LOG_UNIMP, "blt outside vram not implemented\n"); return; } @@ -107,8 +108,9 @@ void ati_2d_blt(ATIVGAState *s) src_bits += s->regs.crtc_offset & 0x07ffffff; src_stride *= bpp; } - if (src_bits >= end || src_bits + src_x + - (src_y + s->regs.dst_height) * src_stride >= end) { + if (src_x > 0x3fff || src_y > 0x3fff || src_bits >= end + || src_bits + src_x + + (src_y + s->regs.dst_height) * src_stride >= end) { qemu_log_mask(LOG_UNIMP, "blt outside vram not implemented\n"); return; } diff --git a/hw/display/vga-access.h b/hw/display/vga-access.h new file mode 100644 index 0000000000000000000000000000000000000000..c0fbd9958b2e0c242f59e85b5b5e5036a9df14fd --- /dev/null +++ b/hw/display/vga-access.h @@ -0,0 +1,49 @@ +/* + * QEMU VGA Emulator templates + * + * Copyright (c) 2003 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +static inline uint8_t vga_read_byte(VGACommonState *vga, uint32_t addr) +{ + return vga->vram_ptr[addr & vga->vbe_size_mask]; +} + +static inline uint16_t vga_read_word_le(VGACommonState *vga, uint32_t addr) +{ + uint32_t offset = addr & vga->vbe_size_mask & ~1; + uint16_t *ptr = (uint16_t *)(vga->vram_ptr + offset); + return lduw_le_p(ptr); +} + +static inline uint16_t vga_read_word_be(VGACommonState *vga, uint32_t addr) +{ + uint32_t offset = addr & vga->vbe_size_mask & ~1; + uint16_t *ptr = (uint16_t *)(vga->vram_ptr + offset); + return lduw_be_p(ptr); +} + +static inline uint32_t vga_read_dword_le(VGACommonState *vga, uint32_t addr) +{ + uint32_t offset = addr & vga->vbe_size_mask & ~3; + uint32_t *ptr = (uint32_t *)(vga->vram_ptr + offset); + return ldl_le_p(ptr); +} diff --git a/hw/display/vga-helpers.h b/hw/display/vga-helpers.h index 5a752b3f9efd22b462f0fb49ae1dbfcf4c0b2072..5b6c02faa6734013c5e62e427f3abcf7739141ad 100644 --- a/hw/display/vga-helpers.h +++ b/hw/display/vga-helpers.h @@ -21,6 +21,7 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ +#include "vga-access.h" static inline void vga_draw_glyph_line(uint8_t *d, uint32_t font_data, uint32_t xorcol, uint32_t bgcol) @@ -95,32 +96,6 @@ static void vga_draw_glyph9(uint8_t *d, int linesize, } while (--h); } -static inline uint8_t vga_read_byte(VGACommonState *vga, uint32_t addr) -{ - return vga->vram_ptr[addr & vga->vbe_size_mask]; -} - -static inline uint16_t vga_read_word_le(VGACommonState *vga, uint32_t addr) -{ - uint32_t offset = addr & vga->vbe_size_mask & ~1; - uint16_t *ptr = (uint16_t *)(vga->vram_ptr + offset); - return lduw_le_p(ptr); -} - -static inline uint16_t vga_read_word_be(VGACommonState *vga, uint32_t addr) -{ - uint32_t offset = addr & vga->vbe_size_mask & ~1; - uint16_t *ptr = (uint16_t *)(vga->vram_ptr + offset); - return lduw_be_p(ptr); -} - -static inline uint32_t vga_read_dword_le(VGACommonState *vga, uint32_t addr) -{ - uint32_t offset = addr & vga->vbe_size_mask & ~3; - uint32_t *ptr = (uint32_t *)(vga->vram_ptr + offset); - return ldl_le_p(ptr); -} - /* * 4 color mode */ diff --git a/hw/ide/atapi.c b/hw/ide/atapi.c index 1b0f66cc089be5324ba073d73efc6128bf78ddd1..fc9dc87f0391395dc9a760d105ff23166b7f798e 100644 --- a/hw/ide/atapi.c +++ b/hw/ide/atapi.c @@ -300,6 +300,9 @@ void ide_atapi_cmd_reply_end(IDEState *s) s->packet_transfer_size -= size; s->elementary_transfer_size -= size; s->io_buffer_index += size; + if (s->io_buffer_index > s->io_buffer_total_len) { + return; + } /* Some adapters process PIO data right away. In that case, we need * to avoid mutual recursion between ide_transfer_start diff --git a/hw/ide/core.c b/hw/ide/core.c index 6d96c3a8c3f0fd97f983128c900dbd5cab79b338..1bedad29de3aad91c277e79dd0ce4ed565dcfe94 100644 --- a/hw/ide/core.c +++ b/hw/ide/core.c @@ -705,6 +705,7 @@ void ide_cancel_dma_sync(IDEState *s) * whole DMA operation will be submitted to disk with a single * aio operation with preadv/pwritev. 
*/ + assert(s->blk); if (s->bus->dma->aiocb) { trace_ide_cancel_dma_sync_remaining(); blk_drain(s->blk); diff --git a/hw/ide/pci.c b/hw/ide/pci.c index 4c6fb9a68ee2e61a83374c108493fda73440a0b3..ac291858d7cf3fab357c26dd8fbd9302b4f7143a 100644 --- a/hw/ide/pci.c +++ b/hw/ide/pci.c @@ -295,7 +295,10 @@ void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val) /* Ignore writes to SSBM if it keeps the old value */ if ((val & BM_CMD_START) != (bm->cmd & BM_CMD_START)) { if (!(val & BM_CMD_START)) { - ide_cancel_dma_sync(idebus_active_if(bm->bus)); + IDEState *s = idebus_active_if(bm->bus); + if (s->blk) { + ide_cancel_dma_sync(s); + } bm->status &= ~BM_STATUS_DMAING; } else { bm->cur_addr = bm->addr; diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c index 9f8f0d3ff555dc3248641636f5bec6755fc94c53..5013ec978c45d1f04d87f866a468811f059e21af 100644 --- a/hw/intc/armv7m_nvic.c +++ b/hw/intc/armv7m_nvic.c @@ -1223,29 +1223,29 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs) case 0xd44: /* PFR1. */ return cpu->id_pfr1; case 0xd48: /* DFR0. */ - return cpu->id_dfr0; + return cpu->isar.regs[ID_DFR0]; case 0xd4c: /* AFR0. */ return cpu->id_afr0; case 0xd50: /* MMFR0. */ - return cpu->id_mmfr0; + return cpu->isar.regs[ID_MMFR0]; case 0xd54: /* MMFR1. */ - return cpu->id_mmfr1; + return cpu->isar.regs[ID_MMFR1]; case 0xd58: /* MMFR2. */ - return cpu->id_mmfr2; + return cpu->isar.regs[ID_MMFR2]; case 0xd5c: /* MMFR3. */ - return cpu->id_mmfr3; + return cpu->isar.regs[ID_MMFR3]; case 0xd60: /* ISAR0. */ - return cpu->isar.id_isar0; + return cpu->isar.regs[ID_ISAR0]; case 0xd64: /* ISAR1. */ - return cpu->isar.id_isar1; + return cpu->isar.regs[ID_ISAR1]; case 0xd68: /* ISAR2. */ - return cpu->isar.id_isar2; + return cpu->isar.regs[ID_ISAR2]; case 0xd6c: /* ISAR3. */ - return cpu->isar.id_isar3; + return cpu->isar.regs[ID_ISAR3]; case 0xd70: /* ISAR4. */ - return cpu->isar.id_isar4; + return cpu->isar.regs[ID_ISAR4]; case 0xd74: /* ISAR5. */ - return cpu->isar.id_isar5; + return cpu->isar.regs[ID_ISAR5]; case 0xd78: /* CLIDR */ return cpu->clidr; case 0xd7c: /* CTR */ @@ -1450,11 +1450,11 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs) } return cpu->env.v7m.fpdscr[attrs.secure]; case 0xf40: /* MVFR0 */ - return cpu->isar.mvfr0; + return cpu->isar.regs[MVFR0]; case 0xf44: /* MVFR1 */ - return cpu->isar.mvfr1; + return cpu->isar.regs[MVFR1]; case 0xf48: /* MVFR2 */ - return cpu->isar.mvfr2; + return cpu->isar.regs[MVFR2]; default: bad_offset: qemu_log_mask(LOG_GUEST_ERROR, "NVIC: Bad read offset 0x%x\n", offset); diff --git a/hw/net/e1000e_core.c b/hw/net/e1000e_core.c index 2a221c2ef99587c4a75e91877f92491ae1220373..e45d47f5842c90c10a0cc8fb4d02c3b5c392aec3 100644 --- a/hw/net/e1000e_core.c +++ b/hw/net/e1000e_core.c @@ -1595,13 +1595,13 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt, (const char *) &fcs_pad, e1000x_fcs_len(core->mac)); } } - desc_offset += desc_size; - if (desc_offset >= total_size) { - is_last = true; - } } else { /* as per intel docs; skip descriptors with null buf addr */ trace_e1000e_rx_null_descriptor(); } + desc_offset += desc_size; + if (desc_offset >= total_size) { + is_last = true; + } e1000e_write_rx_descr(core, desc, is_last ? core->rx_pkt : NULL, rss_info, do_ps ? 
ps_hdr_len : 0, &bastate.written); diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c index 10d01d0058c6241d9a00900d3c3b511561e9734f..ecc4f5bcf000528ecb0850ba6d270b4bb099fce9 100644 --- a/hw/net/vmxnet3.c +++ b/hw/net/vmxnet3.c @@ -1418,6 +1418,7 @@ static void vmxnet3_activate_device(VMXNET3State *s) vmxnet3_setup_rx_filtering(s); /* Cache fields from shared memory */ s->mtu = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.misc.mtu); + assert(VMXNET3_MIN_MTU <= s->mtu && s->mtu < VMXNET3_MAX_MTU); VMW_CFPRN("MTU is %u", s->mtu); s->max_rx_frags = @@ -1471,7 +1472,9 @@ static void vmxnet3_activate_device(VMXNET3State *s) /* Read rings memory locations for TX queues */ pa = VMXNET3_READ_TX_QUEUE_DESCR64(d, qdescr_pa, conf.txRingBasePA); size = VMXNET3_READ_TX_QUEUE_DESCR32(d, qdescr_pa, conf.txRingSize); - + if (size > VMXNET3_TX_RING_MAX_SIZE) { + size = VMXNET3_TX_RING_MAX_SIZE; + } vmxnet3_ring_init(d, &s->txq_descr[i].tx_ring, pa, size, sizeof(struct Vmxnet3_TxDesc), false); VMXNET3_RING_DUMP(VMW_CFPRN, "TX", i, &s->txq_descr[i].tx_ring); @@ -1481,6 +1484,9 @@ static void vmxnet3_activate_device(VMXNET3State *s) /* TXC ring */ pa = VMXNET3_READ_TX_QUEUE_DESCR64(d, qdescr_pa, conf.compRingBasePA); size = VMXNET3_READ_TX_QUEUE_DESCR32(d, qdescr_pa, conf.compRingSize); + if (size > VMXNET3_TC_RING_MAX_SIZE) { + size = VMXNET3_TC_RING_MAX_SIZE; + } vmxnet3_ring_init(d, &s->txq_descr[i].comp_ring, pa, size, sizeof(struct Vmxnet3_TxCompDesc), true); VMXNET3_RING_DUMP(VMW_CFPRN, "TXC", i, &s->txq_descr[i].comp_ring); @@ -1522,6 +1528,9 @@ static void vmxnet3_activate_device(VMXNET3State *s) /* RX rings */ pa = VMXNET3_READ_RX_QUEUE_DESCR64(d, qd_pa, conf.rxRingBasePA[j]); size = VMXNET3_READ_RX_QUEUE_DESCR32(d, qd_pa, conf.rxRingSize[j]); + if (size > VMXNET3_RX_RING_MAX_SIZE) { + size = VMXNET3_RX_RING_MAX_SIZE; + } vmxnet3_ring_init(d, &s->rxq_descr[i].rx_ring[j], pa, size, sizeof(struct Vmxnet3_RxDesc), false); VMW_CFPRN("RX queue %d:%d: Base: %" PRIx64 ", Size: %d", @@ -1531,6 +1540,9 @@ static void vmxnet3_activate_device(VMXNET3State *s) /* RXC ring */ pa = VMXNET3_READ_RX_QUEUE_DESCR64(d, qd_pa, conf.compRingBasePA); size = VMXNET3_READ_RX_QUEUE_DESCR32(d, qd_pa, conf.compRingSize); + if (size > VMXNET3_RC_RING_MAX_SIZE) { + size = VMXNET3_RC_RING_MAX_SIZE; + } vmxnet3_ring_init(d, &s->rxq_descr[i].comp_ring, pa, size, sizeof(struct Vmxnet3_RxCompDesc), true); VMW_CFPRN("RXC queue %d: Base: %" PRIx64 ", Size: %d", i, pa, size); diff --git a/hw/pci/msix.c b/hw/pci/msix.c index d39dcf32e8c98da14ee4ca1d31bda09a0f6a591c..ec43f16875885bb4b42f77005cc78383e6753dc6 100644 --- a/hw/pci/msix.c +++ b/hw/pci/msix.c @@ -192,6 +192,15 @@ static void msix_table_mmio_write(void *opaque, hwaddr addr, msix_handle_mask_update(dev, vector, was_masked); } +static bool msix_table_accepts(void *opaque, hwaddr addr, unsigned size, + bool is_write, MemTxAttrs attrs) +{ + PCIDevice *dev = opaque; + uint16_t tbl_size = dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE; + + return dev->msix_table + addr + 4 <= dev->msix_table + tbl_size; +} + static const MemoryRegionOps msix_table_mmio_ops = { .read = msix_table_mmio_read, .write = msix_table_mmio_write, @@ -199,6 +208,7 @@ static const MemoryRegionOps msix_table_mmio_ops = { .valid = { .min_access_size = 4, .max_access_size = 4, + .accepts = msix_table_accepts }, }; @@ -220,6 +230,15 @@ static void msix_pba_mmio_write(void *opaque, hwaddr addr, { } +static bool msix_pba_accepts(void *opaque, hwaddr addr, unsigned size, + bool is_write, MemTxAttrs attrs) +{ + 
PCIDevice *dev = opaque; + uint16_t pba_size = QEMU_ALIGN_UP(dev->msix_entries_nr, 64) / 8; + + return dev->msix_pba + addr + 4 <= dev->msix_pba + pba_size; +} + static const MemoryRegionOps msix_pba_mmio_ops = { .read = msix_pba_mmio_read, .write = msix_pba_mmio_write, @@ -227,6 +246,7 @@ static const MemoryRegionOps msix_pba_mmio_ops = { .valid = { .min_access_size = 4, .max_access_size = 4, + .accepts = msix_pba_accepts }, }; diff --git a/hw/pci/pci.c b/hw/pci/pci.c index 8076a80ab3a981c023d97a8f585a16e84f123abb..602fc566cce9b6ec4147628a13c96e0d46cbb77a 100644 --- a/hw/pci/pci.c +++ b/hw/pci/pci.c @@ -249,6 +249,9 @@ static void pci_change_irq_level(PCIDevice *pci_dev, int irq_num, int change) PCIBus *bus; for (;;) { bus = pci_get_bus(pci_dev); + if (!bus) { + return; + } irq_num = bus->map_irq(pci_dev, irq_num); if (bus->set_irq) break; diff --git a/hw/sd/sdhci.c b/hw/sd/sdhci.c index 65a530aee4192e8ab6e26ee9eb71b8cacbb39f9a..d4ee6bd01f5de448f9f7b32146618587b0d848c3 100644 --- a/hw/sd/sdhci.c +++ b/hw/sd/sdhci.c @@ -613,6 +613,7 @@ static void sdhci_sdma_transfer_multi_blocks(SDHCIState *s) s->blkcnt--; } } + assert(s->data_count <= s->buf_maxsz && s->data_count > begin); dma_memory_write(s->dma_as, s->sdmasysad, &s->fifo_buffer[begin], s->data_count - begin); s->sdmasysad += s->data_count - begin; @@ -635,6 +636,7 @@ static void sdhci_sdma_transfer_multi_blocks(SDHCIState *s) s->data_count = block_size; boundary_count -= block_size - begin; } + assert(s->data_count <= s->buf_maxsz && s->data_count > begin); dma_memory_read(s->dma_as, s->sdmasysad, &s->fifo_buffer[begin], s->data_count - begin); s->sdmasysad += s->data_count - begin; diff --git a/hw/usb/hcd-ehci.c b/hw/usb/hcd-ehci.c index 5f089f30054b3a1e2fb3bd90c46647a9725da250..433e6a4fc001138e7fa2f2d75ae9534fff8169f4 100644 --- a/hw/usb/hcd-ehci.c +++ b/hw/usb/hcd-ehci.c @@ -1370,7 +1370,10 @@ static int ehci_execute(EHCIPacket *p, const char *action) spd = (p->pid == USB_TOKEN_IN && NLPTR_TBIT(p->qtd.altnext) == 0); usb_packet_setup(&p->packet, p->pid, ep, 0, p->qtdaddr, spd, (p->qtd.token & QTD_TOKEN_IOC) != 0); - usb_packet_map(&p->packet, &p->sgl); + if (usb_packet_map(&p->packet, &p->sgl)) { + qemu_sglist_destroy(&p->sgl); + return -1; + } p->async = EHCI_ASYNC_INITIALIZED; } @@ -1449,7 +1452,10 @@ static int ehci_process_itd(EHCIState *ehci, if (ep && ep->type == USB_ENDPOINT_XFER_ISOC) { usb_packet_setup(&ehci->ipacket, pid, ep, 0, addr, false, (itd->transact[i] & ITD_XACT_IOC) != 0); - usb_packet_map(&ehci->ipacket, &ehci->isgl); + if (usb_packet_map(&ehci->ipacket, &ehci->isgl)) { + qemu_sglist_destroy(&ehci->isgl); + return -1; + } usb_handle_packet(dev, &ehci->ipacket); usb_packet_unmap(&ehci->ipacket, &ehci->isgl); } else { diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c index 4f6fdbc0a7dfc0ab1a219a9c26d120caef5ddb9a..d2dd8efd58348580925113f28be80944d7b430a7 100644 --- a/hw/usb/hcd-ohci.c +++ b/hw/usb/hcd-ohci.c @@ -689,6 +689,10 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed, the next ISO TD of the same ED */ trace_usb_ohci_iso_td_relative_frame_number_big(relative_frame_number, frame_count); + if (OHCI_CC_DATAOVERRUN == OHCI_BM(iso_td.flags, TD_CC)) { + /* avoid infinite loop */ + return 1; + } OHCI_SET_BM(iso_td.flags, TD_CC, OHCI_CC_DATAOVERRUN); ed->head &= ~OHCI_DPTR_MASK; ed->head |= (iso_td.next & OHCI_DPTR_MASK); @@ -729,7 +733,11 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed, } start_offset = iso_td.offset[relative_frame_number]; - next_offset = 
iso_td.offset[relative_frame_number + 1]; + if (relative_frame_number < frame_count) { + next_offset = iso_td.offset[relative_frame_number + 1]; + } else { + next_offset = iso_td.be; + } if (!(OHCI_BM(start_offset, TD_PSW_CC) & 0xe) || ((relative_frame_number < frame_count) && @@ -762,7 +770,12 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed, } } else { /* Last packet in the ISO TD */ - end_addr = iso_td.be; + end_addr = next_offset; + } + + if (start_addr > end_addr) { + trace_usb_ohci_iso_td_bad_cc_overrun(start_addr, end_addr); + return 1; } if ((start_addr & OHCI_PAGE_MASK) != (end_addr & OHCI_PAGE_MASK)) { @@ -771,6 +784,9 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed, } else { len = end_addr - start_addr + 1; } + if (len > sizeof(ohci->usb_buf)) { + len = sizeof(ohci->usb_buf); + } if (len && dir != OHCI_TD_DIR_IN) { if (ohci_copy_iso_td(ohci, start_addr, end_addr, ohci->usb_buf, len, @@ -973,8 +989,16 @@ static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed) if ((td.cbp & 0xfffff000) != (td.be & 0xfffff000)) { len = (td.be & 0xfff) + 0x1001 - (td.cbp & 0xfff); } else { + if (td.cbp > td.be) { + trace_usb_ohci_iso_td_bad_cc_overrun(td.cbp, td.be); + ohci_die(ohci); + return 1; + } len = (td.be - td.cbp) + 1; } + if (len > sizeof(ohci->usb_buf)) { + len = sizeof(ohci->usb_buf); + } pktlen = len; if (len && dir != OHCI_TD_DIR_IN) { diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h index 744e888e68fea0946d5ffe4997fbdfc8afac1bee..4844edc3a32ef38ad98a79a551499fa1a9a260b6 100644 --- a/linux-headers/linux/kvm.h +++ b/linux-headers/linux/kvm.h @@ -995,6 +995,8 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_ARM_PTRAUTH_ADDRESS 171 #define KVM_CAP_ARM_PTRAUTH_GENERIC 172 +#define KVM_CAP_ARM_CPU_FEATURE 555 + #ifdef KVM_CAP_IRQ_ROUTING struct kvm_irq_routing_irqchip { diff --git a/migration/channel.c b/migration/channel.c index 7462181484a4dece0e4a393b41a873aa244c40c6..46ed40b89cc3f35b130511091119a715917cc161 100644 --- a/migration/channel.c +++ b/migration/channel.c @@ -99,5 +99,6 @@ void migration_channel_connect(MigrationState *s, } } migrate_fd_connect(s, error); + g_free(s->hostname); error_free(error); } diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c new file mode 100644 index 0000000000000000000000000000000000000000..9a6d0e2cc698e211fa3775ec1bb302b484a1dc50 --- /dev/null +++ b/migration/dirtyrate.c @@ -0,0 +1,429 @@ +/* + * Dirtyrate implement code + * + * Copyright (c) 2020 HUAWEI TECHNOLOGIES CO.,LTD. + * + * Authors: + * Chuan Zheng + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include <zlib.h> +#include "qapi/error.h" +#include "cpu.h" +#include "exec/memory.h" +#include "exec/ram_addr.h" +#include "exec/target_page.h" +#include "qemu/rcu_queue.h" +#include "qemu/error-report.h" +#include "qapi/qapi-commands-migration.h" +#include "ram.h" +#include "trace.h" +#include "dirtyrate.h" + +static int CalculatingState = DIRTY_RATE_STATUS_UNSTARTED; +static struct DirtyRateStat DirtyStat; + +static int64_t set_sample_page_period(int64_t msec, int64_t initial_time) +{ + int64_t current_time; + + current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); + if ((current_time - initial_time) >= msec) { + msec = current_time - initial_time; + } else { + g_usleep((msec + initial_time - current_time) * 1000); + } + + return msec; +} + +static bool is_sample_period_valid(int64_t sec) +{ + if (sec < MIN_FETCH_DIRTYRATE_TIME_SEC || + sec > MAX_FETCH_DIRTYRATE_TIME_SEC) { + return false; + } + + return true; +} + +static int dirtyrate_set_state(int *state, int old_state, int new_state) +{ + assert(new_state < DIRTY_RATE_STATUS__MAX); + trace_dirtyrate_set_state(DirtyRateStatus_str(new_state)); + if (atomic_cmpxchg(state, old_state, new_state) == old_state) { + return 0; + } else { + return -1; + } +} + +static struct DirtyRateInfo *query_dirty_rate_info(void) +{ + int64_t dirty_rate = DirtyStat.dirty_rate; + struct DirtyRateInfo *info = g_malloc0(sizeof(DirtyRateInfo)); + + if (atomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURED) { + info->has_dirty_rate = true; + info->dirty_rate = dirty_rate; + } + + info->status = CalculatingState; + info->start_time = DirtyStat.start_time; + info->calc_time = DirtyStat.calc_time; + + trace_query_dirty_rate_info(DirtyRateStatus_str(CalculatingState)); + + return info; +} + +static void init_dirtyrate_stat(int64_t start_time, int64_t calc_time) +{ + DirtyStat.total_dirty_samples = 0; + DirtyStat.total_sample_count = 0; + DirtyStat.total_block_mem_MB = 0; + DirtyStat.dirty_rate = -1; + DirtyStat.start_time = start_time; + DirtyStat.calc_time = calc_time; +} + +static void update_dirtyrate_stat(struct RamblockDirtyInfo *info) +{ + DirtyStat.total_dirty_samples += info->sample_dirty_count; + DirtyStat.total_sample_count += info->sample_pages_count; + /* size of total pages in MB */ + DirtyStat.total_block_mem_MB += (info->ramblock_pages * + TARGET_PAGE_SIZE) >> 20; +} + +static void update_dirtyrate(uint64_t msec) +{ + uint64_t dirtyrate; + uint64_t total_dirty_samples = DirtyStat.total_dirty_samples; + uint64_t total_sample_count = DirtyStat.total_sample_count; + uint64_t total_block_mem_MB = DirtyStat.total_block_mem_MB; + + dirtyrate = total_dirty_samples * total_block_mem_MB * + 1000 / (total_sample_count * msec); + + DirtyStat.dirty_rate = dirtyrate; +} + +/* + * get hash result for the sampled memory with length of TARGET_PAGE_SIZE + * in ramblock, which starts from ramblock base address.
+ */ +static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info, + uint64_t vfn) +{ + uint32_t crc; + + crc = crc32(0, (info->ramblock_addr + + vfn * TARGET_PAGE_SIZE), TARGET_PAGE_SIZE); + + trace_get_ramblock_vfn_hash(info->idstr, vfn, crc); + return crc; +} + +static bool save_ramblock_hash(struct RamblockDirtyInfo *info) +{ + unsigned int sample_pages_count; + int i; + GRand *rand; + + sample_pages_count = info->sample_pages_count; + + /* ramblock size less than one page, return success to skip this ramblock */ + if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) { + return true; + } + + info->hash_result = g_try_malloc0_n(sample_pages_count, + sizeof(uint32_t)); + if (!info->hash_result) { + return false; + } + + info->sample_page_vfn = g_try_malloc0_n(sample_pages_count, + sizeof(uint64_t)); + if (!info->sample_page_vfn) { + g_free(info->hash_result); + return false; + } + + rand = g_rand_new(); + for (i = 0; i < sample_pages_count; i++) { + info->sample_page_vfn[i] = g_rand_int_range(rand, 0, + info->ramblock_pages - 1); + info->hash_result[i] = get_ramblock_vfn_hash(info, + info->sample_page_vfn[i]); + } + g_rand_free(rand); + + return true; +} + +static void get_ramblock_dirty_info(RAMBlock *block, + struct RamblockDirtyInfo *info, + struct DirtyRateConfig *config) +{ + uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes; + + /* Right shift 30 bits to calc ramblock size in GB */ + info->sample_pages_count = (qemu_ram_get_used_length(block) * + sample_pages_per_gigabytes) >> 30; + /* Right shift TARGET_PAGE_BITS to calc page count */ + info->ramblock_pages = qemu_ram_get_used_length(block) >> + TARGET_PAGE_BITS; + info->ramblock_addr = qemu_ram_get_host_addr(block); + strcpy(info->idstr, qemu_ram_get_idstr(block)); +} + +static void free_ramblock_dirty_info(struct RamblockDirtyInfo *infos, int count) +{ + int i; + + if (!infos) { + return; + } + + for (i = 0; i < count; i++) { + g_free(infos[i].sample_page_vfn); + g_free(infos[i].hash_result); + } + g_free(infos); +} + +static bool skip_sample_ramblock(RAMBlock *block) +{ + /* + * Sample only blocks larger than MIN_RAMBLOCK_SIZE. 
+ */ + if (qemu_ram_get_used_length(block) < (MIN_RAMBLOCK_SIZE << 10)) { + trace_skip_sample_ramblock(block->idstr, + qemu_ram_get_used_length(block)); + return true; + } + + return false; +} + +static bool record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo, + struct DirtyRateConfig config, + int *block_count) +{ + struct RamblockDirtyInfo *info = NULL; + struct RamblockDirtyInfo *dinfo = NULL; + RAMBlock *block = NULL; + int total_count = 0; + int index = 0; + bool ret = false; + + RAMBLOCK_FOREACH_MIGRATABLE(block) { + if (skip_sample_ramblock(block)) { + continue; + } + total_count++; + } + + dinfo = g_try_malloc0_n(total_count, sizeof(struct RamblockDirtyInfo)); + if (dinfo == NULL) { + goto out; + } + + RAMBLOCK_FOREACH_MIGRATABLE(block) { + if (skip_sample_ramblock(block)) { + continue; + } + if (index >= total_count) { + break; + } + info = &dinfo[index]; + get_ramblock_dirty_info(block, info, &config); + if (!save_ramblock_hash(info)) { + goto out; + } + index++; + } + ret = true; + +out: + *block_count = index; + *block_dinfo = dinfo; + return ret; +} + +static void calc_page_dirty_rate(struct RamblockDirtyInfo *info) +{ + uint32_t crc; + int i; + + for (i = 0; i < info->sample_pages_count; i++) { + crc = get_ramblock_vfn_hash(info, info->sample_page_vfn[i]); + if (crc != info->hash_result[i]) { + trace_calc_page_dirty_rate(info->idstr, crc, info->hash_result[i]); + info->sample_dirty_count++; + } + } +} + +static struct RamblockDirtyInfo * +find_block_matched(RAMBlock *block, int count, + struct RamblockDirtyInfo *infos) +{ + int i; + struct RamblockDirtyInfo *matched; + + for (i = 0; i < count; i++) { + if (!strcmp(infos[i].idstr, qemu_ram_get_idstr(block))) { + break; + } + } + + if (i == count) { + return NULL; + } + + if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) || + infos[i].ramblock_pages != + (qemu_ram_get_used_length(block) >> TARGET_PAGE_BITS)) { + trace_find_page_matched(block->idstr); + return NULL; + } + + matched = &infos[i]; + + return matched; +} + +static bool compare_page_hash_info(struct RamblockDirtyInfo *info, + int block_count) +{ + struct RamblockDirtyInfo *block_dinfo = NULL; + RAMBlock *block = NULL; + + RAMBLOCK_FOREACH_MIGRATABLE(block) { + if (skip_sample_ramblock(block)) { + continue; + } + block_dinfo = find_block_matched(block, block_count, info); + if (block_dinfo == NULL) { + continue; + } + calc_page_dirty_rate(block_dinfo); + update_dirtyrate_stat(block_dinfo); + } + + if (DirtyStat.total_sample_count == 0) { + return false; + } + + return true; +} + +static void calculate_dirtyrate(struct DirtyRateConfig config) +{ + struct RamblockDirtyInfo *block_dinfo = NULL; + int block_count = 0; + int64_t msec = 0; + int64_t initial_time; + + rcu_register_thread(); + rcu_read_lock(); + initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); + if (!record_ramblock_hash_info(&block_dinfo, config, &block_count)) { + goto out; + } + rcu_read_unlock(); + + msec = config.sample_period_seconds * 1000; + msec = set_sample_page_period(msec, initial_time); + DirtyStat.start_time = initial_time / 1000; + DirtyStat.calc_time = msec / 1000; + + rcu_read_lock(); + if (!compare_page_hash_info(block_dinfo, block_count)) { + goto out; + } + + update_dirtyrate(msec); + +out: + rcu_read_unlock(); + free_ramblock_dirty_info(block_dinfo, block_count); + rcu_unregister_thread(); +} + +void *get_dirtyrate_thread(void *arg) +{ + struct DirtyRateConfig config = *(struct DirtyRateConfig *)arg; + int ret; + int64_t start_time; + int64_t calc_time; + + 
ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_UNSTARTED, + DIRTY_RATE_STATUS_MEASURING); + if (ret == -1) { + error_report("change dirtyrate state failed."); + return NULL; + } + + start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) / 1000; + calc_time = config.sample_period_seconds; + init_dirtyrate_stat(start_time, calc_time); + + calculate_dirtyrate(config); + + ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_MEASURING, + DIRTY_RATE_STATUS_MEASURED); + if (ret == -1) { + error_report("change dirtyrate state failed."); + } + return NULL; +} + +void qmp_calc_dirty_rate(int64_t calc_time, Error **errp) +{ + static struct DirtyRateConfig config; + QemuThread thread; + int ret; + + /* + * If the dirty rate is already being measured, don't attempt to start. + */ + if (atomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURING) { + error_setg(errp, "the dirty rate is already being measured."); + return; + } + + if (!is_sample_period_valid(calc_time)) { + error_setg(errp, "calc-time is out of range[%d, %d].", + MIN_FETCH_DIRTYRATE_TIME_SEC, + MAX_FETCH_DIRTYRATE_TIME_SEC); + return; + } + + /* + * Init calculation state as unstarted. + */ + ret = dirtyrate_set_state(&CalculatingState, CalculatingState, + DIRTY_RATE_STATUS_UNSTARTED); + if (ret == -1) { + error_setg(errp, "init dirty rate calculation state failed."); + return; + } + + config.sample_period_seconds = calc_time; + config.sample_pages_per_gigabytes = DIRTYRATE_DEFAULT_SAMPLE_PAGES; + qemu_thread_create(&thread, "get_dirtyrate", get_dirtyrate_thread, + (void *)&config, QEMU_THREAD_DETACHED); +} + +struct DirtyRateInfo *qmp_query_dirty_rate(Error **errp) +{ + return query_dirty_rate_info(); +} diff --git a/migration/dirtyrate.h b/migration/dirtyrate.h new file mode 100644 index 0000000000000000000000000000000000000000..6ec429534d75fdf74f8b5947fd10bb88647d3caf --- /dev/null +++ b/migration/dirtyrate.h @@ -0,0 +1,69 @@ +/* + * Dirtyrate common functions + * + * Copyright (c) 2020 HUAWEI TECHNOLOGIES CO., LTD. + * + * Authors: + * Chuan Zheng + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_MIGRATION_DIRTYRATE_H +#define QEMU_MIGRATION_DIRTYRATE_H + +/* + * Sample 512 pages per GB as default. + * TODO: Make it configurable. + */ +#define DIRTYRATE_DEFAULT_SAMPLE_PAGES 512 + +/* + * Record ramblock idstr + */ +#define RAMBLOCK_INFO_MAX_LEN 256 + +/* + * Minimum RAMBlock size to sample, in megabytes. + */ +#define MIN_RAMBLOCK_SIZE 128 + +/* + * Take 1s as minimum time for calculation duration + */ +#define MIN_FETCH_DIRTYRATE_TIME_SEC 1 +#define MAX_FETCH_DIRTYRATE_TIME_SEC 60 + +struct DirtyRateConfig { + uint64_t sample_pages_per_gigabytes; /* sample pages per GB */ + int64_t sample_period_seconds; /* time duration between two sampling */ +}; + +/* + * Store dirtypage info for each ramblock. + */ +struct RamblockDirtyInfo { + char idstr[RAMBLOCK_INFO_MAX_LEN]; /* idstr for each ramblock */ + uint8_t *ramblock_addr; /* base address of ramblock we measure */ + uint64_t ramblock_pages; /* ramblock size in TARGET_PAGE_SIZE */ + uint64_t *sample_page_vfn; /* relative offset address for sampled page */ + uint64_t sample_pages_count; /* count of sampled pages */ + uint64_t sample_dirty_count; /* count of dirty pages we measure */ + uint32_t *hash_result; /* array of hash result for sampled pages */ +}; + +/* + * Store calculation statistics for each measure. 
+ */ +struct DirtyRateStat { + uint64_t total_dirty_samples; /* total dirty sampled page */ + uint64_t total_sample_count; /* total sampled pages */ + uint64_t total_block_mem_MB; /* size of total sampled pages in MB */ + int64_t dirty_rate; /* dirty rate in MB/s */ + int64_t start_time; /* calculation start time in units of second */ + int64_t calc_time; /* time duration of two sampling in units of second */ +}; + +void *get_dirtyrate_thread(void *arg); +#endif diff --git a/migration/migration.c b/migration/migration.c index 8f2fc2b4ff6c4f2c2f73d9b4fac5f4203fe70b3f..ffa6d1875f3cd54b7bf91f3e8be505b9d364d96b 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -822,6 +822,25 @@ bool migration_is_setup_or_active(int state) } } +bool migration_is_running(int state) +{ + switch (state) { + case MIGRATION_STATUS_ACTIVE: + case MIGRATION_STATUS_POSTCOPY_ACTIVE: + case MIGRATION_STATUS_POSTCOPY_PAUSED: + case MIGRATION_STATUS_POSTCOPY_RECOVER: + case MIGRATION_STATUS_SETUP: + case MIGRATION_STATUS_PRE_SWITCHOVER: + case MIGRATION_STATUS_DEVICE: + case MIGRATION_STATUS_CANCELLING: + return true; + + default: + return false; + + } +} + static void populate_ram_info(MigrationInfo *info, MigrationState *s) { info->has_ram = true; @@ -1074,7 +1093,7 @@ void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params, MigrationCapabilityStatusList *cap; bool cap_list[MIGRATION_CAPABILITY__MAX]; - if (migration_is_setup_or_active(s->state)) { + if (migration_is_running(s->state)) { error_setg(errp, QERR_MIGRATION_ACTIVE); return; } @@ -1241,12 +1260,12 @@ static void migrate_params_test_apply(MigrateSetParameters *params, if (params->has_tls_creds) { assert(params->tls_creds->type == QTYPE_QSTRING); - dest->tls_creds = g_strdup(params->tls_creds->u.s); + dest->tls_creds = params->tls_creds->u.s; } if (params->has_tls_hostname) { assert(params->tls_hostname->type == QTYPE_QSTRING); - dest->tls_hostname = g_strdup(params->tls_hostname->u.s); + dest->tls_hostname = params->tls_hostname->u.s; } if (params->has_max_bandwidth) { @@ -1588,7 +1607,7 @@ static void migrate_fd_cancel(MigrationState *s) do { old_state = s->state; - if (!migration_is_setup_or_active(old_state)) { + if (!migration_is_running(old_state)) { break; } /* If the migration is paused, kick it out of the pause */ @@ -1710,6 +1729,7 @@ void migrate_init(MigrationState *s) s->migration_thread_running = false; error_free(s->error); s->error = NULL; + s->hostname = NULL; migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP); @@ -1872,9 +1892,7 @@ static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc, return true; } - if (migration_is_setup_or_active(s->state) || - s->state == MIGRATION_STATUS_CANCELLING || - s->state == MIGRATION_STATUS_COLO) { + if (migration_is_running(s->state)) { error_setg(errp, QERR_MIGRATION_ACTIVE); return false; } diff --git a/migration/migration.h b/migration/migration.h index feb344306ae93af6ac7be102f8b62a462734462b..f2bd4ebe3327e416be8e282c5d308381fc639c00 100644 --- a/migration/migration.h +++ b/migration/migration.h @@ -259,6 +259,11 @@ struct MigrationState * (which is in 4M chunk). 
*/ uint8_t clear_bitmap_shift; + + /* + * This save hostname when out-going migration starts + */ + char *hostname; }; void migrate_set_state(int *state, int old_state, int new_state); @@ -277,6 +282,7 @@ void migrate_fd_error(MigrationState *s, const Error *error); void migrate_fd_connect(MigrationState *s, Error *error_in); bool migration_is_setup_or_active(int state); +bool migration_is_running(int state); void migrate_init(MigrationState *s); bool migration_is_blocked(Error **errp); diff --git a/migration/qemu-file.c b/migration/qemu-file.c index 18f480529a3472bb7b0d4cd029327514a421726f..cd96d04e9a4a786afb454301cfa88e17fd45f4d2 100644 --- a/migration/qemu-file.c +++ b/migration/qemu-file.c @@ -51,6 +51,8 @@ struct QEMUFile { unsigned int iovcnt; int last_error; + /* has the file has been shutdown */ + bool shutdown; }; /* @@ -59,10 +61,18 @@ struct QEMUFile { */ int qemu_file_shutdown(QEMUFile *f) { + int ret; + + f->shutdown = true; if (!f->ops->shut_down) { return -ENOSYS; } - return f->ops->shut_down(f->opaque, true, true); + + ret = f->ops->shut_down(f->opaque, true, true); + if (!f->last_error) { + qemu_file_set_error(f, -EIO); + } + return ret; } /* @@ -181,6 +191,10 @@ void qemu_fflush(QEMUFile *f) return; } + if (f->shutdown) { + return; + } + if (f->iovcnt > 0) { expect = iov_size(f->iov, f->iovcnt); ret = f->ops->writev_buffer(f->opaque, f->iov, f->iovcnt, f->pos); @@ -293,6 +307,9 @@ static ssize_t qemu_fill_buffer(QEMUFile *f) f->buf_index = 0; f->buf_size = pending; + if (f->shutdown) { + return 0; + } len = f->ops->get_buffer(f->opaque, f->buf + pending, f->pos, IO_BUF_SIZE - pending); if (len > 0) { @@ -591,6 +608,9 @@ int64_t qemu_ftell(QEMUFile *f) int qemu_file_rate_limit(QEMUFile *f) { + if (f->shutdown) { + return 1; + } if (qemu_file_get_error(f)) { return 1; } diff --git a/migration/ram.c b/migration/ram.c index 840e35480b04f56c033d73c43d8c180d41fe58ca..c6cd5c0c7a4e6822fd0055835986c85cf0b041b5 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -38,6 +38,7 @@ #include "ram.h" #include "migration.h" #include "socket.h" +#include "tls.h" #include "migration/register.h" #include "migration/misc.h" #include "qemu-file.h" @@ -159,21 +160,12 @@ out: return ret; } -static bool ramblock_is_ignored(RAMBlock *block) +bool ramblock_is_ignored(RAMBlock *block) { return !qemu_ram_is_migratable(block) || (migrate_ignore_shared() && qemu_ram_is_shared(block)); } -/* Should be holding either ram_list.mutex, or the RCU lock. 
*/ -#define RAMBLOCK_FOREACH_NOT_IGNORED(block) \ - INTERNAL_RAMBLOCK_FOREACH(block) \ - if (ramblock_is_ignored(block)) {} else - -#define RAMBLOCK_FOREACH_MIGRATABLE(block) \ - INTERNAL_RAMBLOCK_FOREACH(block) \ - if (!qemu_ram_is_migratable(block)) {} else - #undef RAMBLOCK_FOREACH int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque) @@ -630,6 +622,8 @@ typedef struct { uint8_t id; /* channel thread name */ char *name; + /* tls hostname */ + char *tls_hostname; /* channel thread id */ QemuThread thread; /* communication channel */ @@ -1044,6 +1038,8 @@ void multifd_save_cleanup(void) qemu_sem_destroy(&p->sem_sync); g_free(p->name); p->name = NULL; + g_free(p->tls_hostname); + p->tls_hostname = NULL; multifd_pages_clear(p->pages); p->pages = NULL; p->packet_len = 0; @@ -1198,6 +1194,123 @@ out: return NULL; } +static bool multifd_channel_connect(MultiFDSendParams *p, + QIOChannel *ioc, + Error *error); + +static void multifd_tls_outgoing_handshake(QIOTask *task, + gpointer opaque) +{ + MultiFDSendParams *p = opaque; + QIOChannel *ioc = QIO_CHANNEL(qio_task_get_source(task)); + Error *err = NULL; + + if (qio_task_propagate_error(task, &err)) { + trace_multifd_tls_outgoing_handshake_error(ioc, error_get_pretty(err)); + } else { + trace_multifd_tls_outgoing_handshake_complete(ioc); + } + + if (!multifd_channel_connect(p, ioc, err)) { + /* + * Error happen, mark multifd_send_thread status as 'quit' although it + * is not created, and then tell who pay attention to me. + */ + p->quit = true; + qemu_sem_post(&multifd_send_state->channels_ready); + qemu_sem_post(&p->sem_sync); + } +} + +static void *multifd_tls_handshake_thread(void *opaque) +{ + MultiFDSendParams *p = opaque; + QIOChannelTLS *tioc = QIO_CHANNEL_TLS(p->c); + + qio_channel_tls_handshake(tioc, + multifd_tls_outgoing_handshake, + p, + NULL, + NULL); + return NULL; +} + +static void multifd_tls_channel_connect(MultiFDSendParams *p, + QIOChannel *ioc, + Error **errp) +{ + MigrationState *s = migrate_get_current(); + const char *hostname = p->tls_hostname; + QIOChannelTLS *tioc; + + tioc = migration_tls_client_create(s, ioc, hostname, errp); + if (!tioc) { + return; + } + + object_unref(OBJECT(ioc)); + trace_multifd_tls_outgoing_handshake_start(ioc, tioc, hostname); + qio_channel_set_name(QIO_CHANNEL(tioc), "multifd-tls-outgoing"); + p->c = QIO_CHANNEL(tioc); + qemu_thread_create(&p->thread, "multifd-tls-handshake-worker", + multifd_tls_handshake_thread, p, + QEMU_THREAD_JOINABLE); +} + +static bool multifd_channel_connect(MultiFDSendParams *p, + QIOChannel *ioc, + Error *error) +{ + MigrationState *s = migrate_get_current(); + + trace_multifd_set_outgoing_channel( + ioc, object_get_typename(OBJECT(ioc)), p->tls_hostname, error); + + if (!error) { + if (s->parameters.tls_creds && + *s->parameters.tls_creds && + !object_dynamic_cast(OBJECT(ioc), + TYPE_QIO_CHANNEL_TLS)) { + multifd_tls_channel_connect(p, ioc, &error); + if (!error) { + /* + * tls_channel_connect will call back to this + * function after the TLS handshake, + * so we mustn't call multifd_send_thread until then + */ + return true; + } else { + return false; + } + } else { + /* update for tls qio channel */ + p->c = ioc; + qemu_thread_create(&p->thread, p->name, multifd_send_thread, p, + QEMU_THREAD_JOINABLE); + } + return true; + } + + return false; +} + +static void multifd_new_send_channel_cleanup(MultiFDSendParams *p, + QIOChannel *ioc, Error *err) +{ + migrate_set_error(migrate_get_current(), err); + /* Error happen, we need to tell who pay attention to 
me */ + qemu_sem_post(&multifd_send_state->channels_ready); + qemu_sem_post(&p->sem_sync); + /* + * Although multifd_send_thread is not created, but main migration + * thread neet to judge whether it is running, so we need to mark + * its status. + */ + p->quit = true; + object_unref(OBJECT(ioc)); + error_free(err); +} + static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque) { MultiFDSendParams *p = opaque; @@ -1205,25 +1318,19 @@ static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque) Error *local_err = NULL; if (qio_task_propagate_error(task, &local_err)) { - migrate_set_error(migrate_get_current(), local_err); - /* Error happen, we need to tell who pay attention to me */ - qemu_sem_post(&multifd_send_state->channels_ready); - qemu_sem_post(&p->sem_sync); - /* - * Although multifd_send_thread is not created, but main migration - * thread neet to judge whether it is running, so we need to mark - * its status. - */ - p->quit = true; - object_unref(OBJECT(sioc)); - error_free(local_err); + goto cleanup; } else { p->c = QIO_CHANNEL(sioc); qio_channel_set_delay(p->c, false); p->running = true; - qemu_thread_create(&p->thread, p->name, multifd_send_thread, p, - QEMU_THREAD_JOINABLE); + if (!multifd_channel_connect(p, sioc, local_err)) { + goto cleanup; + } + return; } + +cleanup: + multifd_new_send_channel_cleanup(p, sioc, local_err); } int multifd_save_setup(void) @@ -1231,10 +1338,12 @@ int multifd_save_setup(void) int thread_count; uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size(); uint8_t i; + MigrationState *s; if (!migrate_use_multifd()) { return 0; } + s = migrate_get_current(); thread_count = migrate_multifd_channels(); multifd_send_state = g_malloc0(sizeof(*multifd_send_state)); multifd_send_state->params = g_new0(MultiFDSendParams, thread_count); @@ -1255,6 +1364,7 @@ int multifd_save_setup(void) + sizeof(ram_addr_t) * page_count; p->packet = g_malloc0(p->packet_len); p->name = g_strdup_printf("multifdsend_%d", i); + p->tls_hostname = g_strdup(s->hostname); socket_send_channel_create(multifd_new_send_channel_async, p); } return 0; @@ -3515,7 +3625,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) { RAMState **temp = opaque; RAMState *rs = *temp; - int ret; + int ret = 0; int i; int64_t t0; int done = 0; @@ -3584,12 +3694,15 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) ram_control_after_iterate(f, RAM_CONTROL_ROUND); out: - multifd_send_sync_main(rs); - qemu_put_be64(f, RAM_SAVE_FLAG_EOS); - qemu_fflush(f); - ram_counters.transferred += 8; + if (ret >= 0 + && migration_is_setup_or_active(migrate_get_current()->state)) { + multifd_send_sync_main(rs); + qemu_put_be64(f, RAM_SAVE_FLAG_EOS); + qemu_fflush(f); + ram_counters.transferred += 8; - ret = qemu_file_get_error(f); + ret = qemu_file_get_error(f); + } if (ret < 0) { return ret; } @@ -3643,9 +3756,11 @@ static int ram_save_complete(QEMUFile *f, void *opaque) rcu_read_unlock(); - multifd_send_sync_main(rs); - qemu_put_be64(f, RAM_SAVE_FLAG_EOS); - qemu_fflush(f); + if (ret >= 0) { + multifd_send_sync_main(rs); + qemu_put_be64(f, RAM_SAVE_FLAG_EOS); + qemu_fflush(f); + } return ret; } diff --git a/migration/ram.h b/migration/ram.h index a788ff0e8e97726d9b6d56b23c5fe293bb4d57a0..565ec86b1f505c9cd0f999b16432e63deb8274b4 100644 --- a/migration/ram.h +++ b/migration/ram.h @@ -37,6 +37,16 @@ extern MigrationStats ram_counters; extern XBZRLECacheStats xbzrle_counters; extern CompressionStats compression_counters; +bool ramblock_is_ignored(RAMBlock *block); +/* 
Should be holding either ram_list.mutex, or the RCU lock. */ +#define RAMBLOCK_FOREACH_NOT_IGNORED(block) \ + INTERNAL_RAMBLOCK_FOREACH(block) \ + if (ramblock_is_ignored(block)) {} else + +#define RAMBLOCK_FOREACH_MIGRATABLE(block) \ + INTERNAL_RAMBLOCK_FOREACH(block) \ + if (!qemu_ram_is_migratable(block)) {} else + int xbzrle_cache_resize(int64_t new_size, Error **errp); uint64_t ram_bytes_remaining(void); uint64_t ram_bytes_total(void); diff --git a/migration/savevm.c b/migration/savevm.c index 480c511b1971aae7b4b94359b55ec2d1c8aa361e..74b388da4330338b4a798fa054f00c3ebacf953d 100644 --- a/migration/savevm.c +++ b/migration/savevm.c @@ -1411,9 +1411,7 @@ static int qemu_savevm_state(QEMUFile *f, Error **errp) MigrationState *ms = migrate_get_current(); MigrationStatus status; - if (migration_is_setup_or_active(ms->state) || - ms->state == MIGRATION_STATUS_CANCELLING || - ms->state == MIGRATION_STATUS_COLO) { + if (migration_is_running(ms->state)) { error_setg(errp, QERR_MIGRATION_ACTIVE); return -EINVAL; } diff --git a/migration/tls.c b/migration/tls.c index 5171afc6c481d96121e5d0cffaf842be3f852f85..1d5b571d8e033d12d0b526a16305fce399f49be2 100644 --- a/migration/tls.c +++ b/migration/tls.c @@ -22,7 +22,6 @@ #include "channel.h" #include "migration.h" #include "tls.h" -#include "io/channel-tls.h" #include "crypto/tlscreds.h" #include "qemu/error-report.h" #include "qapi/error.h" @@ -126,11 +125,10 @@ static void migration_tls_outgoing_handshake(QIOTask *task, object_unref(OBJECT(ioc)); } - -void migration_tls_channel_connect(MigrationState *s, - QIOChannel *ioc, - const char *hostname, - Error **errp) +QIOChannelTLS *migration_tls_client_create(MigrationState *s, + QIOChannel *ioc, + const char *hostname, + Error **errp) { QCryptoTLSCreds *creds; QIOChannelTLS *tioc; @@ -138,7 +136,7 @@ void migration_tls_channel_connect(MigrationState *s, creds = migration_tls_get_creds( s, QCRYPTO_TLS_CREDS_ENDPOINT_CLIENT, errp); if (!creds) { - return; + return NULL; } if (s->parameters.tls_hostname && *s->parameters.tls_hostname) { @@ -146,15 +144,29 @@ void migration_tls_channel_connect(MigrationState *s, } if (!hostname) { error_setg(errp, "No hostname available for TLS"); - return; + return NULL; } tioc = qio_channel_tls_new_client( ioc, creds, hostname, errp); + + return tioc; +} + +void migration_tls_channel_connect(MigrationState *s, + QIOChannel *ioc, + const char *hostname, + Error **errp) +{ + QIOChannelTLS *tioc; + + tioc = migration_tls_client_create(s, ioc, hostname, errp); if (!tioc) { return; } + /* Save hostname into MigrationState for handshake */ + s->hostname = g_strdup(hostname); trace_migration_tls_outgoing_handshake_start(hostname); qio_channel_set_name(QIO_CHANNEL(tioc), "migration-tls-outgoing"); qio_channel_tls_handshake(tioc, diff --git a/migration/tls.h b/migration/tls.h index cdd70001ed3656368dd9c347c7ed4c3132ec88cc..0cfbe368baea1264f88df999e5095684f38aa683 100644 --- a/migration/tls.h +++ b/migration/tls.h @@ -22,11 +22,17 @@ #define QEMU_MIGRATION_TLS_H #include "io/channel.h" +#include "io/channel-tls.h" void migration_tls_channel_process_incoming(MigrationState *s, QIOChannel *ioc, Error **errp); +QIOChannelTLS *migration_tls_client_create(MigrationState *s, + QIOChannel *ioc, + const char *hostname, + Error **errp); + void migration_tls_channel_connect(MigrationState *s, QIOChannel *ioc, const char *hostname, diff --git a/migration/trace-events b/migration/trace-events index d8e54c367a4b978baead3ea6d1836355d540fdbb..c0640cd42466cf00377d895994b0e5b5774fa8bf 100644 --- 
a/migration/trace-events +++ b/migration/trace-events @@ -93,6 +93,10 @@ multifd_send_sync_main_signal(uint8_t id) "channel %d" multifd_send_sync_main_wait(uint8_t id) "channel %d" multifd_send_thread_end(uint8_t id, uint64_t packets, uint64_t pages) "channel %d packets %" PRIu64 " pages %" PRIu64 multifd_send_thread_start(uint8_t id) "%d" +multifd_tls_outgoing_handshake_start(void *ioc, void *tioc, const char *hostname) "ioc=%p tioc=%p hostname=%s" +multifd_tls_outgoing_handshake_error(void *ioc, const char *err) "ioc=%p err=%s" +multifd_tls_outgoing_handshake_complete(void *ioc) "ioc=%p" +multifd_set_outgoing_channel(void *ioc, const char *ioctype, const char *hostname, void *err) "ioc=%p ioctype=%s hostname=%s err=%p" ram_discard_range(const char *rbname, uint64_t start, size_t len) "%s: start: %" PRIx64 " %zx" ram_load_loop(const char *rbname, uint64_t addr, int flags, void *host) "%s: addr: 0x%" PRIx64 " flags: 0x%x host: %p" ram_load_postcopy_loop(uint64_t addr, int flags) "@%" PRIx64 " %x" @@ -296,3 +300,11 @@ dirty_bitmap_load_bits_zeroes(void) "" dirty_bitmap_load_header(uint32_t flags) "flags 0x%x" dirty_bitmap_load_enter(void) "" dirty_bitmap_load_success(void) "" + +# dirtyrate.c +dirtyrate_set_state(const char *new_state) "new state %s" +query_dirty_rate_info(const char *new_state) "current state %s" +get_ramblock_vfn_hash(const char *idstr, uint64_t vfn, uint32_t crc) "ramblock name: %s, vfn: %"PRIu64 ", crc: %" PRIu32 +calc_page_dirty_rate(const char *idstr, uint32_t new_crc, uint32_t old_crc) "ramblock name: %s, new crc: %" PRIu32 ", old crc: %" PRIu32 +skip_sample_ramblock(const char *idstr, uint64_t ramblock_size) "ramblock name: %s, ramblock size: %" PRIu64 +find_page_matched(const char *idstr) "ramblock %s addr or size changed" diff --git a/net/eth.c b/net/eth.c index 0c1d413ee26e31d4ac6b622a9aa9b61d35356765..1e0821c5f81b59536edc5ef498e99c40f9bea319 100644 --- a/net/eth.c +++ b/net/eth.c @@ -16,6 +16,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "net/eth.h" #include "net/checksum.h" #include "net/tap.h" @@ -71,9 +72,8 @@ eth_get_gso_type(uint16_t l3_proto, uint8_t *l3_hdr, uint8_t l4proto) return VIRTIO_NET_HDR_GSO_TCPV6 | ecn_state; } } - - /* Unsupported offload */ - g_assert_not_reached(); + qemu_log_mask(LOG_UNIMP, "%s: probably not GSO frame, " + "unknown L3 protocol: 0x%04"PRIx16"\n", __func__, l3_proto); return VIRTIO_NET_HDR_GSO_NONE | ecn_state; } diff --git a/qapi/machine-target.json b/qapi/machine-target.json index 55310a6aa226d580bb95dcb32c606c6e709cfb8a..04623224720dce5d92c8e64e80d3c86c542f2129 100644 --- a/qapi/machine-target.json +++ b/qapi/machine-target.json @@ -212,7 +212,7 @@ ## { 'struct': 'CpuModelExpansionInfo', 'data': { 'model': 'CpuModelInfo' }, - 'if': 'defined(TARGET_S390X) || defined(TARGET_I386)' } + 'if': 'defined(TARGET_S390X) || defined(TARGET_I386) || defined(TARGET_ARM)' } ## # @query-cpu-model-expansion: @@ -237,7 +237,7 @@ # query-cpu-model-expansion while using these is not advised. # # Some architectures may not support all expansion types. s390x supports -# "full" and "static". +# "full" and "static". Arm only supports "full". # # Returns: a CpuModelExpansionInfo. 
Returns an error if expanding CPU models is # not supported, if the model cannot be expanded, if the model contains @@ -251,7 +251,7 @@ 'data': { 'type': 'CpuModelExpansionType', 'model': 'CpuModelInfo' }, 'returns': 'CpuModelExpansionInfo', - 'if': 'defined(TARGET_S390X) || defined(TARGET_I386)' } + 'if': 'defined(TARGET_S390X) || defined(TARGET_I386) || defined(TARGET_ARM)' } ## # @CpuDefinitionInfo: diff --git a/qapi/migration.json b/qapi/migration.json index 9cfbaf8c6ca61bc874f351ee6d7402b6d72588e5..6844ddfab3a238b6d0aee08e8e738904473b9c78 100644 --- a/qapi/migration.json +++ b/qapi/migration.json @@ -1445,3 +1445,68 @@ # Since: 3.0 ## { 'command': 'migrate-pause', 'allow-oob': true } + +## +# @DirtyRateStatus: +# +# An enumeration of dirtyrate status. +# +# @unstarted: the dirtyrate thread has not been started. +# +# @measuring: the dirtyrate thread is measuring. +# +# @measured: the dirtyrate thread has measured and results are available. +# +# Since: 5.2 +# +## +{ 'enum': 'DirtyRateStatus', + 'data': [ 'unstarted', 'measuring', 'measured'] } + +## +# @DirtyRateInfo: +# +# Information about current dirty page rate of vm. +# +# @dirty-rate: an estimate of the dirty page rate of the VM in units of +# MB/s, present only when estimating the rate has completed. +# +# @status: status containing dirtyrate query status includes +# 'unstarted' or 'measuring' or 'measured' +# +# @start-time: start time in units of second for calculation +# +# @calc-time: time in units of second for sample dirty pages +# +# Since: 5.2 +# +## +{ 'struct': 'DirtyRateInfo', + 'data': {'*dirty-rate': 'int64', + 'status': 'DirtyRateStatus', + 'start-time': 'int64', + 'calc-time': 'int64'} } + +## +# @calc-dirty-rate: +# +# start calculating dirty page rate for vm +# +# @calc-time: time in units of second for sample dirty pages +# +# Since: 5.2 +# +# Example: +# {"command": "calc-dirty-rate", "data": {"calc-time": 1} } +# +## +{ 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64'} } + +## +# @query-dirty-rate: +# +# query dirty page rate in units of MB/s for vm +# +# Since: 5.2 +## +{ 'command': 'query-dirty-rate', 'returns': 'DirtyRateInfo' } diff --git a/qobject/json-parser.c b/qobject/json-parser.c index 7d23e12e3379f91025459751c1addd2d90e545a9..840909ea6a3835c4f77d9cc9cbebf87aa3119aae 100644 --- a/qobject/json-parser.c +++ b/qobject/json-parser.c @@ -257,8 +257,9 @@ static JSONToken *parser_context_peek_token(JSONParserContext *ctxt) */ static int parse_pair(JSONParserContext *ctxt, QDict *dict) { + QObject *key_obj = NULL; + QString *key; QObject *value; - QString *key = NULL; JSONToken *peek, *token; peek = parser_context_peek_token(ctxt); @@ -267,7 +268,8 @@ static int parse_pair(JSONParserContext *ctxt, QDict *dict) goto out; } - key = qobject_to(QString, parse_value(ctxt)); + key_obj = parse_value(ctxt); + key = qobject_to(QString, key_obj); if (!key) { parse_error(ctxt, peek, "key is not a string in object"); goto out; @@ -297,13 +299,11 @@ static int parse_pair(JSONParserContext *ctxt, QDict *dict) qdict_put_obj(dict, qstring_get_str(key), value); - qobject_unref(key); - + qobject_unref(key_obj); return 0; out: - qobject_unref(key); - + qobject_unref(key_obj); return -1; } diff --git a/target/arm/cpu.c b/target/arm/cpu.c index 811e5c63652a79af14edae3027ddb39a9e6bc1f5..3f62336acf57c9f1a8133d83a1205b6a630fa44c 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -25,6 +25,8 @@ #include "qemu/module.h" #include "qapi/error.h" #include "qapi/visitor.h" +#include "qapi/qmp/qdict.h" +#include 
"qom/qom-qobject.h" #include "cpu.h" #include "internals.h" #include "exec/exec-all.h" @@ -170,9 +172,9 @@ static void arm_cpu_reset(CPUState *s) g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu); env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid; - env->vfp.xregs[ARM_VFP_MVFR0] = cpu->isar.mvfr0; - env->vfp.xregs[ARM_VFP_MVFR1] = cpu->isar.mvfr1; - env->vfp.xregs[ARM_VFP_MVFR2] = cpu->isar.mvfr2; + env->vfp.xregs[ARM_VFP_MVFR0] = cpu->isar.regs[MVFR0]; + env->vfp.xregs[ARM_VFP_MVFR1] = cpu->isar.regs[MVFR1]; + env->vfp.xregs[ARM_VFP_MVFR2] = cpu->isar.regs[MVFR2]; cpu->power_state = cpu->start_powered_off ? PSCI_OFF : PSCI_ON; s->halted = cpu->start_powered_off; @@ -1034,6 +1036,509 @@ static void arm_set_init_svtor(Object *obj, Visitor *v, const char *name, visit_type_uint32(v, name, &cpu->init_svtor, errp); } +/** + * CPUFeatureInfo: + * @reg: The ID register where the ID field is in. + * @name: The name of the CPU feature. + * @length: The bit length of the ID field. + * @shift: The bit shift of the ID field in the ID register. + * @min_value: The minimum value equal to or larger than which means the CPU + * feature is implemented. + * @ni_value: Not-implemented value. It will be set to the ID field when + * disabling the CPU feature. Usually, it's min_value - 1. + * @sign: Whether the ID field is signed. + * @is_32bit: Whether the CPU feature is for 32-bit. + * + * In ARM, a CPU feature is described by an ID field, which is a 4-bit field in + * an ID register. + */ +typedef struct CPUFeatureInfo { + CPUIDReg reg; + const char *name; + int length; + int shift; + int min_value; + int ni_value; + bool sign; + bool is_32bit; +} CPUFeatureInfo; + +#define FIELD_INFO(feature_name, id_reg, field, s, min_val, ni_val, is32bit) { \ + .reg = id_reg, \ + .length = R_ ## id_reg ## _ ## field ## _LENGTH, \ + .shift = R_ ## id_reg ## _ ## field ## _SHIFT, \ + .sign = s, \ + .min_value = min_val, \ + .ni_value = ni_val, \ + .name = feature_name, \ + .is_32bit = is32bit, \ +} + +static struct CPUFeatureInfo cpu_features[] = { + FIELD_INFO("swap", ID_ISAR0, SWAP, false, 1, 0, true), + FIELD_INFO("bitcount", ID_ISAR0, BITCOUNT, false, 1, 0, true), + FIELD_INFO("bitfield", ID_ISAR0, BITFIELD, false, 1, 0, true), + FIELD_INFO("cmpbranch", ID_ISAR0, CMPBRANCH, false, 1, 0, true), + FIELD_INFO("coproc", ID_ISAR0, COPROC, false, 1, 0, true), + FIELD_INFO("debug", ID_ISAR0, DEBUG, false, 1, 0, true), + FIELD_INFO("device", ID_ISAR0, DIVIDE, false, 1, 0, true), + + FIELD_INFO("endian", ID_ISAR1, ENDIAN, false, 1, 0, true), + FIELD_INFO("except", ID_ISAR1, EXCEPT, false, 1, 0, true), + FIELD_INFO("except_ar", ID_ISAR1, EXCEPT_AR, false, 1, 0, true), + FIELD_INFO("extend", ID_ISAR1, EXTEND, false, 1, 0, true), + FIELD_INFO("ifthen", ID_ISAR1, IFTHEN, false, 1, 0, true), + FIELD_INFO("immediate", ID_ISAR1, IMMEDIATE, false, 1, 0, true), + FIELD_INFO("interwork", ID_ISAR1, INTERWORK, false, 1, 0, true), + FIELD_INFO("jazelle", ID_ISAR1, JAZELLE, false, 1, 0, true), + + FIELD_INFO("loadstore", ID_ISAR2, LOADSTORE, false, 1, 0, true), + FIELD_INFO("memhint", ID_ISAR2, MEMHINT, false, 1, 0, true), + FIELD_INFO("multiaccessint", ID_ISAR2, MULTIACCESSINT, false, 1, 0, true), + FIELD_INFO("mult", ID_ISAR2, MULT, false, 1, 0, true), + FIELD_INFO("mults", ID_ISAR2, MULTS, false, 1, 0, true), + FIELD_INFO("multu", ID_ISAR2, MULTU, false, 1, 0, true), + FIELD_INFO("psr_ar", ID_ISAR2, PSR_AR, false, 1, 0, true), + FIELD_INFO("reversal", ID_ISAR2, REVERSAL, false, 1, 0, true), + + FIELD_INFO("saturate", ID_ISAR3, 
SATURATE, false, 1, 0, true), + FIELD_INFO("simd", ID_ISAR3, SIMD, false, 1, 0, true), + FIELD_INFO("svc", ID_ISAR3, SVC, false, 1, 0, true), + FIELD_INFO("synchprim", ID_ISAR3, SYNCHPRIM, false, 1, 0, true), + FIELD_INFO("tabbranch", ID_ISAR3, TABBRANCH, false, 1, 0, true), + FIELD_INFO("t32copy", ID_ISAR3, T32COPY, false, 1, 0, true), + FIELD_INFO("truenop", ID_ISAR3, TRUENOP, false, 1, 0, true), + FIELD_INFO("t32ee", ID_ISAR3, T32EE, false, 1, 0, true), + + FIELD_INFO("unpriv", ID_ISAR4, UNPRIV, false, 1, 0, true), + FIELD_INFO("withshifts", ID_ISAR4, WITHSHIFTS, false, 1, 0, true), + FIELD_INFO("writeback", ID_ISAR4, WRITEBACK, false, 1, 0, true), + FIELD_INFO("smc", ID_ISAR4, SMC, false, 1, 0, true), + FIELD_INFO("barrier", ID_ISAR4, BARRIER, false, 1, 0, true), + FIELD_INFO("synchprim_frac", ID_ISAR4, SYNCHPRIM_FRAC, false, 1, 0, true), + FIELD_INFO("psr_m", ID_ISAR4, PSR_M, false, 1, 0, true), + FIELD_INFO("swp_frac", ID_ISAR4, SWP_FRAC, false, 1, 0, true), + + FIELD_INFO("sevl", ID_ISAR5, SEVL, false, 1, 0, true), + FIELD_INFO("aes", ID_ISAR5, AES, false, 1, 0, true), + FIELD_INFO("sha1", ID_ISAR5, SHA1, false, 1, 0, true), + FIELD_INFO("sha2", ID_ISAR5, SHA2, false, 1, 0, true), + FIELD_INFO("crc32", ID_ISAR5, CRC32, false, 1, 0, true), + FIELD_INFO("rdm", ID_ISAR5, RDM, false, 1, 0, true), + FIELD_INFO("vcma", ID_ISAR5, VCMA, false, 1, 0, true), + + FIELD_INFO("jscvt", ID_ISAR6, JSCVT, false, 1, 0, true), + FIELD_INFO("dp", ID_ISAR6, DP, false, 1, 0, true), + FIELD_INFO("fhm", ID_ISAR6, FHM, false, 1, 0, true), + FIELD_INFO("sb", ID_ISAR6, SB, false, 1, 0, true), + FIELD_INFO("specres", ID_ISAR6, SPECRES, false, 1, 0, true), + FIELD_INFO("i8mm", ID_AA64ISAR1, I8MM, false, 1, 0, false), + FIELD_INFO("bf16", ID_AA64ISAR1, BF16, false, 1, 0, false), + FIELD_INFO("dgh", ID_AA64ISAR1, DGH, false, 1, 0, false), + + FIELD_INFO("cmaintva", ID_MMFR3, CMAINTVA, false, 1, 0, true), + FIELD_INFO("cmaintsw", ID_MMFR3, CMAINTSW, false, 1, 0, true), + FIELD_INFO("bpmaint", ID_MMFR3, BPMAINT, false, 1, 0, true), + FIELD_INFO("maintbcst", ID_MMFR3, MAINTBCST, false, 1, 0, true), + FIELD_INFO("pan", ID_MMFR3, PAN, false, 1, 0, true), + FIELD_INFO("cohwalk", ID_MMFR3, COHWALK, false, 1, 0, true), + FIELD_INFO("cmemsz", ID_MMFR3, CMEMSZ, false, 1, 0, true), + FIELD_INFO("supersec", ID_MMFR3, SUPERSEC, false, 1, 0, true), + + FIELD_INFO("specsei", ID_MMFR4, SPECSEI, false, 1, 0, true), + FIELD_INFO("ac2", ID_MMFR4, AC2, false, 1, 0, true), + FIELD_INFO("xnx", ID_MMFR4, XNX, false, 1, 0, true), + FIELD_INFO("cnp", ID_MMFR4, CNP, false, 1, 0, true), + FIELD_INFO("hpds", ID_MMFR4, HPDS, false, 1, 0, true), + FIELD_INFO("lsm", ID_MMFR4, LSM, false, 1, 0, true), + FIELD_INFO("ccidx", ID_MMFR4, CCIDX, false, 1, 0, true), + FIELD_INFO("evt", ID_MMFR4, EVT, false, 1, 0, true), + + FIELD_INFO("simdreg", MVFR0, SIMDREG, false, 1, 0, true), + FIELD_INFO("fpsp", MVFR0, FPSP, false, 1, 0, true), + FIELD_INFO("fpdp", MVFR0, FPDP, false, 1, 0, true), + FIELD_INFO("fptrap", MVFR0, FPTRAP, false, 1, 0, true), + FIELD_INFO("fpdivide", MVFR0, FPDIVIDE, false, 1, 0, true), + FIELD_INFO("fpsqrt", MVFR0, FPSQRT, false, 1, 0, true), + FIELD_INFO("fpshvec", MVFR0, FPSHVEC, false, 1, 0, true), + FIELD_INFO("fpround", MVFR0, FPROUND, false, 1, 0, true), + + FIELD_INFO("fpftz", MVFR1, FPFTZ, false, 1, 0, true), + FIELD_INFO("fpdnan", MVFR1, FPDNAN, false, 1, 0, true), + FIELD_INFO("simdls", MVFR1, SIMDLS, false, 1, 0, true), + FIELD_INFO("simdint", MVFR1, SIMDINT, false, 1, 0, true), + FIELD_INFO("simdsp", MVFR1, SIMDSP, 
false, 1, 0, true), + FIELD_INFO("simdhp", MVFR1, SIMDHP, false, 1, 0, true), + FIELD_INFO("fphp", MVFR1, FPHP, false, 1, 0, true), + FIELD_INFO("simdfmac", MVFR1, SIMDFMAC, false, 1, 0, true), + + FIELD_INFO("simdmisc", MVFR2, SIMDMISC, false, 1, 0, true), + FIELD_INFO("fpmisc", MVFR2, FPMISC, false, 1, 0, true), + + FIELD_INFO("debugver", ID_AA64DFR0, DEBUGVER, false, 1, 0, false), + FIELD_INFO("tracever", ID_AA64DFR0, TRACEVER, false, 1, 0, false), + FIELD_INFO("pmuver", ID_AA64DFR0, PMUVER, false, 1, 0, false), + FIELD_INFO("brps", ID_AA64DFR0, BRPS, false, 1, 0, false), + FIELD_INFO("wrps", ID_AA64DFR0, WRPS, false, 1, 0, false), + FIELD_INFO("ctx_cmps", ID_AA64DFR0, CTX_CMPS, false, 1, 0, false), + FIELD_INFO("pmsver", ID_AA64DFR0, PMSVER, false, 1, 0, false), + FIELD_INFO("doublelock", ID_AA64DFR0, DOUBLELOCK, false, 1, 0, false), + FIELD_INFO("tracefilt", ID_AA64DFR0, TRACEFILT, false, 1, 0, false), + + FIELD_INFO("aes", ID_AA64ISAR0, AES, false, 1, 0, false), + FIELD_INFO("sha1", ID_AA64ISAR0, SHA1, false, 1, 0, false), + FIELD_INFO("sha2", ID_AA64ISAR0, SHA2, false, 1, 0, false), + FIELD_INFO("crc32", ID_AA64ISAR0, CRC32, false, 1, 0, false), + FIELD_INFO("atomics", ID_AA64ISAR0, ATOMIC, false, 1, 0, false), + FIELD_INFO("asimdrdm", ID_AA64ISAR0, RDM, false, 1, 0, false), + FIELD_INFO("sha3", ID_AA64ISAR0, SHA3, false, 1, 0, false), + FIELD_INFO("sm3", ID_AA64ISAR0, SM3, false, 1, 0, false), + FIELD_INFO("sm4", ID_AA64ISAR0, SM4, false, 1, 0, false), + FIELD_INFO("asimddp", ID_AA64ISAR0, DP, false, 1, 0, false), + FIELD_INFO("asimdfhm", ID_AA64ISAR0, FHM, false, 1, 0, false), + FIELD_INFO("flagm", ID_AA64ISAR0, TS, false, 1, 0, false), + FIELD_INFO("tlb", ID_AA64ISAR0, TLB, false, 1, 0, false), + FIELD_INFO("rng", ID_AA64ISAR0, RNDR, false, 1, 0, false), + + FIELD_INFO("dcpop", ID_AA64ISAR1, DPB, false, 1, 0, false), + FIELD_INFO("papa", ID_AA64ISAR1, APA, false, 1, 0, false), + FIELD_INFO("api", ID_AA64ISAR1, API, false, 1, 0, false), + FIELD_INFO("jscvt", ID_AA64ISAR1, JSCVT, false, 1, 0, false), + FIELD_INFO("fcma", ID_AA64ISAR1, FCMA, false, 1, 0, false), + FIELD_INFO("lrcpc", ID_AA64ISAR1, LRCPC, false, 1, 0, false), + FIELD_INFO("pacg", ID_AA64ISAR1, GPA, false, 1, 0, false), + FIELD_INFO("gpi", ID_AA64ISAR1, GPI, false, 1, 0, false), + FIELD_INFO("frint", ID_AA64ISAR1, FRINTTS, false, 1, 0, false), + FIELD_INFO("sb", ID_AA64ISAR1, SB, false, 1, 0, false), + FIELD_INFO("specres", ID_AA64ISAR1, SPECRES, false, 1, 0, false), + + FIELD_INFO("el0", ID_AA64PFR0, EL0, false, 1, 0, false), + FIELD_INFO("el1", ID_AA64PFR0, EL1, false, 1, 0, false), + FIELD_INFO("el2", ID_AA64PFR0, EL2, false, 1, 0, false), + FIELD_INFO("el3", ID_AA64PFR0, EL3, false, 1, 0, false), + FIELD_INFO("fp", ID_AA64PFR0, FP, true, 0, 0xf, false), + FIELD_INFO("asimd", ID_AA64PFR0, ADVSIMD, true, 0, 0xf, false), + FIELD_INFO("gic", ID_AA64PFR0, GIC, false, 1, 0, false), + FIELD_INFO("ras", ID_AA64PFR0, RAS, false, 1, 0, false), + FIELD_INFO("sve", ID_AA64PFR0, SVE, false, 1, 0, false), + + FIELD_INFO("bti", ID_AA64PFR1, BT, false, 1, 0, false), + FIELD_INFO("sbss", ID_AA64PFR1, SBSS, false, 1, 0, false), + FIELD_INFO("mte", ID_AA64PFR1, MTE, false, 1, 0, false), + FIELD_INFO("ras_frac", ID_AA64PFR1, RAS_FRAC, false, 1, 0, false), + + FIELD_INFO("parange", ID_AA64MMFR0, PARANGE, false, 1, 0, false), + FIELD_INFO("asidbits", ID_AA64MMFR0, ASIDBITS, false, 1, 0, false), + FIELD_INFO("bigend", ID_AA64MMFR0, BIGEND, false, 1, 0, false), + FIELD_INFO("snsmem", ID_AA64MMFR0, SNSMEM, false, 1, 0, false), + 
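/*
 * How these table entries are consumed (sketch; see arm_cpu_get_bit_prop()
 * and arm_cpu_set_bit_prop() below): the boolean CPU property reads as true
 * when the ID field, extracted with extract64()/sextract64(), is >= min_value,
 * and disabling the property deposits ni_value back into that field. For
 * instance, the "pmull" entry above amounts to
 *     extract64(isar->regs[ID_AA64ISAR0], shift, length) >= 2
 */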
FIELD_INFO("bigendel0", ID_AA64MMFR0, BIGENDEL0, false, 1, 0, false), + FIELD_INFO("tgran16", ID_AA64MMFR0, TGRAN16, false, 1, 0, false), + FIELD_INFO("tgran64", ID_AA64MMFR0, TGRAN64, false, 1, 0, false), + FIELD_INFO("tgran4", ID_AA64MMFR0, TGRAN4, false, 1, 0, false), + FIELD_INFO("tgran16_2", ID_AA64MMFR0, TGRAN16_2, false, 1, 0, false), + FIELD_INFO("tgran64_2", ID_AA64MMFR0, TGRAN64_2, false, 1, 0, false), + FIELD_INFO("tgran4_2", ID_AA64MMFR0, TGRAN4_2, false, 1, 0, false), + FIELD_INFO("exs", ID_AA64MMFR0, EXS, false, 1, 0, false), + + FIELD_INFO("hafdbs", ID_AA64MMFR1, HAFDBS, false, 1, 0, false), + FIELD_INFO("vmidbits", ID_AA64MMFR1, VMIDBITS, false, 1, 0, false), + FIELD_INFO("vh", ID_AA64MMFR1, VH, false, 1, 0, false), + FIELD_INFO("hpds", ID_AA64MMFR1, HPDS, false, 1, 0, false), + FIELD_INFO("lo", ID_AA64MMFR1, LO, false, 1, 0, false), + FIELD_INFO("pan", ID_AA64MMFR1, PAN, false, 1, 0, false), + FIELD_INFO("specsei", ID_AA64MMFR1, SPECSEI, false, 1, 0, false), + FIELD_INFO("xnx", ID_AA64MMFR1, XNX, false, 1, 0, false), + + FIELD_INFO("cnp", ID_AA64MMFR2, CNP, false, 1, 0, false), + FIELD_INFO("uao", ID_AA64MMFR2, UAO, false, 1, 0, false), + FIELD_INFO("lsm", ID_AA64MMFR2, LSM, false, 1, 0, false), + FIELD_INFO("iesb", ID_AA64MMFR2, IESB, false, 1, 0, false), + FIELD_INFO("varange", ID_AA64MMFR2, VARANGE, false, 1, 0, false), + FIELD_INFO("ccidx", ID_AA64MMFR2, CCIDX, false, 1, 0, false), + FIELD_INFO("nv", ID_AA64MMFR2, NV, false, 1, 0, false), + FIELD_INFO("st", ID_AA64MMFR2, ST, false, 1, 0, false), + FIELD_INFO("uscat", ID_AA64MMFR2, AT, false, 1, 0, false), + FIELD_INFO("ids", ID_AA64MMFR2, IDS, false, 1, 0, false), + FIELD_INFO("fwb", ID_AA64MMFR2, FWB, false, 1, 0, false), + FIELD_INFO("ttl", ID_AA64MMFR2, TTL, false, 1, 0, false), + FIELD_INFO("bbm", ID_AA64MMFR2, BBM, false, 1, 0, false), + FIELD_INFO("evt", ID_AA64MMFR2, EVT, false, 1, 0, false), + FIELD_INFO("e0pd", ID_AA64MMFR2, E0PD, false, 1, 0, false), + + FIELD_INFO("copdbg", ID_DFR0, COPDBG, false, 1, 0, false), + FIELD_INFO("copsdbg", ID_DFR0, COPSDBG, false, 1, 0, false), + FIELD_INFO("mmapdbg", ID_DFR0, MMAPDBG, false, 1, 0, false), + FIELD_INFO("coptrc", ID_DFR0, COPTRC, false, 1, 0, false), + FIELD_INFO("mmaptrc", ID_DFR0, MMAPTRC, false, 1, 0, false), + FIELD_INFO("mprofdbg", ID_DFR0, MPROFDBG, false, 1, 0, false), + FIELD_INFO("perfmon", ID_DFR0, PERFMON, false, 1, 0, false), + FIELD_INFO("tracefilt", ID_DFR0, TRACEFILT, false, 1, 0, false), + + { + .reg = ID_AA64PFR0, .length = R_ID_AA64PFR0_FP_LENGTH, + .shift = R_ID_AA64PFR0_FP_SHIFT, .sign = true, .min_value = 1, + .ni_value = 0, .name = "fphp", .is_32bit = false, + }, + { + .reg = ID_AA64PFR0, .length = R_ID_AA64PFR0_ADVSIMD_LENGTH, + .shift = R_ID_AA64PFR0_ADVSIMD_SHIFT, .sign = true, .min_value = 1, + .ni_value = 0, .name = "asimdhp", .is_32bit = false, + }, + { + .reg = ID_AA64ISAR0, .length = R_ID_AA64ISAR0_AES_LENGTH, + .shift = R_ID_AA64ISAR0_AES_SHIFT, .sign = false, .min_value = 2, + .ni_value = 1, .name = "pmull", .is_32bit = false, + }, + { + .reg = ID_AA64ISAR0, .length = R_ID_AA64ISAR0_SHA2_LENGTH, + .shift = R_ID_AA64ISAR0_SHA2_SHIFT, .sign = false, .min_value = 2, + .ni_value = 1, .name = "sha512", .is_32bit = false, + }, + { + .reg = ID_AA64ISAR0, .length = R_ID_AA64ISAR0_TS_LENGTH, + .shift = R_ID_AA64ISAR0_TS_SHIFT, .sign = false, .min_value = 2, + .ni_value = 1, .name = "flagm2", .is_32bit = false, + }, + { + .reg = ID_AA64ISAR1, .length = R_ID_AA64ISAR1_DPB_LENGTH, + .shift = R_ID_AA64ISAR1_DPB_SHIFT, .sign = false, .min_value = 
2, + .ni_value = 1, .name = "dcpodp", .is_32bit = false, + }, + { + .reg = ID_AA64ISAR1, .length = R_ID_AA64ISAR1_LRCPC_LENGTH, + .shift = R_ID_AA64ISAR1_LRCPC_SHIFT, .sign = false, .min_value = 2, + .ni_value = 1, .name = "ilrcpc", .is_32bit = false, + }, +}; + +typedef struct CPUFeatureDep { + CPUFeatureInfo from, to; +} CPUFeatureDep; + +static const CPUFeatureDep feature_dependencies[] = { + { + .from = FIELD_INFO("fp", ID_AA64PFR0, FP, true, 0, 0xf, false), + .to = FIELD_INFO("asimd", ID_AA64PFR0, ADVSIMD, true, 0, 0xf, false), + }, + { + .from = FIELD_INFO("asimd", ID_AA64PFR0, ADVSIMD, true, 0, 0xf, false), + .to = FIELD_INFO("fp", ID_AA64PFR0, FP, true, 0, 0xf, false), + }, + { + .from = { + .reg = ID_AA64PFR0, .length = R_ID_AA64PFR0_FP_LENGTH, + .shift = R_ID_AA64PFR0_FP_SHIFT, .sign = true, .min_value = 1, + .ni_value = 0, .name = "fphp", .is_32bit = false, + }, + .to = { + .reg = ID_AA64PFR0, .length = R_ID_AA64PFR0_ADVSIMD_LENGTH, + .shift = R_ID_AA64PFR0_ADVSIMD_SHIFT, .sign = true, .min_value = 1, + .ni_value = 0, .name = "asimdhp", .is_32bit = false, + }, + }, + { + .from = { + .reg = ID_AA64PFR0, .length = R_ID_AA64PFR0_ADVSIMD_LENGTH, + .shift = R_ID_AA64PFR0_ADVSIMD_SHIFT, .sign = true, .min_value = 1, + .ni_value = 0, .name = "asimdhp", .is_32bit = false, + }, + .to = { + .reg = ID_AA64PFR0, .length = R_ID_AA64PFR0_FP_LENGTH, + .shift = R_ID_AA64PFR0_FP_SHIFT, .sign = true, .min_value = 1, + .ni_value = 0, .name = "fphp", .is_32bit = false, + }, + }, + { + + .from = FIELD_INFO("aes", ID_AA64ISAR0, AES, false, 1, 0, false), + .to = { + .reg = ID_AA64ISAR0, .length = R_ID_AA64ISAR0_AES_LENGTH, + .shift = R_ID_AA64ISAR0_AES_SHIFT, .sign = false, .min_value = 2, + .ni_value = 1, .name = "pmull", .is_32bit = false, + }, + }, + { + + .from = FIELD_INFO("sha2", ID_AA64ISAR0, SHA2, false, 1, 0, false), + .to = { + .reg = ID_AA64ISAR0, .length = R_ID_AA64ISAR0_SHA2_LENGTH, + .shift = R_ID_AA64ISAR0_SHA2_SHIFT, .sign = false, .min_value = 2, + .ni_value = 1, .name = "sha512", .is_32bit = false, + }, + }, + { + .from = FIELD_INFO("lrcpc", ID_AA64ISAR1, LRCPC, false, 1, 0, false), + .to = { + .reg = ID_AA64ISAR1, .length = R_ID_AA64ISAR1_LRCPC_LENGTH, + .shift = R_ID_AA64ISAR1_LRCPC_SHIFT, .sign = false, .min_value = 2, + .ni_value = 1, .name = "ilrcpc", .is_32bit = false, + }, + }, + { + .from = FIELD_INFO("sm3", ID_AA64ISAR0, SM3, false, 1, 0, false), + .to = FIELD_INFO("sm4", ID_AA64ISAR0, SM4, false, 1, 0, false), + }, + { + .from = FIELD_INFO("sm4", ID_AA64ISAR0, SM4, false, 1, 0, false), + .to = FIELD_INFO("sm3", ID_AA64ISAR0, SM3, false, 1, 0, false), + }, + { + .from = FIELD_INFO("sha1", ID_AA64ISAR0, SHA1, false, 1, 0, false), + .to = FIELD_INFO("sha2", ID_AA64ISAR0, SHA2, false, 1, 0, false), + }, + { + .from = FIELD_INFO("sha1", ID_AA64ISAR0, SHA1, false, 1, 0, false), + .to = FIELD_INFO("sha3", ID_AA64ISAR0, SHA3, false, 1, 0, false), + }, + { + .from = FIELD_INFO("sha3", ID_AA64ISAR0, SHA3, false, 1, 0, false), + .to = { + .reg = ID_AA64ISAR0, .length = R_ID_AA64ISAR0_SHA2_LENGTH, + .shift = R_ID_AA64ISAR0_SHA2_SHIFT, .sign = false, .min_value = 2, + .ni_value = 1, .name = "sha512", .is_32bit = false, + }, + }, + { + .from = { + .reg = ID_AA64ISAR0, .length = R_ID_AA64ISAR0_SHA2_LENGTH, + .shift = R_ID_AA64ISAR0_SHA2_SHIFT, .sign = false, .min_value = 2, + .ni_value = 1, .name = "sha512", .is_32bit = false, + }, + .to = FIELD_INFO("sha3", ID_AA64ISAR0, SHA3, false, 1, 0, false), + }, +}; + +void arm_cpu_features_to_dict(ARMCPU *cpu, QDict *features) +{ + Object *obj = 
OBJECT(cpu); + const char *name; + ObjectProperty *prop; + bool is_32bit = !arm_feature(&cpu->env, ARM_FEATURE_AARCH64); + int i; + + for (i = 0; i < ARRAY_SIZE(cpu_features); ++i) { + if (is_32bit != cpu_features[i].is_32bit) { + continue; + } + + name = cpu_features[i].name; + prop = object_property_find(obj, name, NULL); + if (prop) { + QObject *value; + + assert(prop->get); + value = object_property_get_qobject(obj, name, &error_abort); + qdict_put_obj(features, name, value); + } + } +} + +static void arm_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + ARMCPU *cpu = ARM_CPU(obj); + CPUFeatureInfo *feat = opaque; + int field_value = feat->sign ? sextract64(cpu->isar.regs[feat->reg], + feat->shift, feat->length) : + extract64(cpu->isar.regs[feat->reg], + feat->shift, feat->length); + bool value = field_value >= feat->min_value; + + visit_type_bool(v, name, &value, errp); +} + +static void arm_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + DeviceState *dev = DEVICE(obj); + ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; + CPUFeatureInfo *feat = opaque; + Error *local_err = NULL; + bool value; + + if (!kvm_arm_cpu_feature_supported()) { + warn_report("KVM doesn't support to set CPU feature in arm. " + "Setting to `%s` is ignored.", name); + return; + } + if (dev->realized) { + qdev_prop_set_after_realize(dev, name, errp); + return; + } + + visit_type_bool(v, name, &value, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + if (value) { + if (object_property_get_bool(obj, feat->name, NULL)) { + return; + } + isar->regs[feat->reg] = deposit64(isar->regs[feat->reg], + feat->shift, feat->length, + feat->min_value); + /* Auto enable the features which current feature is dependent on. */ + for (int i = 0; i < ARRAY_SIZE(feature_dependencies); ++i) { + const CPUFeatureDep *d = &feature_dependencies[i]; + if (strcmp(d->to.name, feat->name) != 0) { + continue; + } + + object_property_set_bool(obj, true, d->from.name, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + } + } else { + if (!object_property_get_bool(obj, feat->name, NULL)) { + return; + } + isar->regs[feat->reg] = deposit64(isar->regs[feat->reg], + feat->shift, feat->length, + feat->ni_value); + /* Auto disable the features which are dependent on current feature. 
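For example (illustrative, based on the feature_dependencies[] table above): clearing "sha2" also clears "sha512", since that entry requires ID_AA64ISAR0.SHA2 >= 2, and clearing "aes" likewise clears "pmull".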
*/ + for (int i = 0; i < ARRAY_SIZE(feature_dependencies); ++i) { + const CPUFeatureDep *d = &feature_dependencies[i]; + if (strcmp(d->from.name, feat->name) != 0) { + continue; + } + + object_property_set_bool(obj, false, d->to.name, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + } + } +} + +static void arm_cpu_register_feature_props(ARMCPU *cpu) +{ + int i; + int num = ARRAY_SIZE(cpu_features); + ObjectProperty *op; + CPUARMState *env = &cpu->env; + + for (i = 0; i < num; i++) { + if ((arm_feature(env, ARM_FEATURE_AARCH64) && cpu_features[i].is_32bit) + || (!arm_feature(env, ARM_FEATURE_AARCH64) && + cpu_features[i].is_32bit)) { + continue; + } + op = object_property_find(OBJECT(cpu), cpu_features[i].name, NULL); + if (!op) { + object_property_add(OBJECT(cpu), cpu_features[i].name, "bool", + arm_cpu_get_bit_prop, + arm_cpu_set_bit_prop, + NULL, &cpu_features[i], &error_abort); + } + } +} + void arm_cpu_post_init(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); @@ -1150,6 +1655,8 @@ void arm_cpu_post_init(Object *obj) qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property, &error_abort); + + arm_cpu_register_feature_props(cpu); } static void arm_cpu_finalizefn(Object *obj) @@ -1251,19 +1758,19 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) unset_feature(env, ARM_FEATURE_VFP3); unset_feature(env, ARM_FEATURE_VFP4); - t = cpu->isar.id_aa64isar1; + t = cpu->isar.regs[ID_AA64ISAR1]; t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 0); - cpu->isar.id_aa64isar1 = t; + cpu->isar.regs[ID_AA64ISAR1] = t; - t = cpu->isar.id_aa64pfr0; + t = cpu->isar.regs[ID_AA64PFR0]; t = FIELD_DP64(t, ID_AA64PFR0, FP, 0xf); - cpu->isar.id_aa64pfr0 = t; + cpu->isar.regs[ID_AA64PFR0] = t; - u = cpu->isar.id_isar6; + u = cpu->isar.regs[ID_ISAR6]; u = FIELD_DP32(u, ID_ISAR6, JSCVT, 0); - cpu->isar.id_isar6 = u; + cpu->isar.regs[ID_ISAR6] = u; - u = cpu->isar.mvfr0; + u = cpu->isar.regs[MVFR0]; u = FIELD_DP32(u, MVFR0, FPSP, 0); u = FIELD_DP32(u, MVFR0, FPDP, 0); u = FIELD_DP32(u, MVFR0, FPTRAP, 0); @@ -1271,17 +1778,17 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) u = FIELD_DP32(u, MVFR0, FPSQRT, 0); u = FIELD_DP32(u, MVFR0, FPSHVEC, 0); u = FIELD_DP32(u, MVFR0, FPROUND, 0); - cpu->isar.mvfr0 = u; + cpu->isar.regs[MVFR0] = u; - u = cpu->isar.mvfr1; + u = cpu->isar.regs[MVFR1]; u = FIELD_DP32(u, MVFR1, FPFTZ, 0); u = FIELD_DP32(u, MVFR1, FPDNAN, 0); u = FIELD_DP32(u, MVFR1, FPHP, 0); - cpu->isar.mvfr1 = u; + cpu->isar.regs[MVFR1] = u; - u = cpu->isar.mvfr2; + u = cpu->isar.regs[MVFR2]; u = FIELD_DP32(u, MVFR2, FPMISC, 0); - cpu->isar.mvfr2 = u; + cpu->isar.regs[MVFR2] = u; } if (!cpu->has_neon) { @@ -1290,56 +1797,56 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) unset_feature(env, ARM_FEATURE_NEON); - t = cpu->isar.id_aa64isar0; + t = cpu->isar.regs[ID_AA64ISAR0]; t = FIELD_DP64(t, ID_AA64ISAR0, DP, 0); - cpu->isar.id_aa64isar0 = t; + cpu->isar.regs[ID_AA64ISAR0] = t; - t = cpu->isar.id_aa64isar1; + t = cpu->isar.regs[ID_AA64ISAR1]; t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 0); - cpu->isar.id_aa64isar1 = t; + cpu->isar.regs[ID_AA64ISAR1] = t; - t = cpu->isar.id_aa64pfr0; + t = cpu->isar.regs[ID_AA64PFR0]; t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 0xf); - cpu->isar.id_aa64pfr0 = t; + cpu->isar.regs[ID_AA64PFR0] = t; - u = cpu->isar.id_isar5; + u = cpu->isar.regs[ID_ISAR5]; u = FIELD_DP32(u, ID_ISAR5, RDM, 0); u = FIELD_DP32(u, ID_ISAR5, VCMA, 0); - cpu->isar.id_isar5 = u; + cpu->isar.regs[ID_ISAR5] = u; - u = cpu->isar.id_isar6; + u = 
cpu->isar.regs[ID_ISAR6]; u = FIELD_DP32(u, ID_ISAR6, DP, 0); u = FIELD_DP32(u, ID_ISAR6, FHM, 0); - cpu->isar.id_isar6 = u; + cpu->isar.regs[ID_ISAR6] = u; - u = cpu->isar.mvfr1; + u = cpu->isar.regs[MVFR1]; u = FIELD_DP32(u, MVFR1, SIMDLS, 0); u = FIELD_DP32(u, MVFR1, SIMDINT, 0); u = FIELD_DP32(u, MVFR1, SIMDSP, 0); u = FIELD_DP32(u, MVFR1, SIMDHP, 0); u = FIELD_DP32(u, MVFR1, SIMDFMAC, 0); - cpu->isar.mvfr1 = u; + cpu->isar.regs[MVFR1] = u; - u = cpu->isar.mvfr2; + u = cpu->isar.regs[MVFR2]; u = FIELD_DP32(u, MVFR2, SIMDMISC, 0); - cpu->isar.mvfr2 = u; + cpu->isar.regs[MVFR2] = u; } if (!cpu->has_neon && !cpu->has_vfp) { uint64_t t; uint32_t u; - t = cpu->isar.id_aa64isar0; + t = cpu->isar.regs[ID_AA64ISAR0]; t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 0); - cpu->isar.id_aa64isar0 = t; + cpu->isar.regs[ID_AA64ISAR0] = t; - t = cpu->isar.id_aa64isar1; + t = cpu->isar.regs[ID_AA64ISAR1]; t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 0); - cpu->isar.id_aa64isar1 = t; + cpu->isar.regs[ID_AA64ISAR1] = t; - u = cpu->isar.mvfr0; + u = cpu->isar.regs[MVFR0]; u = FIELD_DP32(u, MVFR0, SIMDREG, 0); - cpu->isar.mvfr0 = u; + cpu->isar.regs[MVFR0] = u; } if (arm_feature(env, ARM_FEATURE_M) && !cpu->has_dsp) { @@ -1347,19 +1854,19 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) unset_feature(env, ARM_FEATURE_THUMB_DSP); - u = cpu->isar.id_isar1; + u = cpu->isar.regs[ID_ISAR1]; u = FIELD_DP32(u, ID_ISAR1, EXTEND, 1); - cpu->isar.id_isar1 = u; + cpu->isar.regs[ID_ISAR1] = u; - u = cpu->isar.id_isar2; + u = cpu->isar.regs[ID_ISAR2]; u = FIELD_DP32(u, ID_ISAR2, MULTU, 1); u = FIELD_DP32(u, ID_ISAR2, MULTS, 1); - cpu->isar.id_isar2 = u; + cpu->isar.regs[ID_ISAR2] = u; - u = cpu->isar.id_isar3; + u = cpu->isar.regs[ID_ISAR3]; u = FIELD_DP32(u, ID_ISAR3, SIMD, 1); u = FIELD_DP32(u, ID_ISAR3, SATURATE, 0); - cpu->isar.id_isar3 = u; + cpu->isar.regs[ID_ISAR3] = u; } /* Some features automatically imply others: */ @@ -1489,7 +1996,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) } } - if (!cpu->has_el3) { + if (!cpu->has_el3 && !kvm_enabled()) { /* If the has_el3 CPU property is disabled then we need to disable the * feature. */ @@ -1499,7 +2006,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) * registers as well. These are id_pfr1[7:4] and id_aa64pfr0[15:12]. */ cpu->id_pfr1 &= ~0xf0; - cpu->isar.id_aa64pfr0 &= ~0xf000; + cpu->isar.regs[ID_AA64PFR0] &= ~0xf000; } if (!cpu->has_el2) { @@ -1522,18 +2029,20 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) cpu); #endif } else { - cpu->id_aa64dfr0 &= ~0xf00; - cpu->id_dfr0 &= ~(0xf << 24); + cpu->isar.regs[ID_AA64DFR0] = + FIELD_DP64(cpu->isar.regs[ID_AA64DFR0], ID_AA64DFR0, PMUVER, 0); + cpu->isar.regs[ID_DFR0] = FIELD_DP32(cpu->isar.regs[ID_DFR0], ID_DFR0, + PERFMON, 0); cpu->pmceid0 = 0; cpu->pmceid1 = 0; } - if (!arm_feature(env, ARM_FEATURE_EL2)) { + if (!arm_feature(env, ARM_FEATURE_EL2) && !kvm_enabled()) { /* Disable the hypervisor feature bits in the processor feature * registers if we don't have EL2. These are id_pfr1[15:12] and * id_aa64pfr0_el1[11:8]. */ - cpu->isar.id_aa64pfr0 &= ~0xf00; + cpu->isar.regs[ID_AA64PFR0] &= ~0xf00; cpu->id_pfr1 &= ~0xf000; } @@ -1674,13 +2183,15 @@ static void arm926_initfn(Object *obj) * ARMv5 does not have the ID_ISAR registers, but we can still * set the field to indicate Jazelle support within QEMU. 
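* Keeping the synthetic value here means the generic isar_feature_jazelle()
* check on ID_ISAR1 (see cpu.h) continues to work for this CPU.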
*/ - cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1); + cpu->isar.regs[ID_ISAR1] = FIELD_DP32(cpu->isar.regs[ID_ISAR1], ID_ISAR1, + JAZELLE, 1); /* * Similarly, we need to set MVFR0 fields to enable double precision * and short vector support even though ARMv5 doesn't have this register. */ - cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1); - cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPDP, 1); + cpu->isar.regs[MVFR0] = FIELD_DP32(cpu->isar.regs[MVFR0], MVFR0, + FPSHVEC, 1); + cpu->isar.regs[MVFR0] = FIELD_DP32(cpu->isar.regs[MVFR0], MVFR0, FPDP, 1); } static void arm946_initfn(Object *obj) @@ -1716,13 +2227,15 @@ static void arm1026_initfn(Object *obj) * ARMv5 does not have the ID_ISAR registers, but we can still * set the field to indicate Jazelle support within QEMU. */ - cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1); + cpu->isar.regs[ID_ISAR1] = FIELD_DP32(cpu->isar.regs[ID_ISAR1], ID_ISAR1, + JAZELLE, 1); /* * Similarly, we need to set MVFR0 fields to enable double precision * and short vector support even though ARMv5 doesn't have this register. */ - cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1); - cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPDP, 1); + cpu->isar.regs[MVFR0] = FIELD_DP32(cpu->isar.regs[MVFR0], MVFR0, + FPSHVEC, 1); + cpu->isar.regs[MVFR0] = FIELD_DP32(cpu->isar.regs[MVFR0], MVFR0, FPDP, 1); { /* The 1026 had an IFAR at c6,c0,0,1 rather than the ARMv6 c6,c0,0,2 */ @@ -1755,22 +2268,22 @@ static void arm1136_r2_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS); cpu->midr = 0x4107b362; cpu->reset_fpsid = 0x410120b4; - cpu->isar.mvfr0 = 0x11111111; - cpu->isar.mvfr1 = 0x00000000; + cpu->isar.regs[MVFR0] = 0x11111111; + cpu->isar.regs[MVFR1] = 0x00000000; cpu->ctr = 0x1dd20d2; cpu->reset_sctlr = 0x00050078; cpu->id_pfr0 = 0x111; cpu->id_pfr1 = 0x1; - cpu->id_dfr0 = 0x2; + cpu->isar.regs[ID_DFR0] = 0x2; cpu->id_afr0 = 0x3; - cpu->id_mmfr0 = 0x01130003; - cpu->id_mmfr1 = 0x10030302; - cpu->id_mmfr2 = 0x01222110; - cpu->isar.id_isar0 = 0x00140011; - cpu->isar.id_isar1 = 0x12002111; - cpu->isar.id_isar2 = 0x11231111; - cpu->isar.id_isar3 = 0x01102131; - cpu->isar.id_isar4 = 0x141; + cpu->isar.regs[ID_MMFR0] = 0x01130003; + cpu->isar.regs[ID_MMFR1] = 0x10030302; + cpu->isar.regs[ID_MMFR2] = 0x01222110; + cpu->isar.regs[ID_ISAR0] = 0x00140011; + cpu->isar.regs[ID_ISAR1] = 0x12002111; + cpu->isar.regs[ID_ISAR2] = 0x11231111; + cpu->isar.regs[ID_ISAR3] = 0x01102131; + cpu->isar.regs[ID_ISAR4] = 0x141; cpu->reset_auxcr = 7; } @@ -1787,22 +2300,22 @@ static void arm1136_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS); cpu->midr = 0x4117b363; cpu->reset_fpsid = 0x410120b4; - cpu->isar.mvfr0 = 0x11111111; - cpu->isar.mvfr1 = 0x00000000; + cpu->isar.regs[MVFR0] = 0x11111111; + cpu->isar.regs[MVFR1] = 0x00000000; cpu->ctr = 0x1dd20d2; cpu->reset_sctlr = 0x00050078; cpu->id_pfr0 = 0x111; cpu->id_pfr1 = 0x1; - cpu->id_dfr0 = 0x2; + cpu->isar.regs[ID_DFR0] = 0x2; cpu->id_afr0 = 0x3; - cpu->id_mmfr0 = 0x01130003; - cpu->id_mmfr1 = 0x10030302; - cpu->id_mmfr2 = 0x01222110; - cpu->isar.id_isar0 = 0x00140011; - cpu->isar.id_isar1 = 0x12002111; - cpu->isar.id_isar2 = 0x11231111; - cpu->isar.id_isar3 = 0x01102131; - cpu->isar.id_isar4 = 0x141; + cpu->isar.regs[ID_MMFR0] = 0x01130003; + cpu->isar.regs[ID_MMFR1] = 0x10030302; + cpu->isar.regs[ID_MMFR2] = 0x01222110; + cpu->isar.regs[ID_ISAR0] = 0x00140011; + cpu->isar.regs[ID_ISAR1] = 
0x12002111; + cpu->isar.regs[ID_ISAR2] = 0x11231111; + cpu->isar.regs[ID_ISAR3] = 0x01102131; + cpu->isar.regs[ID_ISAR4] = 0x141; cpu->reset_auxcr = 7; } @@ -1820,22 +2333,22 @@ static void arm1176_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_EL3); cpu->midr = 0x410fb767; cpu->reset_fpsid = 0x410120b5; - cpu->isar.mvfr0 = 0x11111111; - cpu->isar.mvfr1 = 0x00000000; + cpu->isar.regs[MVFR0] = 0x11111111; + cpu->isar.regs[MVFR1] = 0x00000000; cpu->ctr = 0x1dd20d2; cpu->reset_sctlr = 0x00050078; cpu->id_pfr0 = 0x111; cpu->id_pfr1 = 0x11; - cpu->id_dfr0 = 0x33; + cpu->isar.regs[ID_DFR0] = 0x33; cpu->id_afr0 = 0; - cpu->id_mmfr0 = 0x01130003; - cpu->id_mmfr1 = 0x10030302; - cpu->id_mmfr2 = 0x01222100; - cpu->isar.id_isar0 = 0x0140011; - cpu->isar.id_isar1 = 0x12002111; - cpu->isar.id_isar2 = 0x11231121; - cpu->isar.id_isar3 = 0x01102131; - cpu->isar.id_isar4 = 0x01141; + cpu->isar.regs[ID_MMFR0] = 0x01130003; + cpu->isar.regs[ID_MMFR1] = 0x10030302; + cpu->isar.regs[ID_MMFR2] = 0x01222100; + cpu->isar.regs[ID_ISAR0] = 0x0140011; + cpu->isar.regs[ID_ISAR1] = 0x12002111; + cpu->isar.regs[ID_ISAR2] = 0x11231121; + cpu->isar.regs[ID_ISAR3] = 0x01102131; + cpu->isar.regs[ID_ISAR4] = 0x01141; cpu->reset_auxcr = 7; } @@ -1851,21 +2364,21 @@ static void arm11mpcore_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); cpu->midr = 0x410fb022; cpu->reset_fpsid = 0x410120b4; - cpu->isar.mvfr0 = 0x11111111; - cpu->isar.mvfr1 = 0x00000000; + cpu->isar.regs[MVFR0] = 0x11111111; + cpu->isar.regs[MVFR1] = 0x00000000; cpu->ctr = 0x1d192992; /* 32K icache 32K dcache */ cpu->id_pfr0 = 0x111; cpu->id_pfr1 = 0x1; - cpu->id_dfr0 = 0; + cpu->isar.regs[ID_DFR0] = 0; cpu->id_afr0 = 0x2; - cpu->id_mmfr0 = 0x01100103; - cpu->id_mmfr1 = 0x10020302; - cpu->id_mmfr2 = 0x01222000; - cpu->isar.id_isar0 = 0x00100011; - cpu->isar.id_isar1 = 0x12002111; - cpu->isar.id_isar2 = 0x11221011; - cpu->isar.id_isar3 = 0x01102131; - cpu->isar.id_isar4 = 0x141; + cpu->isar.regs[ID_MMFR0] = 0x01100103; + cpu->isar.regs[ID_MMFR1] = 0x10020302; + cpu->isar.regs[ID_MMFR2] = 0x01222000; + cpu->isar.regs[ID_ISAR0] = 0x00100011; + cpu->isar.regs[ID_ISAR1] = 0x12002111; + cpu->isar.regs[ID_ISAR2] = 0x11221011; + cpu->isar.regs[ID_ISAR3] = 0x01102131; + cpu->isar.regs[ID_ISAR4] = 0x141; cpu->reset_auxcr = 1; } @@ -1888,19 +2401,19 @@ static void cortex_m3_initfn(Object *obj) cpu->pmsav7_dregion = 8; cpu->id_pfr0 = 0x00000030; cpu->id_pfr1 = 0x00000200; - cpu->id_dfr0 = 0x00100000; + cpu->isar.regs[ID_DFR0] = 0x00100000; cpu->id_afr0 = 0x00000000; - cpu->id_mmfr0 = 0x00000030; - cpu->id_mmfr1 = 0x00000000; - cpu->id_mmfr2 = 0x00000000; - cpu->id_mmfr3 = 0x00000000; - cpu->isar.id_isar0 = 0x01141110; - cpu->isar.id_isar1 = 0x02111000; - cpu->isar.id_isar2 = 0x21112231; - cpu->isar.id_isar3 = 0x01111110; - cpu->isar.id_isar4 = 0x01310102; - cpu->isar.id_isar5 = 0x00000000; - cpu->isar.id_isar6 = 0x00000000; + cpu->isar.regs[ID_MMFR0] = 0x00000030; + cpu->isar.regs[ID_MMFR1] = 0x00000000; + cpu->isar.regs[ID_MMFR2] = 0x00000000; + cpu->isar.regs[ID_MMFR3] = 0x00000000; + cpu->isar.regs[ID_ISAR0] = 0x01141110; + cpu->isar.regs[ID_ISAR1] = 0x02111000; + cpu->isar.regs[ID_ISAR2] = 0x21112231; + cpu->isar.regs[ID_ISAR3] = 0x01111110; + cpu->isar.regs[ID_ISAR4] = 0x01310102; + cpu->isar.regs[ID_ISAR5] = 0x00000000; + cpu->isar.regs[ID_ISAR6] = 0x00000000; } static void cortex_m4_initfn(Object *obj) @@ -1914,24 +2427,24 @@ static void cortex_m4_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_VFP4); cpu->midr = 0x410fc240; 
/* r0p0 */ cpu->pmsav7_dregion = 8; - cpu->isar.mvfr0 = 0x10110021; - cpu->isar.mvfr1 = 0x11000011; - cpu->isar.mvfr2 = 0x00000000; + cpu->isar.regs[MVFR0] = 0x10110021; + cpu->isar.regs[MVFR1] = 0x11000011; + cpu->isar.regs[MVFR2] = 0x00000000; cpu->id_pfr0 = 0x00000030; cpu->id_pfr1 = 0x00000200; - cpu->id_dfr0 = 0x00100000; + cpu->isar.regs[ID_DFR0] = 0x00100000; cpu->id_afr0 = 0x00000000; - cpu->id_mmfr0 = 0x00000030; - cpu->id_mmfr1 = 0x00000000; - cpu->id_mmfr2 = 0x00000000; - cpu->id_mmfr3 = 0x00000000; - cpu->isar.id_isar0 = 0x01141110; - cpu->isar.id_isar1 = 0x02111000; - cpu->isar.id_isar2 = 0x21112231; - cpu->isar.id_isar3 = 0x01111110; - cpu->isar.id_isar4 = 0x01310102; - cpu->isar.id_isar5 = 0x00000000; - cpu->isar.id_isar6 = 0x00000000; + cpu->isar.regs[ID_MMFR0] = 0x00000030; + cpu->isar.regs[ID_MMFR1] = 0x00000000; + cpu->isar.regs[ID_MMFR2] = 0x00000000; + cpu->isar.regs[ID_MMFR3] = 0x00000000; + cpu->isar.regs[ID_ISAR0] = 0x01141110; + cpu->isar.regs[ID_ISAR1] = 0x02111000; + cpu->isar.regs[ID_ISAR2] = 0x21112231; + cpu->isar.regs[ID_ISAR3] = 0x01111110; + cpu->isar.regs[ID_ISAR4] = 0x01310102; + cpu->isar.regs[ID_ISAR5] = 0x00000000; + cpu->isar.regs[ID_ISAR6] = 0x00000000; } static void cortex_m33_initfn(Object *obj) @@ -1947,24 +2460,24 @@ static void cortex_m33_initfn(Object *obj) cpu->midr = 0x410fd213; /* r0p3 */ cpu->pmsav7_dregion = 16; cpu->sau_sregion = 8; - cpu->isar.mvfr0 = 0x10110021; - cpu->isar.mvfr1 = 0x11000011; - cpu->isar.mvfr2 = 0x00000040; + cpu->isar.regs[MVFR0] = 0x10110021; + cpu->isar.regs[MVFR1] = 0x11000011; + cpu->isar.regs[MVFR2] = 0x00000040; cpu->id_pfr0 = 0x00000030; cpu->id_pfr1 = 0x00000210; - cpu->id_dfr0 = 0x00200000; + cpu->isar.regs[ID_DFR0] = 0x00200000; cpu->id_afr0 = 0x00000000; - cpu->id_mmfr0 = 0x00101F40; - cpu->id_mmfr1 = 0x00000000; - cpu->id_mmfr2 = 0x01000000; - cpu->id_mmfr3 = 0x00000000; - cpu->isar.id_isar0 = 0x01101110; - cpu->isar.id_isar1 = 0x02212000; - cpu->isar.id_isar2 = 0x20232232; - cpu->isar.id_isar3 = 0x01111131; - cpu->isar.id_isar4 = 0x01310132; - cpu->isar.id_isar5 = 0x00000000; - cpu->isar.id_isar6 = 0x00000000; + cpu->isar.regs[ID_MMFR0] = 0x00101F40; + cpu->isar.regs[ID_MMFR1] = 0x00000000; + cpu->isar.regs[ID_MMFR2] = 0x01000000; + cpu->isar.regs[ID_MMFR3] = 0x00000000; + cpu->isar.regs[ID_ISAR0] = 0x01101110; + cpu->isar.regs[ID_ISAR1] = 0x02212000; + cpu->isar.regs[ID_ISAR2] = 0x20232232; + cpu->isar.regs[ID_ISAR3] = 0x01111131; + cpu->isar.regs[ID_ISAR4] = 0x01310132; + cpu->isar.regs[ID_ISAR5] = 0x00000000; + cpu->isar.regs[ID_ISAR6] = 0x00000000; cpu->clidr = 0x00000000; cpu->ctr = 0x8000c000; } @@ -2003,19 +2516,19 @@ static void cortex_r5_initfn(Object *obj) cpu->midr = 0x411fc153; /* r1p3 */ cpu->id_pfr0 = 0x0131; cpu->id_pfr1 = 0x001; - cpu->id_dfr0 = 0x010400; + cpu->isar.regs[ID_DFR0] = 0x010400; cpu->id_afr0 = 0x0; - cpu->id_mmfr0 = 0x0210030; - cpu->id_mmfr1 = 0x00000000; - cpu->id_mmfr2 = 0x01200000; - cpu->id_mmfr3 = 0x0211; - cpu->isar.id_isar0 = 0x02101111; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232141; - cpu->isar.id_isar3 = 0x01112131; - cpu->isar.id_isar4 = 0x0010142; - cpu->isar.id_isar5 = 0x0; - cpu->isar.id_isar6 = 0x0; + cpu->isar.regs[ID_MMFR0] = 0x0210030; + cpu->isar.regs[ID_MMFR1] = 0x00000000; + cpu->isar.regs[ID_MMFR2] = 0x01200000; + cpu->isar.regs[ID_MMFR3] = 0x0211; + cpu->isar.regs[ID_ISAR0] = 0x02101111; + cpu->isar.regs[ID_ISAR1] = 0x13112111; + cpu->isar.regs[ID_ISAR2] = 0x21232141; + cpu->isar.regs[ID_ISAR3] = 0x01112131; + 
cpu->isar.regs[ID_ISAR4] = 0x0010142; + cpu->isar.regs[ID_ISAR5] = 0x0; + cpu->isar.regs[ID_ISAR6] = 0x0; cpu->mp_is_up = true; cpu->pmsav7_dregion = 16; define_arm_cp_regs(cpu, cortexr5_cp_reginfo); @@ -2027,8 +2540,8 @@ static void cortex_r5f_initfn(Object *obj) cortex_r5_initfn(obj); set_feature(&cpu->env, ARM_FEATURE_VFP3); - cpu->isar.mvfr0 = 0x10110221; - cpu->isar.mvfr1 = 0x00000011; + cpu->isar.regs[MVFR0] = 0x10110221; + cpu->isar.regs[MVFR1] = 0x00000011; } static const ARMCPRegInfo cortexa8_cp_reginfo[] = { @@ -2052,24 +2565,24 @@ static void cortex_a8_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_EL3); cpu->midr = 0x410fc080; cpu->reset_fpsid = 0x410330c0; - cpu->isar.mvfr0 = 0x11110222; - cpu->isar.mvfr1 = 0x00011111; + cpu->isar.regs[MVFR0] = 0x11110222; + cpu->isar.regs[MVFR1] = 0x00011111; cpu->ctr = 0x82048004; cpu->reset_sctlr = 0x00c50078; cpu->id_pfr0 = 0x1031; cpu->id_pfr1 = 0x11; - cpu->id_dfr0 = 0x400; + cpu->isar.regs[ID_DFR0] = 0x400; cpu->id_afr0 = 0; - cpu->id_mmfr0 = 0x31100003; - cpu->id_mmfr1 = 0x20000000; - cpu->id_mmfr2 = 0x01202000; - cpu->id_mmfr3 = 0x11; - cpu->isar.id_isar0 = 0x00101111; - cpu->isar.id_isar1 = 0x12112111; - cpu->isar.id_isar2 = 0x21232031; - cpu->isar.id_isar3 = 0x11112131; - cpu->isar.id_isar4 = 0x00111142; - cpu->dbgdidr = 0x15141000; + cpu->isar.regs[ID_MMFR0] = 0x31100003; + cpu->isar.regs[ID_MMFR1] = 0x20000000; + cpu->isar.regs[ID_MMFR2] = 0x01202000; + cpu->isar.regs[ID_MMFR3] = 0x11; + cpu->isar.regs[ID_ISAR0] = 0x00101111; + cpu->isar.regs[ID_ISAR1] = 0x12112111; + cpu->isar.regs[ID_ISAR2] = 0x21232031; + cpu->isar.regs[ID_ISAR3] = 0x11112131; + cpu->isar.regs[ID_ISAR4] = 0x00111142; + cpu->isar.regs[DBGDIDR] = 0x15141000; cpu->clidr = (1 << 27) | (2 << 24) | 3; cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */ cpu->ccsidr[1] = 0x2007e01a; /* 16k L1 icache. */ @@ -2125,24 +2638,24 @@ static void cortex_a9_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_CBAR); cpu->midr = 0x410fc090; cpu->reset_fpsid = 0x41033090; - cpu->isar.mvfr0 = 0x11110222; - cpu->isar.mvfr1 = 0x01111111; + cpu->isar.regs[MVFR0] = 0x11110222; + cpu->isar.regs[MVFR1] = 0x01111111; cpu->ctr = 0x80038003; cpu->reset_sctlr = 0x00c50078; cpu->id_pfr0 = 0x1031; cpu->id_pfr1 = 0x11; - cpu->id_dfr0 = 0x000; + cpu->isar.regs[ID_DFR0] = 0x000; cpu->id_afr0 = 0; - cpu->id_mmfr0 = 0x00100103; - cpu->id_mmfr1 = 0x20000000; - cpu->id_mmfr2 = 0x01230000; - cpu->id_mmfr3 = 0x00002111; - cpu->isar.id_isar0 = 0x00101111; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232041; - cpu->isar.id_isar3 = 0x11112131; - cpu->isar.id_isar4 = 0x00111142; - cpu->dbgdidr = 0x35141000; + cpu->isar.regs[ID_MMFR0] = 0x00100103; + cpu->isar.regs[ID_MMFR1] = 0x20000000; + cpu->isar.regs[ID_MMFR2] = 0x01230000; + cpu->isar.regs[ID_MMFR3] = 0x00002111; + cpu->isar.regs[ID_ISAR0] = 0x00101111; + cpu->isar.regs[ID_ISAR1] = 0x13112111; + cpu->isar.regs[ID_ISAR2] = 0x21232041; + cpu->isar.regs[ID_ISAR3] = 0x11112131; + cpu->isar.regs[ID_ISAR4] = 0x00111142; + cpu->isar.regs[DBGDIDR] = 0x35141000; cpu->clidr = (1 << 27) | (1 << 24) | 3; cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */ cpu->ccsidr[1] = 0x200fe019; /* 16k L1 icache. 
*/ @@ -2190,27 +2703,27 @@ static void cortex_a7_initfn(Object *obj) cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A7; cpu->midr = 0x410fc075; cpu->reset_fpsid = 0x41023075; - cpu->isar.mvfr0 = 0x10110222; - cpu->isar.mvfr1 = 0x11111111; + cpu->isar.regs[MVFR0] = 0x10110222; + cpu->isar.regs[MVFR1] = 0x11111111; cpu->ctr = 0x84448003; cpu->reset_sctlr = 0x00c50078; cpu->id_pfr0 = 0x00001131; cpu->id_pfr1 = 0x00011011; - cpu->id_dfr0 = 0x02010555; + cpu->isar.regs[ID_DFR0] = 0x02010555; cpu->id_afr0 = 0x00000000; - cpu->id_mmfr0 = 0x10101105; - cpu->id_mmfr1 = 0x40000000; - cpu->id_mmfr2 = 0x01240000; - cpu->id_mmfr3 = 0x02102211; + cpu->isar.regs[ID_MMFR0] = 0x10101105; + cpu->isar.regs[ID_MMFR1] = 0x40000000; + cpu->isar.regs[ID_MMFR2] = 0x01240000; + cpu->isar.regs[ID_MMFR3] = 0x02102211; /* a7_mpcore_r0p5_trm, page 4-4 gives 0x01101110; but * table 4-41 gives 0x02101110, which includes the arm div insns. */ - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232041; - cpu->isar.id_isar3 = 0x11112131; - cpu->isar.id_isar4 = 0x10011142; - cpu->dbgdidr = 0x3515f005; + cpu->isar.regs[ID_ISAR0] = 0x02101110; + cpu->isar.regs[ID_ISAR1] = 0x13112111; + cpu->isar.regs[ID_ISAR2] = 0x21232041; + cpu->isar.regs[ID_ISAR3] = 0x11112131; + cpu->isar.regs[ID_ISAR4] = 0x10011142; + cpu->isar.regs[DBGDIDR] = 0x3515f005; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */ cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */ @@ -2236,24 +2749,24 @@ static void cortex_a15_initfn(Object *obj) cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A15; cpu->midr = 0x412fc0f1; cpu->reset_fpsid = 0x410430f0; - cpu->isar.mvfr0 = 0x10110222; - cpu->isar.mvfr1 = 0x11111111; + cpu->isar.regs[MVFR0] = 0x10110222; + cpu->isar.regs[MVFR1] = 0x11111111; cpu->ctr = 0x8444c004; cpu->reset_sctlr = 0x00c50078; cpu->id_pfr0 = 0x00001131; cpu->id_pfr1 = 0x00011011; - cpu->id_dfr0 = 0x02010555; + cpu->isar.regs[ID_DFR0] = 0x02010555; cpu->id_afr0 = 0x00000000; - cpu->id_mmfr0 = 0x10201105; - cpu->id_mmfr1 = 0x20000000; - cpu->id_mmfr2 = 0x01240000; - cpu->id_mmfr3 = 0x02102211; - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232041; - cpu->isar.id_isar3 = 0x11112131; - cpu->isar.id_isar4 = 0x10011142; - cpu->dbgdidr = 0x3515f021; + cpu->isar.regs[ID_MMFR0] = 0x10201105; + cpu->isar.regs[ID_MMFR1] = 0x20000000; + cpu->isar.regs[ID_MMFR2] = 0x01240000; + cpu->isar.regs[ID_MMFR3] = 0x02102211; + cpu->isar.regs[ID_ISAR0] = 0x02101110; + cpu->isar.regs[ID_ISAR1] = 0x13112111; + cpu->isar.regs[ID_ISAR2] = 0x21232041; + cpu->isar.regs[ID_ISAR3] = 0x11112131; + cpu->isar.regs[ID_ISAR4] = 0x10011142; + cpu->isar.regs[DBGDIDR] = 0x3515f021; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */ cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */ @@ -2446,7 +2959,8 @@ static void arm_max_initfn(Object *obj) cortex_a15_initfn(obj); /* old-style VFP short-vector support */ - cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1); + cpu->isar.regs[MVFR0] = FIELD_DP32(cpu->isar.regs[MVFR0], MVFR0, + FPSHVEC, 1); #ifdef CONFIG_USER_ONLY /* We don't set these in system emulation mode for the moment, @@ -2457,35 +2971,39 @@ static void arm_max_initfn(Object *obj) { uint32_t t; - t = cpu->isar.id_isar5; + t = cpu->isar.regs[ID_ISAR5]; t = FIELD_DP32(t, ID_ISAR5, AES, 2); t = FIELD_DP32(t, ID_ISAR5, SHA1, 1); t = FIELD_DP32(t, ID_ISAR5, SHA2, 1); t = FIELD_DP32(t, ID_ISAR5, CRC32, 1); t = FIELD_DP32(t, ID_ISAR5, RDM, 
1); t = FIELD_DP32(t, ID_ISAR5, VCMA, 1); - cpu->isar.id_isar5 = t; + cpu->isar.regs[ID_ISAR5] = t; - t = cpu->isar.id_isar6; + t = cpu->isar.regs[ID_ISAR6]; t = FIELD_DP32(t, ID_ISAR6, JSCVT, 1); t = FIELD_DP32(t, ID_ISAR6, DP, 1); t = FIELD_DP32(t, ID_ISAR6, FHM, 1); t = FIELD_DP32(t, ID_ISAR6, SB, 1); t = FIELD_DP32(t, ID_ISAR6, SPECRES, 1); - cpu->isar.id_isar6 = t; + cpu->isar.regs[ID_ISAR6] = t; - t = cpu->isar.mvfr1; + t = cpu->isar.regs[MVFR1]; t = FIELD_DP32(t, MVFR1, FPHP, 2); /* v8.0 FP support */ - cpu->isar.mvfr1 = t; + cpu->isar.regs[MVFR1] = t; - t = cpu->isar.mvfr2; + t = cpu->isar.regs[MVFR2]; t = FIELD_DP32(t, MVFR2, SIMDMISC, 3); /* SIMD MaxNum */ t = FIELD_DP32(t, MVFR2, FPMISC, 4); /* FP MaxNum */ - cpu->isar.mvfr2 = t; + cpu->isar.regs[MVFR2] = t; + + t = cpu->isar.regs[ID_MMFR3]; + t = FIELD_DP32(t, ID_MMFR3, PAN, 2); /* ATS1E1 */ + cpu->isar.regs[ID_MMFR3] = t; - t = cpu->id_mmfr4; + t = cpu->isar.regs[ID_MMFR4]; t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */ - cpu->id_mmfr4 = t; + cpu->isar.regs[ID_MMFR4] = t; } #endif } diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 86eb79cd02a322a4fda1f3294f265c79d53edfdf..eb875e112abbe624eb04b52cc5c2bb9800aaf48d 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -63,6 +63,37 @@ #define ARMV7M_EXCP_PENDSV 14 #define ARMV7M_EXCP_SYSTICK 15 +typedef enum CPUIDReg { + MIDR_EL1, + ID_ISAR0, + ID_ISAR1, + ID_ISAR2, + ID_ISAR3, + ID_ISAR4, + ID_ISAR5, + ID_ISAR6, + ID_MMFR0, + ID_MMFR1, + ID_MMFR2, + ID_MMFR3, + ID_MMFR4, + ID_AA64ISAR0, + ID_AA64ISAR1, + ID_AA64PFR0, + ID_AA64PFR1, + ID_AA64MMFR0, + ID_AA64MMFR1, + ID_AA64MMFR2, + ID_AA64DFR0, + ID_AA64DFR1, + ID_DFR0, + MVFR0, + MVFR1, + MVFR2, + DBGDIDR, + ID_MAX, +} CPUIDReg; + /* For M profile, some registers are banked secure vs non-secure; * these are represented as a 2-element array where the first element * is the non-secure copy and the second is the secure copy. @@ -848,24 +879,14 @@ struct ARMCPU { * prefix means a constant register. * Some of these registers are split out into a substructure that * is shared with the translators to control the ISA. + * + * Note that if you add an ID register to the ARMISARegisters struct + * you need to also update the 32-bit and 64-bit versions of the + * kvm_arm_get_host_cpu_features() function to correctly populate the + * field by reading the value from the KVM vCPU. 
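* With this layout the ID registers live in a single array indexed by
* CPUIDReg: code reads cpu->isar.regs[ID_AA64ISAR0] directly and tests
* individual fields with, e.g.,
*     FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, AES)
* as the updated isar_feature_* helpers below do.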
*/ struct ARMISARegisters { - uint32_t id_isar0; - uint32_t id_isar1; - uint32_t id_isar2; - uint32_t id_isar3; - uint32_t id_isar4; - uint32_t id_isar5; - uint32_t id_isar6; - uint32_t mvfr0; - uint32_t mvfr1; - uint32_t mvfr2; - uint64_t id_aa64isar0; - uint64_t id_aa64isar1; - uint64_t id_aa64pfr0; - uint64_t id_aa64pfr1; - uint64_t id_aa64mmfr0; - uint64_t id_aa64mmfr1; + uint64_t regs[ID_MAX]; } isar; uint32_t midr; uint32_t revidr; @@ -874,20 +895,11 @@ struct ARMCPU { uint32_t reset_sctlr; uint32_t id_pfr0; uint32_t id_pfr1; - uint32_t id_dfr0; uint64_t pmceid0; uint64_t pmceid1; uint32_t id_afr0; - uint32_t id_mmfr0; - uint32_t id_mmfr1; - uint32_t id_mmfr2; - uint32_t id_mmfr3; - uint32_t id_mmfr4; - uint64_t id_aa64dfr0; - uint64_t id_aa64dfr1; uint64_t id_aa64afr0; uint64_t id_aa64afr1; - uint32_t dbgdidr; uint32_t clidr; uint64_t mp_affinity; /* MP ID without feature bits */ /* The elements of this array are the CCSIDR values for each cache, @@ -1679,6 +1691,17 @@ FIELD(ID_ISAR6, DP, 4, 4) FIELD(ID_ISAR6, FHM, 8, 4) FIELD(ID_ISAR6, SB, 12, 4) FIELD(ID_ISAR6, SPECRES, 16, 4) +FIELD(ID_ISAR6, BF16, 20, 4) +FIELD(ID_ISAR6, I8MM, 24, 4) + +FIELD(ID_MMFR3, CMAINTVA, 0, 4) +FIELD(ID_MMFR3, CMAINTSW, 4, 4) +FIELD(ID_MMFR3, BPMAINT, 8, 4) +FIELD(ID_MMFR3, MAINTBCST, 12, 4) +FIELD(ID_MMFR3, PAN, 16, 4) +FIELD(ID_MMFR3, COHWALK, 20, 4) +FIELD(ID_MMFR3, CMEMSZ, 24, 4) +FIELD(ID_MMFR3, SUPERSEC, 28, 4) FIELD(ID_MMFR4, SPECSEI, 0, 4) FIELD(ID_MMFR4, AC2, 4, 4) @@ -1715,6 +1738,9 @@ FIELD(ID_AA64ISAR1, GPI, 28, 4) FIELD(ID_AA64ISAR1, FRINTTS, 32, 4) FIELD(ID_AA64ISAR1, SB, 36, 4) FIELD(ID_AA64ISAR1, SPECRES, 40, 4) +FIELD(ID_AA64ISAR1, BF16, 44, 4) +FIELD(ID_AA64ISAR1, DGH, 48, 4) +FIELD(ID_AA64ISAR1, I8MM, 52, 4) FIELD(ID_AA64PFR0, EL0, 0, 4) FIELD(ID_AA64PFR0, EL1, 4, 4) @@ -1725,11 +1751,18 @@ FIELD(ID_AA64PFR0, ADVSIMD, 20, 4) FIELD(ID_AA64PFR0, GIC, 24, 4) FIELD(ID_AA64PFR0, RAS, 28, 4) FIELD(ID_AA64PFR0, SVE, 32, 4) +FIELD(ID_AA64PFR0, SEL2, 36, 4) +FIELD(ID_AA64PFR0, MPAM, 40, 4) +FIELD(ID_AA64PFR0, AMU, 44, 4) +FIELD(ID_AA64PFR0, DIT, 44, 4) +FIELD(ID_AA64PFR0, CSV2, 56, 4) +FIELD(ID_AA64PFR0, CSV3, 60, 4) FIELD(ID_AA64PFR1, BT, 0, 4) FIELD(ID_AA64PFR1, SBSS, 4, 4) FIELD(ID_AA64PFR1, MTE, 8, 4) FIELD(ID_AA64PFR1, RAS_FRAC, 12, 4) +FIELD(ID_AA64PFR1, MPAM_FRAC, 16, 4) FIELD(ID_AA64MMFR0, PARANGE, 0, 4) FIELD(ID_AA64MMFR0, ASIDBITS, 4, 4) @@ -1743,6 +1776,8 @@ FIELD(ID_AA64MMFR0, TGRAN16_2, 32, 4) FIELD(ID_AA64MMFR0, TGRAN64_2, 36, 4) FIELD(ID_AA64MMFR0, TGRAN4_2, 40, 4) FIELD(ID_AA64MMFR0, EXS, 44, 4) +FIELD(ID_AA64MMFR0, FGT, 56, 4) +FIELD(ID_AA64MMFR0, ECV, 60, 4) FIELD(ID_AA64MMFR1, HAFDBS, 0, 4) FIELD(ID_AA64MMFR1, VMIDBITS, 4, 4) @@ -1752,6 +1787,35 @@ FIELD(ID_AA64MMFR1, LO, 16, 4) FIELD(ID_AA64MMFR1, PAN, 20, 4) FIELD(ID_AA64MMFR1, SPECSEI, 24, 4) FIELD(ID_AA64MMFR1, XNX, 28, 4) +FIELD(ID_AA64MMFR1, TWED, 32, 4) +FIELD(ID_AA64MMFR1, ETS, 36, 4) + +FIELD(ID_AA64MMFR2, CNP, 0, 4) +FIELD(ID_AA64MMFR2, UAO, 4, 4) +FIELD(ID_AA64MMFR2, LSM, 8, 4) +FIELD(ID_AA64MMFR2, IESB, 12, 4) +FIELD(ID_AA64MMFR2, VARANGE, 16, 4) +FIELD(ID_AA64MMFR2, CCIDX, 20, 4) +FIELD(ID_AA64MMFR2, NV, 24, 4) +FIELD(ID_AA64MMFR2, ST, 28, 4) +FIELD(ID_AA64MMFR2, AT, 32, 4) +FIELD(ID_AA64MMFR2, IDS, 36, 4) +FIELD(ID_AA64MMFR2, FWB, 40, 4) +FIELD(ID_AA64MMFR2, TTL, 48, 4) +FIELD(ID_AA64MMFR2, BBM, 52, 4) +FIELD(ID_AA64MMFR2, EVT, 56, 4) +FIELD(ID_AA64MMFR2, E0PD, 60, 4) + +FIELD(ID_AA64DFR0, DEBUGVER, 0, 4) +FIELD(ID_AA64DFR0, TRACEVER, 4, 4) +FIELD(ID_AA64DFR0, PMUVER, 8, 4) +FIELD(ID_AA64DFR0, BRPS, 12, 4) 
+FIELD(ID_AA64DFR0, WRPS, 20, 4) +FIELD(ID_AA64DFR0, CTX_CMPS, 28, 4) +FIELD(ID_AA64DFR0, PMSVER, 32, 4) +FIELD(ID_AA64DFR0, DOUBLELOCK, 36, 4) +FIELD(ID_AA64DFR0, TRACEFILT, 40, 4) +FIELD(ID_AA64DFR0, MUPMU, 48, 4) FIELD(ID_DFR0, COPDBG, 0, 4) FIELD(ID_DFR0, COPSDBG, 4, 4) @@ -1762,6 +1826,13 @@ FIELD(ID_DFR0, MPROFDBG, 20, 4) FIELD(ID_DFR0, PERFMON, 24, 4) FIELD(ID_DFR0, TRACEFILT, 28, 4) +FIELD(DBGDIDR, SE_IMP, 12, 1) +FIELD(DBGDIDR, NSUHD_IMP, 14, 1) +FIELD(DBGDIDR, VERSION, 16, 4) +FIELD(DBGDIDR, CTX_CMPS, 20, 4) +FIELD(DBGDIDR, BRPS, 24, 4) +FIELD(DBGDIDR, WRPS, 28, 4) + FIELD(MVFR0, SIMDREG, 0, 4) FIELD(MVFR0, FPSP, 4, 4) FIELD(MVFR0, FPDP, 8, 4) @@ -3310,77 +3381,77 @@ extern const uint64_t pred_esz_masks[4]; */ static inline bool isar_feature_thumb_div(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) != 0; + return FIELD_EX32(id->regs[ID_ISAR0], ID_ISAR0, DIVIDE) != 0; } static inline bool isar_feature_arm_div(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) > 1; + return FIELD_EX32(id->regs[ID_ISAR0], ID_ISAR0, DIVIDE) > 1; } static inline bool isar_feature_jazelle(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar1, ID_ISAR1, JAZELLE) != 0; + return FIELD_EX32(id->regs[ID_ISAR1], ID_ISAR1, JAZELLE) != 0; } static inline bool isar_feature_aa32_aes(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) != 0; + return FIELD_EX32(id->regs[ID_ISAR5], ID_ISAR5, AES) != 0; } static inline bool isar_feature_aa32_pmull(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) > 1; + return FIELD_EX32(id->regs[ID_ISAR5], ID_ISAR5, AES) > 1; } static inline bool isar_feature_aa32_sha1(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA1) != 0; + return FIELD_EX32(id->regs[ID_ISAR5], ID_ISAR5, SHA1) != 0; } static inline bool isar_feature_aa32_sha2(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA2) != 0; + return FIELD_EX32(id->regs[ID_ISAR5], ID_ISAR5, SHA2) != 0; } static inline bool isar_feature_aa32_crc32(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar5, ID_ISAR5, CRC32) != 0; + return FIELD_EX32(id->regs[ID_ISAR5], ID_ISAR5, CRC32) != 0; } static inline bool isar_feature_aa32_rdm(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar5, ID_ISAR5, RDM) != 0; + return FIELD_EX32(id->regs[ID_ISAR5], ID_ISAR5, RDM) != 0; } static inline bool isar_feature_aa32_vcma(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar5, ID_ISAR5, VCMA) != 0; + return FIELD_EX32(id->regs[ID_ISAR5], ID_ISAR5, VCMA) != 0; } static inline bool isar_feature_aa32_jscvt(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar6, ID_ISAR6, JSCVT) != 0; + return FIELD_EX32(id->regs[ID_ISAR6], ID_ISAR6, JSCVT) != 0; } static inline bool isar_feature_aa32_dp(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar6, ID_ISAR6, DP) != 0; + return FIELD_EX32(id->regs[ID_ISAR6], ID_ISAR6, DP) != 0; } static inline bool isar_feature_aa32_fhm(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar6, ID_ISAR6, FHM) != 0; + return FIELD_EX32(id->regs[ID_ISAR6], ID_ISAR6, FHM) != 0; } static inline bool isar_feature_aa32_sb(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar6, ID_ISAR6, SB) != 0; + return FIELD_EX32(id->regs[ID_ISAR6], ID_ISAR6, SB) != 0; } static inline bool isar_feature_aa32_predinv(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar6, ID_ISAR6, SPECRES) != 0; + return 
FIELD_EX32(id->regs[ID_ISAR6], ID_ISAR6, SPECRES) != 0; } static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id) @@ -3390,24 +3461,24 @@ static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id) * the ARMv8.2-FP16 extension is implemented for aa32 mode. * At which point we can properly set and check MVFR1.FPHP. */ - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1; + return FIELD_EX64(id->regs[ID_AA64PFR0], ID_AA64PFR0, FP) == 1; } static inline bool isar_feature_aa32_fp_d32(const ARMISARegisters *id) { /* Return true if D16-D31 are implemented */ - return FIELD_EX64(id->mvfr0, MVFR0, SIMDREG) >= 2; + return FIELD_EX64(id->regs[MVFR0], MVFR0, SIMDREG) >= 2; } static inline bool isar_feature_aa32_fpshvec(const ARMISARegisters *id) { - return FIELD_EX64(id->mvfr0, MVFR0, FPSHVEC) > 0; + return FIELD_EX64(id->regs[MVFR0], MVFR0, FPSHVEC) > 0; } static inline bool isar_feature_aa32_fpdp(const ARMISARegisters *id) { /* Return true if CPU supports double precision floating point */ - return FIELD_EX64(id->mvfr0, MVFR0, FPDP) > 0; + return FIELD_EX64(id->regs[MVFR0], MVFR0, FPDP) > 0; } /* @@ -3417,32 +3488,49 @@ static inline bool isar_feature_aa32_fpdp(const ARMISARegisters *id) */ static inline bool isar_feature_aa32_fp16_spconv(const ARMISARegisters *id) { - return FIELD_EX64(id->mvfr1, MVFR1, FPHP) > 0; + return FIELD_EX64(id->regs[MVFR1], MVFR1, FPHP) > 0; } static inline bool isar_feature_aa32_fp16_dpconv(const ARMISARegisters *id) { - return FIELD_EX64(id->mvfr1, MVFR1, FPHP) > 1; + return FIELD_EX64(id->regs[MVFR1], MVFR1, FPHP) > 1; } static inline bool isar_feature_aa32_vsel(const ARMISARegisters *id) { - return FIELD_EX64(id->mvfr2, MVFR2, FPMISC) >= 1; + return FIELD_EX64(id->regs[MVFR2], MVFR2, FPMISC) >= 1; } static inline bool isar_feature_aa32_vcvt_dr(const ARMISARegisters *id) { - return FIELD_EX64(id->mvfr2, MVFR2, FPMISC) >= 2; + return FIELD_EX64(id->regs[MVFR2], MVFR2, FPMISC) >= 2; } static inline bool isar_feature_aa32_vrint(const ARMISARegisters *id) { - return FIELD_EX64(id->mvfr2, MVFR2, FPMISC) >= 3; + return FIELD_EX64(id->regs[MVFR2], MVFR2, FPMISC) >= 3; } static inline bool isar_feature_aa32_vminmaxnm(const ARMISARegisters *id) { - return FIELD_EX64(id->mvfr2, MVFR2, FPMISC) >= 4; + return FIELD_EX64(id->regs[MVFR2], MVFR2, FPMISC) >= 4; +} + +static inline bool isar_feature_aa32_pan(const ARMISARegisters *id) +{ + return FIELD_EX32(id->regs[ID_MMFR3], ID_MMFR3, PAN) != 0; +} + +static inline bool isar_feature_aa32_ats1e1(const ARMISARegisters *id) +{ + return FIELD_EX32(id->regs[ID_MMFR3], ID_MMFR3, PAN) >= 2; +} + +static inline bool isar_feature_aa32_pmu_8_1(const ARMISARegisters *id) +{ + /* 0xf means "non-standard IMPDEF PMU" */ + return FIELD_EX32(id->regs[ID_DFR0], ID_DFR0, PERFMON) >= 4 && + FIELD_EX32(id->regs[ID_DFR0], ID_DFR0, PERFMON) != 0xf; } /* @@ -3450,92 +3538,92 @@ static inline bool isar_feature_aa32_vminmaxnm(const ARMISARegisters *id) */ static inline bool isar_feature_aa64_aes(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, AES) != 0; } static inline bool isar_feature_aa64_pmull(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) > 1; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, AES) > 1; } static inline bool isar_feature_aa64_sha1(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA1) != 0; + return 
FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, SHA1) != 0; } static inline bool isar_feature_aa64_sha256(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, SHA2) != 0; } static inline bool isar_feature_aa64_sha512(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) > 1; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, SHA2) > 1; } static inline bool isar_feature_aa64_crc32(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, CRC32) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, CRC32) != 0; } static inline bool isar_feature_aa64_atomics(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, ATOMIC) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, ATOMIC) != 0; } static inline bool isar_feature_aa64_rdm(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RDM) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, RDM) != 0; } static inline bool isar_feature_aa64_sha3(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA3) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, SHA3) != 0; } static inline bool isar_feature_aa64_sm3(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM3) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, SM3) != 0; } static inline bool isar_feature_aa64_sm4(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM4) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, SM4) != 0; } static inline bool isar_feature_aa64_dp(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, DP) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, DP) != 0; } static inline bool isar_feature_aa64_fhm(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, FHM) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, FHM) != 0; } static inline bool isar_feature_aa64_condm_4(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TS) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, TS) != 0; } static inline bool isar_feature_aa64_condm_5(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TS) >= 2; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, TS) >= 2; } static inline bool isar_feature_aa64_rndr(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RNDR) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, RNDR) != 0; } static inline bool isar_feature_aa64_jscvt(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, JSCVT) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR1], ID_AA64ISAR1, JSCVT) != 0; } static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR1], ID_AA64ISAR1, FCMA) != 0; } static inline bool isar_feature_aa64_pauth(const ARMISARegisters *id) @@ -3546,7 +3634,7 @@ static inline bool isar_feature_aa64_pauth(const ARMISARegisters *id) * defined algorithms, and thus API+GPI, and this predicate controls * migration of the 128-bit keys. 
*/ - return (id->id_aa64isar1 & + return (id->regs[ID_AA64ISAR1] & (FIELD_DP64(0, ID_AA64ISAR1, APA, 0xf) | FIELD_DP64(0, ID_AA64ISAR1, API, 0xf) | FIELD_DP64(0, ID_AA64ISAR1, GPA, 0xf) | @@ -3555,43 +3643,64 @@ static inline bool isar_feature_aa64_pauth(const ARMISARegisters *id) static inline bool isar_feature_aa64_sb(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SB) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR1], ID_AA64ISAR1, SB) != 0; } static inline bool isar_feature_aa64_predinv(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SPECRES) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR1], ID_AA64ISAR1, SPECRES) != 0; } static inline bool isar_feature_aa64_frint(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FRINTTS) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR1], ID_AA64ISAR1, FRINTTS) != 0; } static inline bool isar_feature_aa64_fp16(const ARMISARegisters *id) { /* We always set the AdvSIMD and FP fields identically wrt FP16. */ - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1; + return FIELD_EX64(id->regs[ID_AA64PFR0], ID_AA64PFR0, FP) == 1; } static inline bool isar_feature_aa64_aa32(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL0) >= 2; + return FIELD_EX64(id->regs[ID_AA64PFR0], ID_AA64PFR0, EL0) >= 2; } static inline bool isar_feature_aa64_sve(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0; + return FIELD_EX64(id->regs[ID_AA64PFR0], ID_AA64PFR0, SVE) != 0; } static inline bool isar_feature_aa64_lor(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, LO) != 0; + return FIELD_EX64(id->regs[ID_AA64MMFR1], ID_AA64MMFR1, LO) != 0; +} + +static inline bool isar_feature_aa64_pan(const ARMISARegisters *id) +{ + return FIELD_EX64(id->regs[ID_AA64MMFR1], ID_AA64MMFR1, PAN) != 0; +} + +static inline bool isar_feature_aa64_ats1e1(const ARMISARegisters *id) +{ + return FIELD_EX64(id->regs[ID_AA64MMFR1], ID_AA64MMFR1, PAN) >= 2; } static inline bool isar_feature_aa64_bti(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, BT) != 0; + return FIELD_EX64(id->regs[ID_AA64PFR1], ID_AA64PFR1, BT) != 0; +} + +static inline bool isar_feature_aa64_pmu_8_1(const ARMISARegisters *id) +{ + return FIELD_EX64(id->regs[ID_AA64DFR0], ID_AA64DFR0, PMUVER) >= 4 && + FIELD_EX64(id->regs[ID_AA64DFR0], ID_AA64DFR0, PMUVER) != 0xf; +} + +static inline bool isar_feature_any_pmu_8_1(const ARMISARegisters *id) +{ + return isar_feature_aa64_pmu_8_1(id) || isar_feature_aa32_pmu_8_1(id); } /* @@ -3600,4 +3709,6 @@ static inline bool isar_feature_aa64_bti(const ARMISARegisters *id) #define cpu_isar_feature(name, cpu) \ ({ ARMCPU *cpu_ = (cpu); isar_feature_##name(&cpu_->isar); }) +void arm_cpu_features_to_dict(ARMCPU *cpu, QDict *features); + #endif diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c index 15f4ee9215f128a93783635152a41e01311c6353..a1649f884498e4c3c4ec61e5b5925f3b2bcd3a30 100644 --- a/target/arm/cpu64.c +++ b/target/arm/cpu64.c @@ -116,31 +116,31 @@ static void aarch64_a57_initfn(Object *obj) cpu->midr = 0x411fd070; cpu->revidr = 0x00000000; cpu->reset_fpsid = 0x41034070; - cpu->isar.mvfr0 = 0x10110222; - cpu->isar.mvfr1 = 0x12111111; - cpu->isar.mvfr2 = 0x00000043; + cpu->isar.regs[MVFR0] = 0x10110222; + cpu->isar.regs[MVFR1] = 0x12111111; + cpu->isar.regs[MVFR2] = 0x00000043; cpu->ctr = 0x8444c004; cpu->reset_sctlr = 0x00c50838; cpu->id_pfr0 = 0x00000131; 
cpu->id_pfr1 = 0x00011011; - cpu->id_dfr0 = 0x03010066; + cpu->isar.regs[ID_DFR0] = 0x03010066; cpu->id_afr0 = 0x00000000; - cpu->id_mmfr0 = 0x10101105; - cpu->id_mmfr1 = 0x40000000; - cpu->id_mmfr2 = 0x01260000; - cpu->id_mmfr3 = 0x02102211; - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232042; - cpu->isar.id_isar3 = 0x01112131; - cpu->isar.id_isar4 = 0x00011142; - cpu->isar.id_isar5 = 0x00011121; - cpu->isar.id_isar6 = 0; - cpu->isar.id_aa64pfr0 = 0x00002222; - cpu->id_aa64dfr0 = 0x10305106; - cpu->isar.id_aa64isar0 = 0x00011120; - cpu->isar.id_aa64mmfr0 = 0x00001124; - cpu->dbgdidr = 0x3516d000; + cpu->isar.regs[ID_MMFR0] = 0x10101105; + cpu->isar.regs[ID_MMFR1] = 0x40000000; + cpu->isar.regs[ID_MMFR2] = 0x01260000; + cpu->isar.regs[ID_MMFR3] = 0x02102211; + cpu->isar.regs[ID_ISAR0] = 0x02101110; + cpu->isar.regs[ID_ISAR1] = 0x13112111; + cpu->isar.regs[ID_ISAR2] = 0x21232042; + cpu->isar.regs[ID_ISAR3] = 0x01112131; + cpu->isar.regs[ID_ISAR4] = 0x00011142; + cpu->isar.regs[ID_ISAR5] = 0x00011121; + cpu->isar.regs[ID_ISAR6] = 0; + cpu->isar.regs[ID_AA64PFR0] = 0x00002222; + cpu->isar.regs[ID_AA64DFR0] = 0x10305106; + cpu->isar.regs[ID_AA64ISAR0] = 0x00011120; + cpu->isar.regs[ID_AA64MMFR0] = 0x00001124; + cpu->isar.regs[DBGDIDR] = 0x3516d000; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */ cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */ @@ -170,31 +170,31 @@ static void aarch64_a53_initfn(Object *obj) cpu->midr = 0x410fd034; cpu->revidr = 0x00000000; cpu->reset_fpsid = 0x41034070; - cpu->isar.mvfr0 = 0x10110222; - cpu->isar.mvfr1 = 0x12111111; - cpu->isar.mvfr2 = 0x00000043; + cpu->isar.regs[MVFR0] = 0x10110222; + cpu->isar.regs[MVFR1] = 0x12111111; + cpu->isar.regs[MVFR2] = 0x00000043; cpu->ctr = 0x84448004; /* L1Ip = VIPT */ cpu->reset_sctlr = 0x00c50838; cpu->id_pfr0 = 0x00000131; cpu->id_pfr1 = 0x00011011; - cpu->id_dfr0 = 0x03010066; + cpu->isar.regs[ID_DFR0] = 0x03010066; cpu->id_afr0 = 0x00000000; - cpu->id_mmfr0 = 0x10101105; - cpu->id_mmfr1 = 0x40000000; - cpu->id_mmfr2 = 0x01260000; - cpu->id_mmfr3 = 0x02102211; - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232042; - cpu->isar.id_isar3 = 0x01112131; - cpu->isar.id_isar4 = 0x00011142; - cpu->isar.id_isar5 = 0x00011121; - cpu->isar.id_isar6 = 0; - cpu->isar.id_aa64pfr0 = 0x00002222; - cpu->id_aa64dfr0 = 0x10305106; - cpu->isar.id_aa64isar0 = 0x00011120; - cpu->isar.id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */ - cpu->dbgdidr = 0x3516d000; + cpu->isar.regs[ID_MMFR0] = 0x10101105; + cpu->isar.regs[ID_MMFR1] = 0x40000000; + cpu->isar.regs[ID_MMFR2] = 0x01260000; + cpu->isar.regs[ID_MMFR3] = 0x02102211; + cpu->isar.regs[ID_ISAR0] = 0x02101110; + cpu->isar.regs[ID_ISAR1] = 0x13112111; + cpu->isar.regs[ID_ISAR2] = 0x21232042; + cpu->isar.regs[ID_ISAR3] = 0x01112131; + cpu->isar.regs[ID_ISAR4] = 0x00011142; + cpu->isar.regs[ID_ISAR5] = 0x00011121; + cpu->isar.regs[ID_ISAR6] = 0; + cpu->isar.regs[ID_AA64PFR0] = 0x00002222; + cpu->isar.regs[ID_AA64DFR0] = 0x10305106; + cpu->isar.regs[ID_AA64ISAR0] = 0x00011120; + cpu->isar.regs[ID_AA64MMFR0] = 0x00001122; /* 40 bit physical addr */ + cpu->isar.regs[DBGDIDR] = 0x3516d000; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */ cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */ @@ -224,30 +224,30 @@ static void aarch64_a72_initfn(Object *obj) cpu->midr = 0x410fd083; cpu->revidr = 0x00000000; cpu->reset_fpsid = 0x41034080; - 
cpu->isar.mvfr0 = 0x10110222;
- cpu->isar.mvfr1 = 0x12111111;
- cpu->isar.mvfr2 = 0x00000043;
+ cpu->isar.regs[MVFR0] = 0x10110222;
+ cpu->isar.regs[MVFR1] = 0x12111111;
+ cpu->isar.regs[MVFR2] = 0x00000043;
cpu->ctr = 0x8444c004;
cpu->reset_sctlr = 0x00c50838;
cpu->id_pfr0 = 0x00000131;
cpu->id_pfr1 = 0x00011011;
- cpu->id_dfr0 = 0x03010066;
+ cpu->isar.regs[ID_DFR0] = 0x03010066;
cpu->id_afr0 = 0x00000000;
- cpu->id_mmfr0 = 0x10201105;
- cpu->id_mmfr1 = 0x40000000;
- cpu->id_mmfr2 = 0x01260000;
- cpu->id_mmfr3 = 0x02102211;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232042;
- cpu->isar.id_isar3 = 0x01112131;
- cpu->isar.id_isar4 = 0x00011142;
- cpu->isar.id_isar5 = 0x00011121;
- cpu->isar.id_aa64pfr0 = 0x00002222;
- cpu->id_aa64dfr0 = 0x10305106;
- cpu->isar.id_aa64isar0 = 0x00011120;
- cpu->isar.id_aa64mmfr0 = 0x00001124;
- cpu->dbgdidr = 0x3516d000;
+ cpu->isar.regs[ID_MMFR0] = 0x10201105;
+ cpu->isar.regs[ID_MMFR1] = 0x40000000;
+ cpu->isar.regs[ID_MMFR2] = 0x01260000;
+ cpu->isar.regs[ID_MMFR3] = 0x02102211;
+ cpu->isar.regs[ID_ISAR0] = 0x02101110;
+ cpu->isar.regs[ID_ISAR1] = 0x13112111;
+ cpu->isar.regs[ID_ISAR2] = 0x21232042;
+ cpu->isar.regs[ID_ISAR3] = 0x01112131;
+ cpu->isar.regs[ID_ISAR4] = 0x00011142;
+ cpu->isar.regs[ID_ISAR5] = 0x00011121;
+ cpu->isar.regs[ID_AA64PFR0] = 0x00002222;
+ cpu->isar.regs[ID_AA64DFR0] = 0x10305106;
+ cpu->isar.regs[ID_AA64ISAR0] = 0x00011120;
+ cpu->isar.regs[ID_AA64MMFR0] = 0x00001124;
+ cpu->isar.regs[DBGDIDR] = 0x3516d000;
cpu->clidr = 0x0a200023;
cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
@@ -275,10 +275,30 @@ static void aarch64_kunpeng_920_initfn(Object *obj)
cpu->midr = 0x480fd010;
cpu->ctr = 0x84448004;
- cpu->isar.id_aa64pfr0 = 0x11001111;
- cpu->id_aa64dfr0 = 0x110305408;
- cpu->isar.id_aa64isar0 = 0x10211120;
- cpu->isar.id_aa64mmfr0 = 0x101125;
+ cpu->isar.regs[ID_ISAR0] = 0;
+ cpu->isar.regs[ID_ISAR1] = 0;
+ cpu->isar.regs[ID_ISAR2] = 0;
+ cpu->isar.regs[ID_ISAR3] = 0;
+ cpu->isar.regs[ID_ISAR4] = 0;
+ cpu->isar.regs[ID_ISAR5] = 0;
+ cpu->isar.regs[ID_MMFR0] = 0;
+ cpu->isar.regs[ID_MMFR1] = 0;
+ cpu->isar.regs[ID_MMFR2] = 0;
+ cpu->isar.regs[ID_MMFR3] = 0;
+ cpu->isar.regs[ID_MMFR4] = 0;
+ cpu->isar.regs[MVFR0] = 0;
+ cpu->isar.regs[MVFR1] = 0;
+ cpu->isar.regs[MVFR2] = 0;
+ cpu->isar.regs[ID_DFR0] = 0;
+ cpu->id_pfr0 = 0;
+ cpu->id_pfr1 = 0;
+ cpu->isar.regs[ID_AA64PFR0] = 0x0000010011111111;
+ cpu->isar.regs[ID_AA64DFR0] = 0x110305408;
+ cpu->isar.regs[ID_AA64ISAR0] = 0x0001100010211120;
+ cpu->isar.regs[ID_AA64ISAR1] = 0x00011001;
+ cpu->isar.regs[ID_AA64MMFR0] = 0x101125;
+ cpu->isar.regs[ID_AA64MMFR1] = 0x10211122;
+ cpu->isar.regs[ID_AA64MMFR2] = 0x00001011;
}
static void cpu_max_get_sve_vq(Object *obj, Visitor *v, const char *name,
@@ -321,7 +344,7 @@ static void aarch64_max_initfn(Object *obj)
uint32_t u;
aarch64_a57_initfn(obj);
- t = cpu->isar.id_aa64isar0;
+ t = cpu->isar.regs[ID_AA64ISAR0];
t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* AES + PMULL */
t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1);
t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2); /* SHA512 */
@@ -335,9 +358,9 @@ static void aarch64_max_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 1);
t = FIELD_DP64(t, ID_AA64ISAR0, TS, 2); /* v8.5-CondM */
t = FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1);
- cpu->isar.id_aa64isar0 = t;
+ cpu->isar.regs[ID_AA64ISAR0] = t;
- t =
cpu->isar.id_aa64isar1; + t = cpu->isar.regs[ID_AA64ISAR1]; t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1); t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1); t = FIELD_DP64(t, ID_AA64ISAR1, APA, 1); /* PAuth, architected only */ @@ -347,40 +370,45 @@ static void aarch64_max_initfn(Object *obj) t = FIELD_DP64(t, ID_AA64ISAR1, SB, 1); t = FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1); t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1); - cpu->isar.id_aa64isar1 = t; + cpu->isar.regs[ID_AA64ISAR1] = t; - t = cpu->isar.id_aa64pfr0; + t = cpu->isar.regs[ID_AA64PFR0]; t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1); t = FIELD_DP64(t, ID_AA64PFR0, FP, 1); t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1); - cpu->isar.id_aa64pfr0 = t; + cpu->isar.regs[ID_AA64PFR0] = t; - t = cpu->isar.id_aa64pfr1; + t = cpu->isar.regs[ID_AA64PFR1]; t = FIELD_DP64(t, ID_AA64PFR1, BT, 1); - cpu->isar.id_aa64pfr1 = t; + cpu->isar.regs[ID_AA64PFR1] = t; - t = cpu->isar.id_aa64mmfr1; + t = cpu->isar.regs[ID_AA64MMFR1]; t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* HPD */ t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1); - cpu->isar.id_aa64mmfr1 = t; + t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 2); /* ATS1E1 */ + cpu->isar.regs[ID_AA64MMFR1] = t; /* Replicate the same data to the 32-bit id registers. */ - u = cpu->isar.id_isar5; + u = cpu->isar.regs[ID_ISAR5]; u = FIELD_DP32(u, ID_ISAR5, AES, 2); /* AES + PMULL */ u = FIELD_DP32(u, ID_ISAR5, SHA1, 1); u = FIELD_DP32(u, ID_ISAR5, SHA2, 1); u = FIELD_DP32(u, ID_ISAR5, CRC32, 1); u = FIELD_DP32(u, ID_ISAR5, RDM, 1); u = FIELD_DP32(u, ID_ISAR5, VCMA, 1); - cpu->isar.id_isar5 = u; + cpu->isar.regs[ID_ISAR5] = u; - u = cpu->isar.id_isar6; + u = cpu->isar.regs[ID_ISAR6]; u = FIELD_DP32(u, ID_ISAR6, JSCVT, 1); u = FIELD_DP32(u, ID_ISAR6, DP, 1); u = FIELD_DP32(u, ID_ISAR6, FHM, 1); u = FIELD_DP32(u, ID_ISAR6, SB, 1); u = FIELD_DP32(u, ID_ISAR6, SPECRES, 1); - cpu->isar.id_isar6 = u; + cpu->isar.regs[ID_ISAR6] = u; + + u = cpu->isar.regs[ID_MMFR3]; + u = FIELD_DP32(u, ID_MMFR3, PAN, 2); /* ATS1E1 */ + cpu->isar.regs[ID_MMFR3] = u; /* * FIXME: We do not yet support ARMv8.2-fp16 for AArch32 yet, @@ -501,6 +529,115 @@ static void arm_cpu_parse_featurestr(const char *typename, char *features, } } +static const char *unconfigurable_feats[] = { + "evtstrm", + "cpuid", + NULL +}; + +static bool is_configurable_feat(const char *name) +{ + int i; + + for (i = 0; unconfigurable_feats[i]; ++i) { + if (g_strcmp0(unconfigurable_feats[i], name) == 0) { + return false; + } + } + + return true; +} + +static void +cpu_add_feat_as_prop(const char *typename, const char *name, const char *val) +{ + GlobalProperty *prop; + + if (!is_configurable_feat(name)) { + info_report("CPU feature '%s' is not configurable by QEMU. 
Ignore it.", + name); + return; + } + + prop = g_new0(typeof(*prop), 1); + prop->driver = typename; + prop->property = g_strdup(name); + prop->value = g_strdup(val); + qdev_prop_register_global(prop); +} + +static gint compare_string(gconstpointer a, gconstpointer b) +{ + return g_strcmp0(a, b); +} + +static GList *plus_features, *minus_features; + +static void aarch64_cpu_parse_features(const char *typename, char *features, + Error **errp) +{ + GList *l; + char *featurestr; /* Single 'key=value" string being parsed */ + static bool cpu_globals_initialized; + + if (cpu_globals_initialized) { + return; + } + cpu_globals_initialized = true; + + if (!features) { + return; + } + for (featurestr = strtok(features, ","); + featurestr; + featurestr = strtok(NULL, ",")) { + const char *name; + const char *val = NULL; + char *eq = NULL; + + /* Compatibility syntax: */ + if (featurestr[0] == '+') { + plus_features = g_list_append(plus_features, + g_strdup(featurestr + 1)); + continue; + } else if (featurestr[0] == '-') { + minus_features = g_list_append(minus_features, + g_strdup(featurestr + 1)); + continue; + } + + eq = strchr(featurestr, '='); + name = featurestr; + if (eq) { + *eq++ = 0; + val = eq; + } else { + error_setg(errp, "Unsupported property format: %s", name); + return; + } + + if (g_list_find_custom(plus_features, name, compare_string)) { + warn_report("Ambiguous CPU model string. " + "Don't mix both \"+%s\" and \"%s=%s\"", + name, name, val); + } + if (g_list_find_custom(minus_features, name, compare_string)) { + warn_report("Ambiguous CPU model string. " + "Don't mix both \"-%s\" and \"%s=%s\"", + name, name, val); + } + cpu_add_feat_as_prop(typename, name, val); + } + + for (l = plus_features; l; l = l->next) { + cpu_add_feat_as_prop(typename, l->data, "on"); + } + + for (l = minus_features; l; l = l->next) { + cpu_add_feat_as_prop(typename, l->data, "off"); + } +} + static void aarch64_cpu_class_init(ObjectClass *oc, void *data) { CPUClass *cc = CPU_CLASS(oc); @@ -512,6 +649,7 @@ static void aarch64_cpu_class_init(ObjectClass *oc, void *data) cc->gdb_num_core_regs = 34; cc->gdb_core_xml_file = "aarch64-core.xml"; cc->gdb_arch_name = aarch64_gdb_arch_name; + cc->parse_features = aarch64_cpu_parse_features; } static void aarch64_cpu_instance_init(Object *obj) diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c index dde80273ff1376a4102823402228b2710d955bef..3f8f667df7f31294328f7633422cb2011a74dbdf 100644 --- a/target/arm/debug_helper.c +++ b/target/arm/debug_helper.c @@ -16,8 +16,8 @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn) { CPUARMState *env = &cpu->env; uint64_t bcr = env->cp15.dbgbcr[lbn]; - int brps = extract32(cpu->dbgdidr, 24, 4); - int ctx_cmps = extract32(cpu->dbgdidr, 20, 4); + int brps = arm_num_brps(cpu); + int ctx_cmps = arm_num_ctx_cmps(cpu); int bt; uint32_t contextidr; @@ -28,7 +28,7 @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn) * case DBGWCR_EL1.LBN must indicate that breakpoint). * We choose the former. 
*/ - if (lbn > brps || lbn < (brps - ctx_cmps)) { + if (lbn >= brps || lbn < (brps - ctx_cmps)) { return false; } diff --git a/target/arm/helper.c b/target/arm/helper.c index b74c23a9bc08cf772fd4a4ee4b8e9d3ae34870cc..bddd355fa0203093ad9aa6ee925170beb5338cac 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -23,6 +23,7 @@ #include "hw/semihosting/semihost.h" #include "sysemu/cpus.h" #include "sysemu/kvm.h" +#include "sysemu/tcg.h" #include "qemu/range.h" #include "qapi/qapi-commands-machine-target.h" #include "qapi/error.h" @@ -31,6 +32,7 @@ #include "arm_ldst.h" #include "exec/cpu_ldst.h" #endif +#include "kvm_arm.h" #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */ @@ -250,6 +252,16 @@ static bool raw_accessors_invalid(const ARMCPRegInfo *ri) return true; } +static bool is_id_reg(const ARMCPRegInfo *ri) +{ + /* + * (Op0, Op1, CRn, CRm, Op2) of ID registers is (3, 0, 0, crm, op2), + * where 1<=crm<8, 0<=op2<8. + */ + return ri->opc0 == 3 && ri->opc1 == 0 && ri->crn == 0 && + ri->crm > 0 && ri->crm < 8; +} + bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync) { /* Write the coprocessor state from cpu->env to the (index,value) list. */ @@ -266,30 +278,53 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync) ok = false; continue; } - if (ri->type & ARM_CP_NO_RAW) { + if ((ri->type & ARM_CP_NO_RAW) && !(kvm_sync && is_id_reg(ri))) { continue; } newval = read_raw_cp_reg(&cpu->env, ri); if (kvm_sync) { - /* - * Only sync if the previous list->cpustate sync succeeded. - * Rather than tracking the success/failure state for every - * item in the list, we just recheck "does the raw write we must - * have made in write_list_to_cpustate() read back OK" here. - */ - uint64_t oldval = cpu->cpreg_values[i]; + if (is_id_reg(ri)) { + /* Only sync if we can sync to KVM successfully. */ + uint64_t oldval; + uint64_t kvmval; - if (oldval == newval) { - continue; - } + if (kvm_arm_get_one_reg(cpu, cpu->cpreg_indexes[i], &oldval)) { + continue; + } + if (oldval == newval) { + continue; + } - write_raw_cp_reg(&cpu->env, ri, oldval); - if (read_raw_cp_reg(&cpu->env, ri) != oldval) { - continue; - } + if (kvm_arm_set_one_reg(cpu, cpu->cpreg_indexes[i], &newval)) { + continue; + } + if (kvm_arm_get_one_reg(cpu, cpu->cpreg_indexes[i], &kvmval) || + kvmval != newval) { + continue; + } + + kvm_arm_set_one_reg(cpu, cpu->cpreg_indexes[i], &oldval); + } else { + /* + * Only sync if the previous list->cpustate sync succeeded. + * Rather than tracking the success/failure state for every + * item in the list, we just recheck "does the raw write we must + * have made in write_list_to_cpustate() read back OK" here. + */ + uint64_t oldval = cpu->cpreg_values[i]; + + if (oldval == newval) { + continue; + } - write_raw_cp_reg(&cpu->env, ri, newval); + write_raw_cp_reg(&cpu->env, ri, oldval); + if (read_raw_cp_reg(&cpu->env, ri) != oldval) { + continue; + } + + write_raw_cp_reg(&cpu->env, ri, newval); + } } cpu->cpreg_values[i] = newval; } @@ -5596,26 +5631,16 @@ static void define_debug_regs(ARMCPU *cpu) ARMCPRegInfo dbgdidr = { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, .access = PL0_R, .accessfn = access_tda, - .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr, + .type = ARM_CP_CONST, .resetvalue = cpu->isar.regs[DBGDIDR], }; /* Note that all these register fields hold "number of Xs minus 1". 
*/ - brps = extract32(cpu->dbgdidr, 24, 4); - wrps = extract32(cpu->dbgdidr, 28, 4); - ctx_cmps = extract32(cpu->dbgdidr, 20, 4); + brps = arm_num_brps(cpu); + wrps = arm_num_wrps(cpu); + ctx_cmps = arm_num_ctx_cmps(cpu); assert(ctx_cmps <= brps); - /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties - * of the debug registers such as number of breakpoints; - * check that if they both exist then they agree. - */ - if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { - assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps); - assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps); - assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps); - } - define_one_arm_cp_reg(cpu, &dbgdidr); define_arm_cp_regs(cpu, debug_cp_reginfo); @@ -5623,7 +5648,7 @@ static void define_debug_regs(ARMCPU *cpu) define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); } - for (i = 0; i < brps + 1; i++) { + for (i = 0; i < brps; i++) { ARMCPRegInfo dbgregs[] = { { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH, .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4, @@ -5642,7 +5667,7 @@ static void define_debug_regs(ARMCPU *cpu) define_arm_cp_regs(cpu, dbgregs); } - for (i = 0; i < wrps + 1; i++) { + for (i = 0; i < wrps; i++) { ARMCPRegInfo dbgregs[] = { { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH, .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6, @@ -5672,7 +5697,7 @@ static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) ARMCPU *cpu = env_archcpu(env); uint64_t pfr1 = cpu->id_pfr1; - if (env->gicv3state) { + if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64) && env->gicv3state) { pfr1 |= 1 << 28; } return pfr1; @@ -5681,7 +5706,7 @@ static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri) { ARMCPU *cpu = env_archcpu(env); - uint64_t pfr0 = cpu->isar.id_aa64pfr0; + uint64_t pfr0 = cpu->isar.regs[ID_AA64PFR0]; if (env->gicv3state) { pfr0 |= 1 << 24; @@ -5907,7 +5932,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->id_dfr0 }, + .resetvalue = cpu->isar.regs[ID_DFR0] }, { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, @@ -5915,51 +5940,51 @@ void register_cp_regs_for_features(ARMCPU *cpu) { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->id_mmfr0 }, + .resetvalue = cpu->isar.regs[ID_MMFR0] }, { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->id_mmfr1 }, + .resetvalue = cpu->isar.regs[ID_MMFR1] }, { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->id_mmfr2 }, + .resetvalue = cpu->isar.regs[ID_MMFR2] }, { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->id_mmfr3 }, + .resetvalue = cpu->isar.regs[ID_MMFR3] }, { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->isar.id_isar0 }, + .resetvalue = 
cpu->isar.regs[ID_ISAR0] }, { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->isar.id_isar1 }, + .resetvalue = cpu->isar.regs[ID_ISAR1] }, { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->isar.id_isar2 }, + .resetvalue = cpu->isar.regs[ID_ISAR2] }, { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->isar.id_isar3 }, + .resetvalue = cpu->isar.regs[ID_ISAR3] }, { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->isar.id_isar4 }, + .resetvalue = cpu->isar.regs[ID_ISAR4] }, { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->isar.id_isar5 }, + .resetvalue = cpu->isar.regs[ID_ISAR5] }, { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->id_mmfr4 }, + .resetvalue = cpu->isar.regs[ID_MMFR4] }, { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->isar.id_isar6 }, + .resetvalue = cpu->isar.regs[ID_ISAR6] }, REGINFO_SENTINEL }; define_arm_cp_regs(cpu, v6_idregs); @@ -6050,8 +6075,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) } else { define_arm_cp_regs(cpu, not_v7_cp_reginfo); } - if (FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) >= 4 && - FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) != 0xf) { + if (cpu_isar_feature(aa32_pmu_8_1, cpu)) { ARMCPRegInfo v81_pmu_regs[] = { { .name = "PMCEID2", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4, @@ -6084,7 +6108,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->isar.id_aa64pfr1}, + .resetvalue = cpu->isar.regs[ID_AA64PFR1]}, { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, @@ -6113,11 +6137,11 @@ void register_cp_regs_for_features(ARMCPU *cpu) { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->id_aa64dfr0 }, + .resetvalue = cpu->isar.regs[ID_AA64DFR0] }, { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->id_aa64dfr1 }, + .resetvalue = cpu->isar.regs[ID_AA64DFR1] }, { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, @@ -6145,11 +6169,11 @@ void register_cp_regs_for_features(ARMCPU *cpu) { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->isar.id_aa64isar0 }, + .resetvalue = cpu->isar.regs[ID_AA64ISAR0] }, { .name = 
"ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->isar.id_aa64isar1 }, + .resetvalue = cpu->isar.regs[ID_AA64ISAR1] }, { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, @@ -6177,15 +6201,15 @@ void register_cp_regs_for_features(ARMCPU *cpu) { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->isar.id_aa64mmfr0 }, + .resetvalue = cpu->isar.regs[ID_AA64MMFR0] }, { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->isar.id_aa64mmfr1 }, - { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .resetvalue = cpu->isar.regs[ID_AA64MMFR1] }, + { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = 0 }, + .resetvalue = cpu->isar.regs[ID_AA64MMFR2] }, { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, @@ -6209,15 +6233,15 @@ void register_cp_regs_for_features(ARMCPU *cpu) { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->isar.mvfr0 }, + .resetvalue = cpu->isar.regs[MVFR0] }, { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->isar.mvfr1 }, + .resetvalue = cpu->isar.regs[MVFR1] }, { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->isar.mvfr2 }, + .resetvalue = cpu->isar.regs[MVFR2] }, { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, @@ -6436,7 +6460,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); define_arm_cp_regs(cpu, vmsa_cp_reginfo); /* TTCBR2 is introduced with ARMv8.2-A32HPD. */ - if (FIELD_EX32(cpu->id_mmfr4, ID_MMFR4, HPDS) != 0) { + if (FIELD_EX32(cpu->isar.regs[ID_MMFR4], ID_MMFR4, HPDS) != 0) { define_one_arm_cp_reg(cpu, &ttbcr2_reginfo); } } diff --git a/target/arm/internals.h b/target/arm/internals.h index 232d96387538fdf9b5663dece10c489cd20e95b2..2da13ba8072a79cf62fe59b4680611e691790f56 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -237,7 +237,7 @@ static inline unsigned int arm_pamax(ARMCPU *cpu) [5] = 48, }; unsigned int parange = - FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE); + FIELD_EX64(cpu->isar.regs[ID_AA64MMFR0], ID_AA64MMFR0, PARANGE); /* id_aa64mmfr0 is a read-only register so values outside of the * supported mappings can be considered an implementation error. */ @@ -857,6 +857,49 @@ static inline uint32_t arm_debug_exception_fsr(CPUARMState *env) } } +/** + * arm_num_brps: Return number of implemented breakpoints. + * Note that the ID register BRPS field is "number of bps - 1", + * and we return the actual number of breakpoints. 
+ */ +static inline int arm_num_brps(ARMCPU *cpu) +{ + if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + return FIELD_EX64(cpu->isar.regs[ID_AA64DFR0], ID_AA64DFR0, BRPS) + 1; + } else { + return FIELD_EX32(cpu->isar.regs[DBGDIDR], DBGDIDR, BRPS) + 1; + } +} + +/** + * arm_num_wrps: Return number of implemented watchpoints. + * Note that the ID register WRPS field is "number of wps - 1", + * and we return the actual number of watchpoints. + */ +static inline int arm_num_wrps(ARMCPU *cpu) +{ + if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + return FIELD_EX64(cpu->isar.regs[ID_AA64DFR0], ID_AA64DFR0, WRPS) + 1; + } else { + return FIELD_EX32(cpu->isar.regs[DBGDIDR], DBGDIDR, WRPS) + 1; + } +} + +/** + * arm_num_ctx_cmps: Return number of implemented context comparators. + * Note that the ID register CTX_CMPS field is "number of cmps - 1", + * and we return the actual number of comparators. + */ +static inline int arm_num_ctx_cmps(ARMCPU *cpu) +{ + if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + return FIELD_EX64(cpu->isar.regs[ID_AA64DFR0], ID_AA64DFR0, + CTX_CMPS) + 1; + } else { + return FIELD_EX32(cpu->isar.regs[DBGDIDR], DBGDIDR, CTX_CMPS) + 1; + } +} + /* Note make_memop_idx reserves 4 bits for mmu_idx, and MO_BSWAP is bit 3. * Thus a TCGMemOpIdx, without any MO_ALIGN bits, fits in 8 bits. */ diff --git a/target/arm/kvm.c b/target/arm/kvm.c index 4f131f687df6550b817570bb174ec41c2be063fe..229b17cea0841f44cae334c2c81c5dc971486f72 100644 --- a/target/arm/kvm.c +++ b/target/arm/kvm.c @@ -457,6 +457,44 @@ out: return ret; } +int kvm_arm_get_one_reg(ARMCPU *cpu, uint64_t regidx, uint64_t *target) +{ + uint32_t v32; + int ret; + + switch (regidx & KVM_REG_SIZE_MASK) { + case KVM_REG_SIZE_U32: + ret = kvm_get_one_reg(CPU(cpu), regidx, &v32); + if (ret == 0) { + *target = v32; + } + return ret; + case KVM_REG_SIZE_U64: + return kvm_get_one_reg(CPU(cpu), regidx, target); + default: + return -1; + } +} + +int kvm_arm_set_one_reg(ARMCPU *cpu, uint64_t regidx, uint64_t *source) +{ + uint32_t v32; + + switch (regidx & KVM_REG_SIZE_MASK) { + case KVM_REG_SIZE_U32: + v32 = *source; + if (v32 != *source) { + error_report("the value of source is too large"); + return -1; + } + return kvm_set_one_reg(CPU(cpu), regidx, &v32); + case KVM_REG_SIZE_U64: + return kvm_set_one_reg(CPU(cpu), regidx, source); + default: + return -1; + } +} + bool write_kvmstate_to_list(ARMCPU *cpu) { CPUState *cs = CPU(cpu); diff --git a/target/arm/kvm32.c b/target/arm/kvm32.c index ee1588305d82c483b40619b3f6bed396252f7dad..e984d52dd2e467ffd938afc879323377b2dbe561 100644 --- a/target/arm/kvm32.c +++ b/target/arm/kvm32.c @@ -93,6 +93,9 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) ahcf->isar.id_isar6 = 0; } + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0, + ARM_CP15_REG32(0, 0, 1, 2)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0, KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP | KVM_REG_ARM_VFP_MVFR0); @@ -104,6 +107,28 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) * Fortunately there is not yet anything in there that affects migration. 
*/ + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0, + ARM_CP15_REG32(0, 0, 1, 4)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1, + ARM_CP15_REG32(0, 0, 1, 5)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2, + ARM_CP15_REG32(0, 0, 1, 6)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3, + ARM_CP15_REG32(0, 0, 1, 7)); + if (read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4, + ARM_CP15_REG32(0, 0, 2, 6))) { + /* + * Older kernels don't support reading ID_MMFR4 (a new in v8 + * register); assume it's zero. + */ + ahcf->isar.id_mmfr4 = 0; + } + + /* + * There is no way to read DBGDIDR, because currently 32-bit KVM + * doesn't implement debug at all. Leave it at zero. + */ + kvm_arm_destroy_scratch_host_vcpu(fdarray); if (err < 0) { diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c index 4f0bf00070e48c5dc6ad5623c04bda6cb0e04180..05345556dd7dbb85600b0345d222c13a0423e238 100644 --- a/target/arm/kvm64.c +++ b/target/arm/kvm64.c @@ -455,7 +455,7 @@ static inline void unset_feature(uint64_t *features, int feature) *features &= ~(1ULL << feature); } -static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id) +static int read_sys_reg32(int fd, uint64_t *pret, uint64_t id) { uint64_t ret; struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret }; @@ -509,7 +509,7 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) ahcf->target = init.target; ahcf->dtb_compatible = "arm,arm-v8"; - err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0, + err = read_sys_reg64(fdarray[2], &ahcf->isar.regs[ID_AA64PFR0], ARM64_SYS_REG(3, 0, 0, 4, 0)); if (unlikely(err < 0)) { /* @@ -528,19 +528,25 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) * ??? Either of these sounds like too much effort just * to work around running a modern host kernel. */ - ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */ + ahcf->isar.regs[ID_AA64PFR0] = 0x00000011; /* EL1&0, AArch64 only */ err = 0; } else { - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1, + err |= read_sys_reg64(fdarray[2], &ahcf->isar.regs[ID_AA64PFR1], ARM64_SYS_REG(3, 0, 0, 4, 1)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0, + err |= read_sys_reg64(fdarray[2], &ahcf->isar.regs[ID_AA64DFR0], + ARM64_SYS_REG(3, 0, 0, 5, 0)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.regs[ID_AA64DFR1], + ARM64_SYS_REG(3, 0, 0, 5, 1)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.regs[ID_AA64ISAR0], ARM64_SYS_REG(3, 0, 0, 6, 0)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1, + err |= read_sys_reg64(fdarray[2], &ahcf->isar.regs[ID_AA64ISAR1], ARM64_SYS_REG(3, 0, 0, 6, 1)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0, + err |= read_sys_reg64(fdarray[2], &ahcf->isar.regs[ID_AA64MMFR0], ARM64_SYS_REG(3, 0, 0, 7, 0)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1, + err |= read_sys_reg64(fdarray[2], &ahcf->isar.regs[ID_AA64MMFR1], ARM64_SYS_REG(3, 0, 0, 7, 1)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.regs[ID_AA64MMFR2], + ARM64_SYS_REG(3, 0, 0, 7, 2)); /* * Note that if AArch32 support is not present in the host, @@ -549,27 +555,71 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) * than skipping the reads and leaving 0, as we must avoid * considering the values in every case. 
*/ - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_DFR0], + ARM64_SYS_REG(3, 0, 0, 1, 2)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_MMFR0], + ARM64_SYS_REG(3, 0, 0, 1, 4)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_MMFR1], + ARM64_SYS_REG(3, 0, 0, 1, 5)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_MMFR2], + ARM64_SYS_REG(3, 0, 0, 1, 6)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_MMFR3], + ARM64_SYS_REG(3, 0, 0, 1, 7)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_ISAR0], ARM64_SYS_REG(3, 0, 0, 2, 0)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_ISAR1], ARM64_SYS_REG(3, 0, 0, 2, 1)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_ISAR2], ARM64_SYS_REG(3, 0, 0, 2, 2)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_ISAR3], ARM64_SYS_REG(3, 0, 0, 2, 3)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_ISAR4], ARM64_SYS_REG(3, 0, 0, 2, 4)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_ISAR5], ARM64_SYS_REG(3, 0, 0, 2, 5)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_MMFR4], + ARM64_SYS_REG(3, 0, 0, 2, 6)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_ISAR6], ARM64_SYS_REG(3, 0, 0, 2, 7)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[MVFR0], ARM64_SYS_REG(3, 0, 0, 3, 0)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[MVFR1], ARM64_SYS_REG(3, 0, 0, 3, 1)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[MVFR2], ARM64_SYS_REG(3, 0, 0, 3, 2)); + + /* + * DBGDIDR is a bit complicated because the kernel doesn't + * provide an accessor for it in 64-bit mode, which is what this + * scratch VM is in, and there's no architected "64-bit sysreg + * which reads the same as the 32-bit register" the way there is + * for other ID registers. Instead we synthesize a value from the + * AArch64 ID_AA64DFR0, the same way the kernel code in + * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does. + * We only do this if the CPU supports AArch32 at EL1. 
+ */ + if (FIELD_EX32(ahcf->isar.regs[ID_AA64PFR0], ID_AA64PFR0, EL1) >= 2) { + int wrps = FIELD_EX64(ahcf->isar.regs[ID_AA64DFR0], + ID_AA64DFR0, WRPS); + int brps = FIELD_EX64(ahcf->isar.regs[ID_AA64DFR0], + ID_AA64DFR0, BRPS); + int ctx_cmps = + FIELD_EX64(ahcf->isar.regs[ID_AA64DFR0], ID_AA64DFR0, CTX_CMPS); + int version = 6; /* ARMv8 debug architecture */ + bool has_el3 = + !!FIELD_EX32(ahcf->isar.regs[ID_AA64PFR0], ID_AA64PFR0, EL3); + uint32_t dbgdidr = 0; + + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, BRPS, brps); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, CTX_CMPS, ctx_cmps); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, VERSION, version); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, NSUHD_IMP, has_el3); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3); + dbgdidr |= (1 << 15); /* RES1 bit */ + ahcf->isar.regs[DBGDIDR] = dbgdidr; + } } kvm_arm_destroy_scratch_host_vcpu(fdarray); @@ -594,6 +644,20 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) return true; } +bool kvm_arm_cpu_feature_supported(void) +{ + static bool cpu_feature_initialized; + static bool cpu_feature_supported; + + if (!cpu_feature_initialized) { + cpu_feature_supported = kvm_check_extension(kvm_state, + KVM_CAP_ARM_CPU_FEATURE); + cpu_feature_initialized = true; + } + + return cpu_feature_supported; +} + #define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5 int kvm_arch_init_vcpu(CPUState *cs) diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h index 0de5f83ee8629c4c748db48bba3f16fbd878f531..49e80878f42a71fede9c5b1d7c93e20addbc3537 100644 --- a/target/arm/kvm_arm.h +++ b/target/arm/kvm_arm.h @@ -239,6 +239,13 @@ void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu); */ void kvm_arm_add_vcpu_properties(Object *obj); +/** + * kvm_arm_cpu_feature_supported: + * + * Returns true if KVM can set CPU features and false otherwise. + */ +bool kvm_arm_cpu_feature_supported(void); + /** * kvm_arm_get_max_vm_ipa_size: * @ms: Machine state handle @@ -400,4 +407,7 @@ static inline const char *its_class_name(void) } } +int kvm_arm_get_one_reg(ARMCPU *cpu, uint64_t regidx, uint64_t *target); +int kvm_arm_set_one_reg(ARMCPU *cpu, uint64_t regidx, uint64_t *source); + #endif diff --git a/target/arm/monitor.c b/target/arm/monitor.c index 6ec6dd04ac2e00d8d983867085f77b76056b75fb..7c2ff3c06e50f69bbb0cb03187781b34e4c1d8df 100644 --- a/target/arm/monitor.c +++ b/target/arm/monitor.c @@ -23,7 +23,14 @@ #include "qemu/osdep.h" #include "hw/boards.h" #include "kvm_arm.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "qapi/qobject-input-visitor.h" +#include "qapi/qapi-commands-machine-target.h" #include "qapi/qapi-commands-misc-target.h" +#include "qapi/qmp/qerror.h" +#include "qapi/qmp/qdict.h" +#include "qom/qom-qobject.h" static GICCapability *gic_cap_new(int version) { @@ -82,3 +89,146 @@ GICCapabilityList *qmp_query_gic_capabilities(Error **errp) return head; } + +/* + * These are cpu model features we want to advertise. The order here + * matters as this is the order in which qmp_query_cpu_model_expansion + * will attempt to set them. If there are dependencies between features, + * then the order that considers those dependencies must be used. 
+ */ +static const char *cpu_model_advertised_features[] = { + "aarch64", "pmu", + NULL +}; + +CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type, + CpuModelInfo *model, + Error **errp) +{ + CpuModelExpansionInfo *expansion_info; + const QDict *qdict_in = NULL; + QDict *qdict_out; + ObjectClass *oc; + Object *obj; + const char *name; + int i; + + if (type != CPU_MODEL_EXPANSION_TYPE_FULL) { + error_setg(errp, "The requested expansion type is not supported"); + return NULL; + } + + if (!kvm_enabled() && !strcmp(model->name, "host")) { + error_setg(errp, "The CPU type '%s' requires KVM", model->name); + return NULL; + } + + oc = cpu_class_by_name(TYPE_ARM_CPU, model->name); + if (!oc) { + error_setg(errp, "The CPU type '%s' is not a recognized ARM CPU type", + model->name); + return NULL; + } + + if (kvm_enabled()) { + bool supported = false; + + if (!strcmp(model->name, "host") || !strcmp(model->name, "max")) { + /* These are kvmarm's recommended cpu types */ + supported = true; + } else if (current_machine->cpu_type) { + const char *cpu_type = current_machine->cpu_type; + int len = strlen(cpu_type) - strlen(ARM_CPU_TYPE_SUFFIX); + + if (strlen(model->name) == len && + !strncmp(model->name, cpu_type, len)) { + /* KVM is enabled and we're using this type, so it works. */ + supported = true; + } + } + if (!supported) { + error_setg(errp, "We cannot guarantee the CPU type '%s' works " + "with KVM on this host", model->name); + return NULL; + } + } + + if (model->props) { + qdict_in = qobject_to(QDict, model->props); + if (!qdict_in) { + error_setg(errp, QERR_INVALID_PARAMETER_TYPE, "props", "dict"); + return NULL; + } + } + + obj = object_new(object_class_get_name(oc)); + + if (qdict_in) { + Visitor *visitor; + Error *err = NULL; + + visitor = qobject_input_visitor_new(model->props); + visit_start_struct(visitor, NULL, NULL, 0, &err); + if (err) { + visit_free(visitor); + object_unref(obj); + error_propagate(errp, err); + return NULL; + } + + i = 0; + while ((name = cpu_model_advertised_features[i++]) != NULL) { + if (qdict_get(qdict_in, name)) { + object_property_set(obj, visitor, name, &err); + if (err) { + break; + } + } + } + + if (!err) { + visit_check_struct(visitor, &err); + } + visit_end_struct(visitor, NULL); + visit_free(visitor); + if (err) { + object_unref(obj); + error_propagate(errp, err); + return NULL; + } + } + + expansion_info = g_new0(CpuModelExpansionInfo, 1); + expansion_info->model = g_malloc0(sizeof(*expansion_info->model)); + expansion_info->model->name = g_strdup(model->name); + + qdict_out = qdict_new(); + + i = 0; + while ((name = cpu_model_advertised_features[i++]) != NULL) { + ObjectProperty *prop = object_property_find(obj, name, NULL); + if (prop) { + Error *err = NULL; + QObject *value; + + assert(prop->get); + value = object_property_get_qobject(obj, name, &err); + assert(!err); + + qdict_put_obj(qdict_out, name, value); + } + } + + arm_cpu_features_to_dict(ARM_CPU(obj), qdict_out); + + if (!qdict_size(qdict_out)) { + qobject_unref(qdict_out); + } else { + expansion_info->model->props = QOBJECT(qdict_out); + expansion_info->model->has_props = true; + } + + object_unref(obj); + + return expansion_info; +} diff --git a/tests/check-qjson.c b/tests/check-qjson.c index fa2afccb0a24985771e535a646fc8b9e1cbfca68..5e3e08fe79cffd5cd351c75ebb6a6577990a0ed0 100644 --- a/tests/check-qjson.c +++ b/tests/check-qjson.c @@ -1415,6 +1415,14 @@ static void invalid_dict_comma(void) g_assert(obj == NULL); } +static void invalid_dict_key(void) +{ + Error 
*err = NULL; + QObject *obj = qobject_from_json("{32:'abc'}", &err); + error_free_or_abort(&err); + g_assert(obj == NULL); +} + static void unterminated_literal(void) { Error *err = NULL; @@ -1500,6 +1508,7 @@ int main(int argc, char **argv) g_test_add_func("/errors/unterminated/dict_comma", unterminated_dict_comma); g_test_add_func("/errors/invalid_array_comma", invalid_array_comma); g_test_add_func("/errors/invalid_dict_comma", invalid_dict_comma); + g_test_add_func("/errors/invalid_dict_key", invalid_dict_key); g_test_add_func("/errors/unterminated/literal", unterminated_literal); g_test_add_func("/errors/limits/nesting", limits_nesting); g_test_add_func("/errors/multiple_values", multiple_values);