diff --git a/arch/arm64/core/virtualization/mm.c b/arch/arm64/core/virtualization/mm.c index ae46529eb8948ad028877c3c681db550f36628c8..73f5e87f3e71ac952372a54a08591ae0422dba7f 100644 --- a/arch/arm64/core/virtualization/mm.c +++ b/arch/arm64/core/virtualization/mm.c @@ -193,10 +193,11 @@ static inline bool vm_is_block_desc(uint64_t desc) return (desc & PTE_DESC_TYPE_MASK) == PTE_BLOCK_DESC; } -static void vm_set_pte_block_desc(uint64_t *pte, uint64_t desc, unsigned int level) +static void vm_set_pte_block_desc(uint64_t *pte, uint64_t desc, unsigned int level,bool invalid) { if (desc) { desc |= (level == XLAT_LAST_LEVEL) ? PTE_PAGE_DESC : PTE_BLOCK_DESC; + if(invalid && level == XLAT_LAST_LEVEL) desc &= PTE_INVALID_DESC; } *pte = desc; } @@ -315,8 +316,8 @@ static uint64_t *vm_expand_to_table(uint64_t *pte, unsigned int level, uint32_t } static int vm_set_mapping(struct arm_mmu_ptables *ptables,\ - uintptr_t virt, size_t size,\ - uint64_t desc, bool may_overwrite, uint32_t vmid) + uintptr_t virt, size_t size,\ + uint64_t desc, bool may_overwrite, bool invalid, uint32_t vmid) { uint64_t *pte, *ptes[XLAT_LAST_LEVEL + 1]; uint64_t level_size; @@ -326,7 +327,7 @@ static int vm_set_mapping(struct arm_mmu_ptables *ptables,\ while (size) { __ASSERT(level <= XLAT_LAST_LEVEL, - "max translation table level exceeded\n"); + "max translation table level exceeded\n"); /* Locate PTE for given virtual address and page table level */ pte = &table[XLAT_TABLE_VA_IDX(virt, level)]; @@ -348,7 +349,7 @@ static int vm_set_mapping(struct arm_mmu_ptables *ptables,\ level_size = 1ULL << LEVEL_TO_VA_SIZE_SHIFT(level); if (vm_is_desc_superset(*pte, desc, level)) { - /* This block already covers our range */ + /* This block already covers our range */ level_size -= (virt & (level_size - 1)); if (level_size > size) { level_size = size; @@ -357,7 +358,7 @@ static int vm_set_mapping(struct arm_mmu_ptables *ptables,\ } if ((size < level_size) || (virt & (level_size - 1)) || - !vm_is_desc_block_aligned(desc, level_size)) { + !vm_is_desc_block_aligned(desc, level_size)) { /* Range doesn't fit, create subtable */ table = vm_expand_to_table(pte, level, vmid); if (!table) { @@ -376,14 +377,14 @@ static int vm_set_mapping(struct arm_mmu_ptables *ptables,\ vm_table_usage(pte, -1, vmid); } /* Create (or erase) block/page descriptor */ - vm_set_pte_block_desc(pte, desc, level); + vm_set_pte_block_desc(pte, desc, level, invalid); /* recursively free unused tables if any */ while (level != BASE_XLAT_LEVEL && - vm_is_table_unused(pte, vmid)) { + vm_is_table_unused(pte, vmid)) { vm_free_table(pte, vmid); pte = ptes[--level]; - vm_set_pte_block_desc(pte, 0, level); + vm_set_pte_block_desc(pte, 0, level, false); vm_table_usage(pte, -1, vmid); } @@ -452,7 +453,7 @@ static int vm_remove_dev_map(struct arm_mmu_ptables *ptables, const char *name, "address/size are not page aligned\n"); key = k_spin_lock(&vm_xlat_lock); - ret = vm_set_mapping(ptables, virt, size, 0, true, vmid); + ret = vm_set_mapping(ptables, virt, size, 0, true, false,vmid); k_spin_unlock(&vm_xlat_lock, key); return ret; } @@ -473,7 +474,7 @@ static int vm_add_dev_map(struct arm_mmu_ptables *ptables, const char *name, "address/size are not page aligned\n"); key = k_spin_lock(&vm_xlat_lock); - ret = vm_set_mapping(ptables, virt, size, desc, may_overwrite, vmid); + ret = vm_set_mapping(ptables, virt, size, desc, may_overwrite, false, vmid); k_spin_unlock(&vm_xlat_lock, key); return ret; } @@ -494,7 +495,7 @@ static int vm_add_map(struct arm_mmu_ptables *ptables, const char 
*name,
     size = ALIGN_TO_PAGE(size);
     __ASSERT(((virt | phys | size) & (CONFIG_MMU_PAGE_SIZE - 1)) == 0,
          "address/size are not page aligned\n");
-    ret = vm_set_mapping(ptables, virt, size, desc, may_overwrite, vmid);
+    ret = vm_set_mapping(ptables, virt, size, desc, may_overwrite, false, vmid);
     k_spin_unlock(&vm_xlat_lock, key);
 
     return ret;
@@ -512,13 +513,26 @@ static int vm_remove_map(struct arm_mmu_ptables *ptables, const char *name,
     return ret;
 }
 
-int arch_mmap_vpart_to_block(uintptr_t phys, uintptr_t virt, size_t size, uint32_t attrs)
+int arch_mmap_vpart_to_block(struct k_mem_domain *domain, uintptr_t phys, uintptr_t virt, size_t size, uint32_t attrs, bool invalid, uint32_t vmid)
 {
     int ret;
     ARG_UNUSED(ret);
     uintptr_t dest_virt = virt;
+    k_spinlock_key_t key;
+    uint64_t desc = get_vm_region_desc(attrs);
+    struct arm_mmu_ptables *domain_ptables = &domain->arch.ptables;
+
+    arch_vm_mmap_pre(dest_virt, phys, size, attrs);
+
+    desc |= phys;
 
-    arch_vm_mmap_pre(dest_virt, phys, size, attrs);
+    key = k_spin_lock(&vm_xlat_lock);
+
+    __ASSERT(((virt | phys | size) & (CONFIG_MMU_PAGE_SIZE - 1)) == 0,
+         "address/size are not page aligned\n");
+    ret = vm_set_mapping(domain_ptables, virt, size, desc, true, invalid, vmid);
+
+    k_spin_unlock(&vm_xlat_lock, key);
 
     return 0;
 }
diff --git a/arch/arm64/core/virtualization/trap_handler.c b/arch/arm64/core/virtualization/trap_handler.c
index 1f7861d359d90d474fac959188b72727057c4664..e6d1e3adb02f931be89d3962b6d7092e22e722a1 100644
--- a/arch/arm64/core/virtualization/trap_handler.c
+++ b/arch/arm64/core/virtualization/trap_handler.c
@@ -17,6 +17,8 @@
 #include
 #include
 #include
+#include
+#include
 
 LOG_MODULE_DECLARE(ZVM_MODULE_NAME);
 
@@ -43,9 +45,6 @@ static int handle_ftrans_desc(int iss_dfsc, uint64_t pa_addr,
     struct vcpu *vcpu = _current_vcpu;
     uint64_t esr_elx = vcpu->arch->fault.esr_el2;
 
-#ifdef CONFIG_VM_DYNAMIC_MEMORY
-    /* TODO: Add dynamic memory allocate. */
-#else
     uint16_t reg_index = dabt->srt;
     uint64_t *reg_value;
     reg_value = find_index_reg(reg_index, regs);
@@ -55,6 +54,63 @@
     /* check that if it is a device memory fault */
     ret = handle_vm_device_emulate(vcpu->vm, pa_addr);
+
+#ifdef CONFIG_VM_DYNAMIC_MEMORY
+    /* TODO: Add dynamic memory allocate. */
+    if(ret){
+        /* pci init successful. */
+        if(ret > 0){
+            return 0;
+        }
+        else if(ret == -ENXIO || ret == -EFAULT ){
+            reg_value = find_index_reg(reg_index, regs);
+            *reg_value = 0xfefefefefefefefe;
+            ZVM_LOG_ERR("Unable to handle Data abort at address: 0x%llx!\n", pa_addr);
+            ZVM_LOG_ERR("A stage-2 translation table needs to be set for this device address 0x%llx.\n", pa_addr);
+        }
+        else{
+            struct vm *vm = get_current_vm();
+            struct vm_mem_domain *vmem_domain = vm->vmem_domain;
+            struct _dnode *d_node, *ds_node, *vd_node, *vds_node;
+            struct vm_mem_partition *vpart;
+            struct vm_mem_block *blk;
+            uint64_t image_size, base_addr, base_offset, mem_size;
+            uint16_t vmid = vm->vmid;
+            if(vmid < ZVM_ZEPHYR_VM_NUM){
+                image_size = ZEPHYR_VM_IMAGE_SIZE;
+                base_addr = ZEPHYR_VMSYS_BASE;
+                mem_size = ZEPHYR_VM_BLOCK_SIZE;
+                base_offset = (pa_addr - image_size - base_addr)/mem_size;
+            }else{
+                image_size = LINUX_IMAGE_VMRFS_SIZE;
+                base_addr = LINUX_VMSYS_BASE;
+                mem_size = LINUX_VM_BLOCK_SIZE;
+                base_offset = (pa_addr - image_size - base_addr)/mem_size;
+            }
+            SYS_DLIST_FOR_EACH_NODE_SAFE(&vmem_domain->mapped_vpart_list, d_node, ds_node){
+                vpart = CONTAINER_OF(d_node, struct vm_mem_partition, vpart_node);
+                if(pa_addr >= vpart->vm_mm_partition->start && pa_addr < vpart->vm_mm_partition->start + vpart->vm_mm_partition->size){
+                    SYS_DLIST_FOR_EACH_NODE_SAFE(&vpart->blk_list, vd_node, vds_node){
+                        blk = CONTAINER_OF(vd_node, struct vm_mem_block, vblk_node);
+                        if(blk->cur_blk_offset == base_offset){
+                            blk->phy_pointer = k_malloc(mem_size + CONFIG_MMU_PAGE_SIZE);
+                            blk->phy_base = ROUND_UP(blk->phy_pointer, CONFIG_MMU_PAGE_SIZE);
+                            blk->phy_base = z_mem_phys_addr(blk->phy_base);
+                            ret = arch_mmap_vpart_to_block(vmem_domain->vm_mm_domain, blk->phy_base,
+                                    blk->virt_base, mem_size, MT_VM_NORMAL_MEM, false, vmid);
+                            break;
+                        }
+                    }
+                    break;
+                }
+            }
+            vcpu->arch->ctxt.regs.pc -= (GET_ESR_IL(esr_elx)) ? 4 : 2;
+        }
+    }else{
+        ret = vm_mem_domain_partitions_add(vcpu->vm->vmem_domain);
+        vcpu->arch->ctxt.regs.pc -= (GET_ESR_IL(esr_elx)) ? 4 : 2;
+    }
+#else
     if(ret){
         /* pci initial sucessful.
*/ if(ret > 0){ diff --git a/auto_zvm.sh b/auto_zvm.sh index f815529df1e3d7a7df895bcd9fb1b21aa291fff9..77fafe50083690cee4d5b6bcd0b1c343bfe938c9 100755 --- a/auto_zvm.sh +++ b/auto_zvm.sh @@ -63,11 +63,11 @@ elif [ "$OPS" = "${ops_array[1]}" ]; then -cpu max -m 8G -nographic -machine virt,virtualization=on,gic-version=3 \ -net none -pidfile qemu.pid -chardev stdio,id=con,mux=on \ -serial chardev:con -mon chardev=con,mode=readline -serial pty -serial pty -smp cpus=4 \ - -device loader,file=$(pwd)/zvm_config/qemu_platform/hub/zephyr.bin,addr=0x60000000,force-raw=on \ - -device loader,file=$(pwd)/zvm_config/qemu_platform/hub/Image_withoutFS,addr=0x70000000,force-raw=on \ - -device loader,file=$(pwd)/zvm_config/qemu_platform/hub/linux-qemu-virt.dtb,addr=0x75000000 \ - -device loader,file=$(pwd)/zvm_config/qemu_platform/hub/debian.cpio.gz,addr=0x90000000 \ - -kernel $(pwd)/build/zephyr/zvm_host.elf + -device loader,file=$(pwd)/zvm_config/qemu_platform/hub/zephyr.bin,addr=0xa0000000,force-raw=on \ + -device loader,file=$(pwd)/zvm_config/qemu_platform/hub/Image_withoutFS,addr=0xa2000000,force-raw=on \ + -device loader,file=$(pwd)/zvm_config/qemu_platform/hub/linux-qemu-virt.dtb,addr=0xa8000000 \ + -device loader,file=$(pwd)/zvm_config/qemu_platform/hub/debian.cpio.gz,addr=0xaa000000 \ + -kernel $(pwd)/build/zephyr/zvm_host.elf -S -s ### using gdb to connect it: # gdb-multiarch -q -ex 'file ./build/zephyr/zvm_host.elf' -ex 'target remote localhost:1234' ### using trace to record qemu info when boot qemu diff --git a/boards/arm64/qemu_cortex_max/qemu_cortex_max.dts b/boards/arm64/qemu_cortex_max/qemu_cortex_max.dts index 52a4c74dff3fc8c197f1c4993796a667e6daac1d..0c67ec0fab79d736a239b363a00e086e4edeadee 100644 --- a/boards/arm64/qemu_cortex_max/qemu_cortex_max.dts +++ b/boards/arm64/qemu_cortex_max/qemu_cortex_max.dts @@ -28,7 +28,7 @@ soc { sram0: memory@40000000 { compatible = "mmio-sram"; - reg = <0x0 0x40000000 0x0 DT_SIZE_M(512)>; + reg = <0x0 0x40000000 0x0 DT_SIZE_M(1536)>; }; }; diff --git a/include/virtualization/arm/mm.h b/include/virtualization/arm/mm.h index 834e99425cc4c85e1a1c980348cdb788fd2b875b..b5f84ef5918f32ee94ff130cbe07771a47fdb702 100644 --- a/include/virtualization/arm/mm.h +++ b/include/virtualization/arm/mm.h @@ -98,7 +98,7 @@ /** * @brief Mapping vpart to physical block address. 
*/ -int arch_mmap_vpart_to_block(uintptr_t phys, uintptr_t virt, size_t size, uint32_t attrs); +int arch_mmap_vpart_to_block(struct k_mem_domain *domain, uintptr_t phys, uintptr_t virt, size_t size, uint32_t attrs, bool invalid, uint32_t vmid); int arch_unmap_vpart_to_block(uintptr_t virt, size_t size); /** diff --git a/include/virtualization/os/os_linux.h b/include/virtualization/os/os_linux.h index 8c9445bc79f31c3db6b8aeca1f5ee47319b38468..4424c30226c705596a97242073c2c40b6af8d9e5 100644 --- a/include/virtualization/os/os_linux.h +++ b/include/virtualization/os/os_linux.h @@ -37,6 +37,8 @@ #define LINUX_VMRFS_PHY_BASE DT_PROP(DT_INST(0, linux_vm), rootfs_address) #define LINUX_VM_VCPU_NUM DT_PROP(DT_INST(0, linux_vm), vcpu_num) +#define LINUX_IMAGE_VMRFS_SIZE 0x15E00000 + #ifdef CONFIG_VM_DTB_FILE_INPUT #define LINUX_DTB_MEM_BASE DT_PROP(DT_INST(0, linux_vm), dtb_address) #define LINUX_DTB_MEM_SIZE DT_PROP(DT_INST(0, linux_vm), dtb_size) diff --git a/include/virtualization/vm_mm.h b/include/virtualization/vm_mm.h index 734ebb83201af59db2ac8b2a48eccade1b339bf6..ef678f5d619e7200252979714f7afa5984fa59f2 100644 --- a/include/virtualization/vm_mm.h +++ b/include/virtualization/vm_mm.h @@ -117,7 +117,7 @@ struct vm_mem_domain { * @param vpart : vpart for each task * @return int : 0--success, other for error code. */ -int map_vpart_to_block(struct vm_mem_domain *vmem_domain, struct vm_mem_partition *vpart, uint64_t unit_msize); +int map_vpart_to_block(struct vm_mem_domain *vmem_domain, struct vm_mem_partition *vpart); int unmap_vpart_to_block(struct vm_mem_domain *vmem_domain, struct vm_mem_partition *vpart); /** diff --git a/samples/_zvm/boards/qemu_cortex_max_smp.conf b/samples/_zvm/boards/qemu_cortex_max_smp.conf index c9855810d6bb6cca440bec4ebf3d6f5e6f0b475e..5193318f3f975d2e6f92c352f5ebad37bce7ae5b 100644 --- a/samples/_zvm/boards/qemu_cortex_max_smp.conf +++ b/samples/_zvm/boards/qemu_cortex_max_smp.conf @@ -6,7 +6,7 @@ CONFIG_ARMV8_A_NS=y # kernel vm size(16M virt ram start + size) # It is the virt size of kernel, must bigger than true kernel size. 
-CONFIG_KERNEL_VM_SIZE=0xC0000000 +CONFIG_KERNEL_VM_SIZE=0xa0000000 # uart CONFIG_UART_PL011_PORT1=n diff --git a/samples/_zvm/boards/qemu_cortex_max_smp.overlay b/samples/_zvm/boards/qemu_cortex_max_smp.overlay index d90521fa8bc08aab5ee4283b6c19dfd05b699629..dc93c58906de599aafa475234337ddf3ede2e4c6 100644 --- a/samples/_zvm/boards/qemu_cortex_max_smp.overlay +++ b/samples/_zvm/boards/qemu_cortex_max_smp.overlay @@ -8,15 +8,13 @@ aliases { vmvirtio1 = "/soc/virtio_mmio@a000000"; vmvirtio2 = "/soc/virtio_mmio@a001000"; - zephyrcpy = "/soc/zephyr_cpy@60000000"; - linuxcpy = "/soc/linux_cpy@70000000"; - linuxrfs = "/soc/linux_rootfs@90000000"; - linuxdtb = "/soc/linux_dtb@75000000"; + zephyrcpy = "/soc/zephyr_cpy@a0000000"; + linuxcpy = "/soc/linux_cpy@a2000000"; + linuxrfs = "/soc/linux_rootfs@aa000000"; + linuxdtb = "/soc/linux_dtb@a8000000"; /*Passthrough device.*/ ptdevice1 = "/soc/pass_through_device/uart@9001000"; ptdevice2 = "/soc/pass_through_device/uart@9002000"; - vmvirtmem = "/soc/virtmem@c0000000"; - vmshmemrw = "/soc/shmem_rw@bd000000"; }; chosen { @@ -42,24 +40,6 @@ virtio_type = <2>; }; - virtmem@c0000000 { - compatible = "arm,mem"; - device_type = "memory"; - reg = <0x00 0xc0000000 0x00 0x1000>; - interrupts = ; - label = "VM_SHMEM"; - status = "okay"; - }; - - shmem_rw@bd000000 { - compatible = "shmem,rw"; - device_type = "memory"; - reg = <0x0 0xbd000000 0x0 0x10>; - interrupts = ; - label = "VM_SHMEMRW"; - status = "okay"; - }; - pcie@4010000000 { compatible = "pci-host-ecam-generic"; interrupt-map-mask = <0x1800 0x00 0x00 0x07>; @@ -92,20 +72,20 @@ compatible = "pci-host-ecam-generic"; }; - zephyr_cpy@60000000 { - reg = <0x0 0x60000000 0x0 DT_SIZE_M(2)>; + zephyr_cpy@a0000000 { + reg = <0x0 0xa0000000 0x0 DT_SIZE_M(2)>; }; - linux_cpy@70000000 { - reg = <0x0 0x70000000 0x0 DT_SIZE_M(64)>; + linux_cpy@a2000000 { + reg = <0x0 0xa2000000 0x0 DT_SIZE_M(64)>; }; - linux_dtb@75000000 { - reg = <0x0 0x75000000 0x0 DT_SIZE_M(2)>; + linux_dtb@a8000000 { + reg = <0x0 0xa8000000 0x0 DT_SIZE_M(2)>; }; - linux_rootfs@90000000 { - reg = <0x0 0x90000000 0x0 DT_SIZE_M(180)>; + linux_rootfs@aa000000 { + reg = <0x0 0xaa000000 0x0 DT_SIZE_M(180)>; }; pass_through_device { diff --git a/samples/_zvm/prj.conf b/samples/_zvm/prj.conf index ac6d08790449e4e2f0d07f6dc86b71f48f2d79a5..84dca1cfc17c73aeabaa87e7e567495ff619bb9e 100644 --- a/samples/_zvm/prj.conf +++ b/samples/_zvm/prj.conf @@ -66,7 +66,7 @@ CONFIG_ISR_STACK_SIZE=81920 CONFIG_SHELL_STACK_SIZE=65536 # 128MB heap size. 
-CONFIG_HEAP_MEM_POOL_SIZE=134217728 +CONFIG_HEAP_MEM_POOL_SIZE=1073741824 # expand partitions CONFIG_MAX_DOMAIN_PARTITIONS=32 @@ -75,7 +75,7 @@ CONFIG_MAX_DOMAIN_PARTITIONS=32 CONFIG_MAX_XLAT_TABLES=20480 # dynamic memory allocation -CONFIG_VM_DYNAMIC_MEMORY=n +CONFIG_VM_DYNAMIC_MEMORY=y # dtb file support CONFIG_VM_DTB_FILE_INPUT=y @@ -93,3 +93,5 @@ CONFIG_DISK_RAM_VOLUME_SIZE=192 CONFIG_MINIMAL_LIBC_RAND=y CONFIG_DYNAMIC_INTERRUPTS=y + +CONFIG_NO_OPTIMIZATIONS=y \ No newline at end of file diff --git a/soc/arm64/qemu_cortex_max/mmu_regions.c b/soc/arm64/qemu_cortex_max/mmu_regions.c index 0756c7f178ce0b914c8277785c1bccc1b6892c43..fba118d1fbc8cffe1c15867ff29b2f4f77e8491a 100644 --- a/soc/arm64/qemu_cortex_max/mmu_regions.c +++ b/soc/arm64/qemu_cortex_max/mmu_regions.c @@ -37,11 +37,6 @@ static const struct arm_mmu_region mmu_regions[] = { DT_REG_SIZE(DT_INST(2, arm_pl011)), MT_DEVICE_nGnRnE | MT_P_RW_U_NA | MT_DEFAULT_SECURE_STATE), - MMU_REGION_FLAT_ENTRY("VM_SHMEM", - DT_REG_ADDR(DT_ALIAS(vmvirtmem)), - DT_REG_SIZE(DT_ALIAS(vmvirtmem)), - MT_DEVICE_nGnRnE | MT_P_RW_U_NA | MT_DEFAULT_SECURE_STATE), - MMU_REGION_FLAT_ENTRY("ZEPHYR_ELF_ADDR", DT_REG_ADDR(DT_NODELABEL(zephyr_ddr)), DT_REG_SIZE(DT_NODELABEL(zephyr_ddr)), diff --git a/subsys/virtualization/os/os_linux.c b/subsys/virtualization/os/os_linux.c index c2f2988c93228cd6a304643c69d76258360102a9..93956cd8661cf230412aba63b60c5a06b0252450 100644 --- a/subsys/virtualization/os/os_linux.c +++ b/subsys/virtualization/os/os_linux.c @@ -10,41 +10,57 @@ #include -static atomic_t zvm_linux_image_map_init = ATOMIC_INIT(0); -static uint64_t zvm_linux_image_map_phys = 0; - -/** - * @brief Establish a mapping between the linux image physical - * addresses and virtual addresses. - */ -static uint64_t zvm_mapped_linux_image(void) -{ - uint8_t *ptr; - uintptr_t phys; - size_t size; - uint32_t flags; - if(likely(!atomic_cas(&zvm_linux_image_map_init, 0, 1))){ - return zvm_linux_image_map_phys; - } - - phys = LINUX_VM_IMAGE_BASE; - size = LINUX_VM_IMAGE_SIZE; - flags = K_MEM_CACHE_NONE | K_MEM_PERM_RW | K_MEM_PERM_EXEC; - z_phys_map(&ptr, phys, size, flags); - zvm_linux_image_map_phys = (uint64_t)ptr; - return zvm_linux_image_map_phys; -} +// static atomic_t zvm_linux_image_map_init = ATOMIC_INIT(0); +// static uint64_t zvm_linux_image_map_phys = 0; + +// /** +// * @brief Establish a mapping between the linux image physical +// * addresses and virtual addresses. 
+// */ +// static uint64_t zvm_mapped_linux_image(void) +// { +// uint8_t *ptr; +// uintptr_t phys; +// size_t size; +// uint32_t flags; +// if(likely(!atomic_cas(&zvm_linux_image_map_init, 0, 1))){ +// return zvm_linux_image_map_phys; +// } + +// phys = LINUX_VM_IMAGE_BASE; +// size = LINUX_VM_IMAGE_SIZE; +// flags = K_MEM_CACHE_NONE | K_MEM_PERM_RW | K_MEM_PERM_EXEC; +// z_phys_map(&ptr, phys, size, flags); +// zvm_linux_image_map_phys = (uint64_t)ptr; +// return zvm_linux_image_map_phys; +// } int load_linux_image(struct vm_mem_domain *vmem_domain) { int ret = 0; - uint64_t lbase_size, limage_base, limage_size; + uint64_t entry, limage_size; struct _dnode *d_node, *ds_node; struct vm_mem_partition *vpart; - uint64_t *src_hva, des_hva; + struct vm *this_vm = vmem_domain->vm; + uint64_t *src_hva, des_hva, des_hpa, total_des_hpa; uint64_t num_m = LINUX_VM_IMAGE_SIZE / (1024 * 1024); uint64_t src_hpa = LINUX_VMCPY_BASE; - uint64_t des_hpa = LINUX_VM_IMAGE_BASE; +#ifndef CONFIG_VM_DYNAMIC_MEMORY + des_hpa = LINUX_VM_IMAGE_BASE; +#else + entry = LINUX_VMSYS_BASE; + limage_size = LINUX_IMAGE_VMRFS_SIZE; + SYS_DLIST_FOR_EACH_NODE_SAFE(&vmem_domain->mapped_vpart_list,d_node,ds_node){ + vpart = CONTAINER_OF(d_node,struct vm_mem_partition,vpart_node); + if(vpart->part_hpa_size == LINUX_VMSYS_SIZE){ + des_hpa = vpart->part_hpa_base; + total_des_hpa = vpart->part_hpa_base; + arch_mmap_vpart_to_block(vmem_domain->vm_mm_domain, vpart->part_hpa_base, + entry, limage_size, MT_VM_NORMAL_MEM, false, this_vm->vmid); + break; + } + } +#endif uint64_t per_size = 1048576; //1M ZVM_LOG_INFO("Linux Kernel Image Loading ...\n"); @@ -87,7 +103,11 @@ int load_linux_image(struct vm_mem_domain *vmem_domain) num_m = LINUX_VMRFS_SIZE / (1024 * 1024); src_hpa = LINUX_VMRFS_BASE; +#ifndef CONFIG_VM_DYNAMIC_MEMORY des_hpa = LINUX_VMRFS_PHY_BASE; +#else + des_hpa = total_des_hpa + 0x9000000; +#endif ZVM_LOG_INFO("Linux FS Image Loading ...\n"); ZVM_LOG_INFO("1 rf_num_m = %lld\n", num_m); @@ -106,20 +126,20 @@ int load_linux_image(struct vm_mem_domain *vmem_domain) ZVM_LOG_INFO("Linux FS Image Loaded !\n"); -#ifndef CONFIG_VM_DYNAMIC_MEMORY - return ret; -#endif /* CONFIG_VM_DYNAMIC_MEMORY */ - - lbase_size = LINUX_VMSYS_SIZE; - limage_base = zvm_mapped_linux_image(); - limage_size = LINUX_VM_IMAGE_SIZE; - SYS_DLIST_FOR_EACH_NODE_SAFE(&vmem_domain->mapped_vpart_list,d_node,ds_node){ - vpart = CONTAINER_OF(d_node,struct vm_mem_partition,vpart_node); - if(vpart->part_hpa_size == lbase_size){ - memcpy((void *)vpart->part_hpa_base, (const void *)limage_base, limage_size); - break; - } - } +// #ifndef CONFIG_VM_DYNAMIC_MEMORY +// return ret; +// #endif /* CONFIG_VM_DYNAMIC_MEMORY */ + +// lbase_size = LINUX_VMSYS_SIZE; +// limage_base = zvm_mapped_linux_image(); +// limage_size = LINUX_VM_IMAGE_SIZE; +// SYS_DLIST_FOR_EACH_NODE_SAFE(&vmem_domain->mapped_vpart_list,d_node,ds_node){ +// vpart = CONTAINER_OF(d_node,struct vm_mem_partition,vpart_node); +// if(vpart->part_hpa_size == lbase_size){ +// memcpy((void *)vpart->part_hpa_base, (const void *)limage_base, limage_size); +// break; +// } +// } return ret; } diff --git a/subsys/virtualization/os/os_zephyr.c b/subsys/virtualization/os/os_zephyr.c index 7ef719ae55ca906b9189ea86fc1a2f0c4469e7e9..419fca73d202f990c45d916a215ca757de346aca 100644 --- a/subsys/virtualization/os/os_zephyr.c +++ b/subsys/virtualization/os/os_zephyr.c @@ -44,7 +44,7 @@ int load_zephyr_image(struct vm_mem_domain *vmem_domain) ARG_UNUSED(dbuf); ARG_UNUSED(sbuf); int ret = 0; - uint64_t 
zbase_size,zimage_base,zimage_size; + uint64_t entry,zimage_base,zimage_size; struct _dnode *d_node, *ds_node; struct vm_mem_partition *vpart; struct vm_mem_block *blk; @@ -80,13 +80,15 @@ int load_zephyr_image(struct vm_mem_domain *vmem_domain) return ret; #endif /* CONFIG_VM_DYNAMIC_MEMORY */ - zbase_size = ZEPHYR_VMSYS_SIZE; + entry = ZEPHYR_VMSYS_BASE; zimage_base = zvm_mapped_zephyr_image(); - zimage_size = ZEPHYR_VM_IMAGE_SIZE; + zimage_size = ZEPHYR_VM_IMAGE_SIZE; SYS_DLIST_FOR_EACH_NODE_SAFE(&vmem_domain->mapped_vpart_list,d_node,ds_node){ vpart = CONTAINER_OF(d_node,struct vm_mem_partition,vpart_node); - if(vpart->part_hpa_size == zbase_size){ + if(vpart->part_hpa_size == ZEPHYR_VMSYS_SIZE){ memcpy((void *)vpart->part_hpa_base,(const void *)zimage_base,zimage_size); + arch_mmap_vpart_to_block(vmem_domain->vm_mm_domain,vpart->part_hpa_base, + entry,zimage_size,MT_VM_NORMAL_MEM,false,this_vm->vmid); break; } } diff --git a/subsys/virtualization/vm_mm.c b/subsys/virtualization/vm_mm.c index 7514b51cbe15942c9e6dc0644eea1ea3b75a30d1..d2931b4f1fc1cdeb5efc71b0aa8e3dce9e16466c 100644 --- a/subsys/virtualization/vm_mm.c +++ b/subsys/virtualization/vm_mm.c @@ -84,6 +84,66 @@ static struct vm_mem_partition *alloc_vm_mem_partition(uint64_t hpbase, return vpart; } + + +/** + * @brief Alloc memory block for this vpart, direct use current address. + */ +static int alloc_vm_mem_block(struct vm_mem_domain *vmem_dm, + struct vm_mem_partition *vpart) +{ + int i, ret = 0; + uint64_t vpart_vbase,vpart_pbase, blk_count,msize,unit_msize,image_size; + struct vm_mem_block *block; + struct vm *vm = vmem_dm->vm; + + switch(vm->os->type) { + case OS_TYPE_LINUX: + msize = vpart->vm_mm_partition->size - LINUX_IMAGE_VMRFS_SIZE; + image_size = LINUX_IMAGE_VMRFS_SIZE; + unit_msize = LINUX_VM_BLOCK_SIZE; + break; + case OS_TYPE_ZEPHYR: + msize = vpart->vm_mm_partition->size - ZEPHYR_VM_IMAGE_SIZE; + image_size = ZEPHYR_VM_IMAGE_SIZE; + unit_msize = ZEPHYR_VM_BLOCK_SIZE; + break; + default: + unit_msize = DEFAULT_VM_BLOCK_SIZE; + ZVM_LOG_WARN("Unknown os type!"); + break; + } + + vpart_vbase = vpart->vm_mm_partition->start + image_size; + vpart_pbase = vpart->part_hpa_base + image_size; + /* Add flag for blk map, set size as 64k(2M) block */ + blk_count = msize / unit_msize; + + /* allocate physical memory for block */ + for (i = 0; i < blk_count; i++) { + /* allocate block for block struct*/ + block = (struct vm_mem_block *)k_malloc(sizeof(struct vm_mem_block)); + if (block == NULL) { + return -EMMAO; + } + memset(block, 0, sizeof(struct vm_mem_block)); + + /* init block pointer for vm */ + block->phy_base = vpart_pbase + unit_msize*i; + block->virt_base = vpart_vbase + unit_msize*i; + /* get the block number */ + block->cur_blk_offset = i; + /* No physical base */ + block->phy_pointer = NULL; + + sys_dlist_append(&vpart->blk_list, &block->vblk_node); + } + + return ret; +} + + + /** * @brief init vpart from default device tree. 
*/ @@ -101,6 +161,11 @@ static int create_vm_mem_vpart(struct vm_mem_domain *vmem_domain, uint64_t hpbas ret = add_idle_vpart(vmem_domain, vpart); +#ifdef CONFIG_VM_DYNAMIC_MEMORY + if(vpart->vm_mm_partition->start == LINUX_VMSYS_BASE || vpart->vm_mm_partition->start == ZEPHYR_VMSYS_BASE) + ret = alloc_vm_mem_block(vmem_domain, vpart); +#endif + return ret; } @@ -109,9 +174,9 @@ static int vm_ram_mem_create(struct vm_mem_domain *vmem_domain) int ret = 0; int type = OS_TYPE_MAX; uint64_t va_base, pa_base, kpa_base, size; - struct _dnode *d_node, *ds_node; + // struct _dnode *d_node, *ds_node; struct vm *vm = vmem_domain->vm; - struct vm_mem_partition *vpart; + // struct vm_mem_partition *vpart; type = vm->os->type; switch (type) { @@ -119,14 +184,14 @@ static int vm_ram_mem_create(struct vm_mem_domain *vmem_domain) va_base = LINUX_VMSYS_BASE; size = LINUX_VMSYS_SIZE; #ifdef CONFIG_VM_DYNAMIC_MEMORY - kpa_base = (uint64_t)k_malloc(size + CONFIG_MMU_PAGE_SIZE); + kpa_base = (uint64_t)k_malloc(LINUX_IMAGE_VMRFS_SIZE + CONFIG_MMU_PAGE_SIZE); if(kpa_base == 0){ ZVM_LOG_ERR("The heap memory is not enough\n"); return -EMMAO; } pa_base = z_mem_phys_addr((void *)ROUND_UP(kpa_base, CONFIG_MMU_PAGE_SIZE)); #else - ARG_UNUSED(vpart); + // ARG_UNUSED(vpart); ARG_UNUSED(kpa_base); pa_base = LINUX_VM_IMAGE_BASE; #endif @@ -135,17 +200,17 @@ static int vm_ram_mem_create(struct vm_mem_domain *vmem_domain) va_base = ZEPHYR_VMSYS_BASE; size = ZEPHYR_VMSYS_SIZE; #ifdef CONFIG_VM_DYNAMIC_MEMORY - kpa_base = (uint64_t)k_malloc(size + CONFIG_MMU_PAGE_SIZE); + kpa_base = (uint64_t)k_malloc(ZEPHYR_VM_IMAGE_SIZE + CONFIG_MMU_PAGE_SIZE); if(kpa_base == 0){ ZVM_LOG_ERR("The heap memory is not enough\n"); return -EMMAO; } pa_base = z_mem_phys_addr((void *)ROUND_UP(kpa_base, CONFIG_MMU_PAGE_SIZE)); #else - ARG_UNUSED(vpart); + // ARG_UNUSED(vpart); ARG_UNUSED(kpa_base); - ARG_UNUSED(d_node); - ARG_UNUSED(ds_node); + // ARG_UNUSED(d_node); + // ARG_UNUSED(ds_node); pa_base = ZEPHYR_VM_IMAGE_BASE; #endif break; @@ -155,7 +220,7 @@ static int vm_ram_mem_create(struct vm_mem_domain *vmem_domain) } ret = create_vm_mem_vpart(vmem_domain, pa_base, va_base, size, MT_VM_NORMAL_MEM); - +/* #ifdef CONFIG_VM_DYNAMIC_MEMORY SYS_DLIST_FOR_EACH_NODE_SAFE(&vmem_domain->idle_vpart_list,d_node,ds_node){ vpart = CONTAINER_OF(d_node,struct vm_mem_partition,vpart_node); @@ -165,6 +230,7 @@ static int vm_ram_mem_create(struct vm_mem_domain *vmem_domain) } } #endif +*/ return ret; } @@ -248,69 +314,40 @@ int vm_vdev_mem_create(struct vm_mem_domain *vmem_domain, uint64_t hpbase, return create_vm_mem_vpart(vmem_domain, hpbase, ipbase, size, attrs); } -// int map_vpart_to_block(struct vm_mem_domain *vmem_domain, -// struct vm_mem_partition *vpart, uint64_t unit_msize) -// { -// ARG_UNUSED(unit_msize); -// int ret = 0; -// uint64_t vm_mem_size; - -// struct vm_mem_block *blk; -// struct _dnode *d_node, *ds_node; -// struct vm *vm = vmem_domain->vm; - -// switch (vm->os->type) { -// case OS_TYPE_LINUX: -// vm_mem_size = LINUX_VM_BLOCK_SIZE; -// break; -// case OS_TYPE_ZEPHYR: -// vm_mem_size = ZEPHYR_VM_BLOCK_SIZE; -// break; -// default: -// vm_mem_size = DEFAULT_VM_BLOCK_SIZE; -// ZVM_LOG_WARN("Unknow os type!"); -// break; -// } - -// #ifdef CONFIG_VM_DYNAMIC_MEMORY -// uint64_t base_addr = vpart->area_start; -// uint64_t size = vpart->area_size; -// uint64_t virt_offset; - -// SYS_DLIST_FOR_EACH_NODE_SAFE(&vpart->blk_list, d_node, ds_node){ -// blk = CONTAINER_OF(d_node, struct vm_mem_block, vblk_node); +int map_vpart_to_block(struct 
vm_mem_domain *vmem_domain, + struct vm_mem_partition *vpart) +{ + int ret = 0; + uint64_t vm_mem_size; -// /* find the virt address for this block */ -// virt_offset = base_addr + (blk->cur_blk_offset * vm_mem_size); + struct vm_mem_block *blk; + struct _dnode *d_node, *ds_node; + struct vm *vm = vmem_domain->vm; -// size = vm_mem_size; + switch (vm->os->type) { + case OS_TYPE_LINUX: + vm_mem_size = LINUX_VM_BLOCK_SIZE; + break; + case OS_TYPE_ZEPHYR: + vm_mem_size = ZEPHYR_VM_BLOCK_SIZE; + break; + default: + vm_mem_size = DEFAULT_VM_BLOCK_SIZE; + ZVM_LOG_WARN("Unknown os type!"); + break; + } -// /* add mapping from virt to block physcal address */ -// ret = arch_mmap_vpart_to_block(blk->phy_base, virt_offset, size, MT_VM_NORMAL_MEM); + SYS_DLIST_FOR_EACH_NODE_SAFE(&vpart->blk_list, d_node, ds_node){ + blk = CONTAINER_OF(d_node, struct vm_mem_block, vblk_node); -// } -// #else -// SYS_DLIST_FOR_EACH_NODE_SAFE(&vpart->blk_list, d_node, ds_node){ -// blk = CONTAINER_OF(d_node, struct vm_mem_block, vblk_node); + /* add mapping from virt to block physcal address */ + ret = arch_mmap_vpart_to_block(vmem_domain->vm_mm_domain, blk->phy_base, blk->virt_base, + vm_mem_size, MT_VM_NORMAL_MEM, true, vm->vmid); -// if (blk->cur_blk_offset) { -// continue; -// }else{ -// ret = arch_mmap_vpart_to_block(blk->phy_base, vpart->area_start, -// vpart->area_size, MT_VM_NORMAL_MEM); -// if (ret) { -// return ret; -// } -// } -// break; -// } -// #endif /* CONFIG_VM_DYNAMIC_MEMORY */ -// /* get the pgd table */ -// vm->arch->vm_pgd_base = (uint64_t) -// vm->vmem_domain->vm_mm_domain->arch.ptables.base_xlat_table; + } -// return ret; -// } + return ret; +} /** @@ -461,7 +498,15 @@ static int vm_mem_domain_partition_add(struct vm_mem_domain *vmem_dm, domain->num_partitions++; #ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API +#ifndef CONFIG_VM_DYNAMIC_MEMORY ret = arch_vm_mem_domain_partition_add(domain, p_idx, phys_start, vm->vmid); +#else + if(vpart->vm_mm_partition->start == LINUX_VMSYS_BASE || vpart->vm_mm_partition->start == ZEPHYR_VMSYS_BASE) + //ret = map_vpart_to_block(vmem_dm, vpart); + ; + else + ret = arch_vm_mem_domain_partition_add(domain, p_idx, phys_start, vm->vmid); +#endif #endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */ unlock_out:
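
Note on the demand-allocation path added to handle_ftrans_desc() in trap_handler.c: on a stage-2 translation fault, the handler works out which block of the VM's dynamic RAM region the faulting IPA falls into, backs that block with freshly allocated heap memory, and installs a valid stage-2 mapping before re-executing the faulting instruction. The sketch below condenses that path into one helper for readability; it is illustrative only. The types, list macros, layout macros (ZEPHYR_/LINUX_ bases, image and block sizes) and the widened arch_mmap_vpart_to_block() signature are taken from this patch, while the helper name demand_map_block() and the include paths are assumptions.

/* Assumed include paths, based on this tree's include/ layout. */
#include <virtualization/vm_mm.h>
#include <virtualization/arm/mm.h>

static int demand_map_block(struct vm *vm, uint64_t pa_addr)
{
    struct vm_mem_domain *vmem_domain = vm->vmem_domain;
    struct vm_mem_partition *vpart;
    struct vm_mem_block *blk;
    struct _dnode *d_node, *ds_node, *vd_node, *vds_node;
    uint64_t image_size, base_addr, mem_size, base_offset;

    /* Per-OS layout: the image area is pre-copied, the rest is block-backed. */
    if (vm->vmid < ZVM_ZEPHYR_VM_NUM) {
        image_size = ZEPHYR_VM_IMAGE_SIZE;
        base_addr = ZEPHYR_VMSYS_BASE;
        mem_size = ZEPHYR_VM_BLOCK_SIZE;
    } else {
        image_size = LINUX_IMAGE_VMRFS_SIZE;
        base_addr = LINUX_VMSYS_BASE;
        mem_size = LINUX_VM_BLOCK_SIZE;
    }
    /* Which block of the dynamic region does the faulting IPA fall into? */
    base_offset = (pa_addr - image_size - base_addr) / mem_size;

    SYS_DLIST_FOR_EACH_NODE_SAFE(&vmem_domain->mapped_vpart_list, d_node, ds_node) {
        vpart = CONTAINER_OF(d_node, struct vm_mem_partition, vpart_node);
        if (pa_addr < vpart->vm_mm_partition->start ||
            pa_addr >= vpart->vm_mm_partition->start + vpart->vm_mm_partition->size) {
            continue;
        }
        SYS_DLIST_FOR_EACH_NODE_SAFE(&vpart->blk_list, vd_node, vds_node) {
            blk = CONTAINER_OF(vd_node, struct vm_mem_block, vblk_node);
            if (blk->cur_blk_offset != base_offset) {
                continue;
            }
            /* Back the block, page-align the buffer, and install a valid stage-2 map. */
            blk->phy_pointer = k_malloc(mem_size + CONFIG_MMU_PAGE_SIZE);
            if (blk->phy_pointer == NULL) {
                return -ENOMEM;
            }
            blk->phy_base = z_mem_phys_addr(
                (void *)ROUND_UP((uintptr_t)blk->phy_pointer, CONFIG_MMU_PAGE_SIZE));
            return arch_mmap_vpart_to_block(vmem_domain->vm_mm_domain, blk->phy_base,
                                            blk->virt_base, mem_size,
                                            MT_VM_NORMAL_MEM, false, vm->vmid);
        }
        break;
    }
    return -EFAULT;
}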
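
Note on the widened arch_mmap_vpart_to_block(domain, phys, virt, size, attrs, invalid, vmid) signature: map_vpart_to_block() in vm_mm.c now pre-populates a VM's stage-2 tables with invalid = true, so the last-level descriptors are written non-valid and the first guest access faults into handle_ftrans_desc(); the fault path then remaps the same block with invalid = false once it has physical backing. A minimal wrapper showing the call shape, assuming the patch's types and headers; stage2_map_block() is a hypothetical name, not part of the patch.

#include <virtualization/vm_mm.h>
#include <virtualization/arm/mm.h>

/* Map one block of a VM partition into the guest's stage-2 tables. */
static int stage2_map_block(struct vm_mem_domain *vmem_domain, struct vm_mem_block *blk,
                            uint64_t blk_size, bool invalid, uint32_t vmid)
{
    /* invalid = true  -> descriptors are written non-valid; guest access faults.
     * invalid = false -> normal valid mapping to blk->phy_base.
     */
    return arch_mmap_vpart_to_block(vmem_domain->vm_mm_domain, blk->phy_base,
                                    blk->virt_base, blk_size,
                                    MT_VM_NORMAL_MEM, invalid, vmid);
}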