From d8ab6cbd92f0f3b14480451893ed9fbe0afbc32d Mon Sep 17 00:00:00 2001
From: fuju
Date: Thu, 8 Jan 2026 08:25:49 -0500
Subject: [PATCH] KVM: arm64: Unify the CCAL memory optimization RMI interface

The CCAL code duplicated the generic RME RTT management with its own
paired-RTT commands. Drop CCAL_RTT_PAIR_CREATE, CCAL_RTT_DESTROY and
CCAL_RTT_FOLD together with their wrappers and helpers, and instead
export realm_create_protected_data_page(), realm_create_rtt_levels()
and fold_rtt() from rme.c so that the CCAL paths can share them.

In addition:
 - pass an explicit level to the CCAL block-create RMIs;
 - move get_start_level() and find_map_level() into rme-ccal.c and
   declare them in kvm_rme_ccal.h;
 - add realm_ccal_set_ipa_state(), which creates any missing RTTs for
   both halves of the IPA space before retrying a RIPAS change;
 - create unpopulated protected pages one RMM granule at a time, since
   the host page size may exceed the RMM granule size;
 - check the result of kmalloc() in the populate and map paths.
---
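
Note (illustrative only, not part of the diff): with the CCAL-specific
RTT commands gone, every caller of the data/block creation RMIs
recovers from an incomplete RTT walk the same way. The sketch below
uses a hypothetical rmi_call() in place of the concrete wrapper
(rmi_data_create_unknown(), rmi_ccal_block_create(), ...):

	ret = rmi_call(rd, ipa, ...);
	if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT) {
		/* The walk stopped early: build the missing levels... */
		int err_level = RMI_RETURN_INDEX(ret);

		ret = realm_create_rtt_levels(realm, ipa, err_level,
					      max_level, NULL);
		if (!ret)
			ret = rmi_call(rd, ipa, ...); /* ...and retry */
	}
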
 arch/arm64/include/asm/kvm_rme.h      |   9 +
 arch/arm64/include/asm/kvm_rme_ccal.h |  21 +-
 arch/arm64/include/asm/rmi_cmds.h     |  45 +---
 arch/arm64/include/asm/rmi_smc.h      |   3 -
 arch/arm64/kvm/rme-ccal.c             | 369 +++++++++++---------
 arch/arm64/kvm/rme.c                  |  38 +--
 6 files changed, 195 insertions(+), 290 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_rme.h b/arch/arm64/include/asm/kvm_rme.h
index 35f2771602c4..a3eff1609d2f 100644
--- a/arch/arm64/include/asm/kvm_rme.h
+++ b/arch/arm64/include/asm/kvm_rme.h
@@ -132,6 +132,15 @@ int _realm_psci_complete(struct kvm_vcpu *source,
 			 struct kvm_vcpu *target,
 			 unsigned long status);
 
+int realm_create_protected_data_page(struct realm *realm,
+				     unsigned long ipa,
+				     struct page *dst_page,
+				     struct page *src_page,
+				     unsigned long flags);
+int realm_create_rtt_levels(struct realm *realm, unsigned long ipa, int level,
+			    int max_level, struct kvm_mmu_memory_cache *mc);
+int fold_rtt(struct realm *realm, unsigned long addr, int level);
+
 static inline bool kvm_realm_is_private_address(struct realm *realm,
 						unsigned long addr)
 {
diff --git a/arch/arm64/include/asm/kvm_rme_ccal.h b/arch/arm64/include/asm/kvm_rme_ccal.h
index 21fba9794148..bb9b7998e831 100644
--- a/arch/arm64/include/asm/kvm_rme_ccal.h
+++ b/arch/arm64/include/asm/kvm_rme_ccal.h
@@ -24,21 +24,22 @@ static inline bool is_ccal_rvm(struct realm *realm)
 
 void config_realm_ccal(struct realm *realm);
 
 int realm_ccal_populate_region(struct kvm *kvm, phys_addr_t ipa_base,
-			      phys_addr_t ipa_end, phys_addr_t *ipa_top,
-			      u32 flags);
+			       phys_addr_t ipa_end, phys_addr_t *ipa_top,
+			       u32 flags);
 
 int realm_ccal_map_ram(struct kvm *kvm,
-		      struct arm_rme_populate_realm *args);
-
-int ccal_create_rtt_levels(struct realm *realm, unsigned long ipa, int level,
-			   int max_level);
+		       struct arm_rme_populate_realm *args);
 
 void realm_ccal_destroy_data_range(struct kvm *kvm, unsigned long start,
-				  unsigned long end);
-
-int ccal_fold_rtt_level(struct realm *realm, int level, unsigned long start,
 				   unsigned long end);
-int ccal_fold_rtt(struct realm *realm, unsigned long addr, int level);
+int realm_ccal_set_ipa_state(struct kvm_vcpu *vcpu, unsigned long start,
+			     unsigned long end, unsigned long ripas,
+			     unsigned long *top_ipa);
+int get_start_level(struct realm *realm);
+
+int find_map_level(struct realm *realm,
+		   unsigned long start,
+		   unsigned long end);
 
 #endif
diff --git a/arch/arm64/include/asm/rmi_cmds.h b/arch/arm64/include/asm/rmi_cmds.h
index 8dc752ca9a74..c069a3cfeaf1 100644
--- a/arch/arm64/include/asm/rmi_cmds.h
+++ b/arch/arm64/include/asm/rmi_cmds.h
@@ -557,50 +557,13 @@ static inline int rmi_ccal_undelegate_io_range(unsigned long start_addr,
 	return regs.a0;
 }
 
-static inline int rmi_ccal_rtt_create(unsigned long rd, unsigned long ipa,
-				      unsigned long level, unsigned long rtt1,
-				      unsigned long rtt2)
-{
-	struct arm_smccc_1_2_regs regs = {
-		SMC_RMI_HISI_EXT, CCAL_RTT_PAIR_CREATE,
-		rd, rtt1, rtt2, ipa, level
-	};
-
-	arm_smccc_1_2_smc(&regs, &regs);
-
-	return regs.a0;
-}
-
-static inline int rmi_ccal_rtt_fold(unsigned long rd, unsigned long ipa,
-				    unsigned long level,
-				    unsigned long *out_rtt1,
-				    unsigned long *out_rtt2)
-{
-	struct arm_smccc_1_2_regs regs = {
-		SMC_RMI_HISI_EXT, CCAL_RTT_FOLD,
-		rd, ipa, level
-	};
-
-	arm_smccc_1_2_smc(&regs, &regs);
-
-	if (RMI_RETURN_STATUS(regs.a0) == RMI_SUCCESS) {
-		if (out_rtt1)
-			*out_rtt1 = regs.a1;
-
-		if (out_rtt2)
-			*out_rtt2 = regs.a2;
-	}
-
-	return regs.a0;
-}
-
 static inline int rmi_ccal_block_create(unsigned long rd, unsigned long data,
 					unsigned long ipa, unsigned long src,
-					unsigned long flags)
+					unsigned long flags, unsigned long level)
 {
 	struct arm_smccc_1_2_regs regs = {
 		SMC_RMI_HISI_EXT, CCAL_BLOCK_DATA_CREATE_LVL2,
-		rd, data, ipa, src, flags
+		rd, data, ipa, src, flags, level
 	};
 
 	arm_smccc_1_2_smc(&regs, &regs);
@@ -610,11 +573,11 @@ static inline int rmi_ccal_block_create(unsigned long rd, unsigned long data,
 
 static inline int rmi_ccal_block_create_unknown(unsigned long rd,
 						unsigned long data,
-						unsigned long ipa)
+						unsigned long ipa, unsigned long level)
 {
 	struct arm_smccc_1_2_regs regs = {
 		SMC_RMI_HISI_EXT, CCAL_BLOCK_DATA_CREATE_UNKNOWN_LVL2,
-		rd, data, ipa
+		rd, data, ipa, level
 	};
 
 	arm_smccc_1_2_smc(&regs, &regs);
diff --git a/arch/arm64/include/asm/rmi_smc.h b/arch/arm64/include/asm/rmi_smc.h
index c125a2b79cef..7237f02c1425 100644
--- a/arch/arm64/include/asm/rmi_smc.h
+++ b/arch/arm64/include/asm/rmi_smc.h
@@ -52,9 +52,6 @@ enum hisi_ext_cmd {
 	CCAL_UNDELEGATE_RANGE,
 	CCAL_IO_DELEGATE_RANGE,
 	CCAL_IO_UNDELEGATE_RANGE,
-	CCAL_RTT_PAIR_CREATE,
-	CCAL_RTT_DESTROY,
-	CCAL_RTT_FOLD,
 	CCAL_BLOCK_DATA_CREATE_LVL2,
 	CCAL_BLOCK_DATA_CREATE_UNKNOWN_LVL2,
 	CCAL_DATA_DESTROY,
diff --git a/arch/arm64/kvm/rme-ccal.c b/arch/arm64/kvm/rme-ccal.c
index e86f61b7becd..a99fbfbb06b1 100644
--- a/arch/arm64/kvm/rme-ccal.c
+++ b/arch/arm64/kvm/rme-ccal.c
@@ -49,222 +49,100 @@ void config_realm_ccal(struct realm *realm)
 	realm->is_ccal = true;
 }
 
-static int ccal_alloc_delegated_rtt(phys_addr_t *phys)
+int get_start_level(struct realm *realm)
 {
-	struct page *pages = alloc_pages(GFP_KERNEL, CCAL_RTT_PAGE_ORDER);
-	phys_addr_t pa;
-
-	if (!pages)
-		return -ENOMEM;
-
-	pa = page_to_phys(pages);
-
-	if (rmi_ccal_delegate_range(pa, RMM_PAGE_SIZE * CCAL_RTT_PAGE_NUM)) {
-		__free_pages(pages, CCAL_RTT_PAGE_ORDER);
-		return -ENXIO;
-	}
-
-	*phys = pa;
-
-	return 0;
-}
-
-static void ccal_free_delegated_rtt(phys_addr_t phys1, phys_addr_t phys2)
-{
-	if (!WARN_ON(rmi_granule_undelegate(phys1)))
-		free_page((unsigned long)phys_to_virt(phys1));
-
-	if (!WARN_ON(rmi_granule_undelegate(phys2)))
-		free_page((unsigned long)phys_to_virt(phys2));
-}
-
-int ccal_create_rtt_levels(struct realm *realm, unsigned long ipa, int level,
-			   int max_level)
-{
-	unsigned long aligned_ipa;
-	phys_addr_t phys;
-	int ret;
-
-	if (WARN_ON(level == max_level))
-		return 0;
-
-	while (level++ < max_level) {
-		if (ccal_alloc_delegated_rtt(&phys))
-			return -ENOMEM;
-
-		aligned_ipa = ALIGN_DOWN(ipa, rme_rtt_level_mapsize(level - 1));
-
-		ret = rmi_ccal_rtt_create(virt_to_phys(realm->rd), aligned_ipa,
-					  level, phys, phys + RMM_PAGE_SIZE);
-		if (ret) {
-			ccal_free_delegated_rtt(phys, phys + RMM_PAGE_SIZE);
-
-			if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT &&
-			    RMI_RETURN_INDEX(ret) == level - 1) {
-				/* The RTT already exists, continue. */
-				continue;
-			}
-
-			WARN(1, "Failed to create CCAL RTT at level %d: %d\n",
-			     level, ret);
-			return -ENXIO;
-		}
-	}
-
-	return 0;
+	return 4 - ((realm->ia_bits - 8) / (RMM_PAGE_SHIFT - 3));
 }
 
-/*
- * Returns 0 on successful fold, a negative value on error, a positive value if
- * we were not able to fold all tables at this level.
- */
-int ccal_fold_rtt_level(struct realm *realm, int level, unsigned long start,
-			unsigned long end)
+int find_map_level(struct realm *realm,
+		   unsigned long start,
+		   unsigned long end)
 {
-	int not_folded = 0;
-	ssize_t map_size;
-	unsigned long addr, next_addr;
-
-	if (WARN_ON(level > RMM_RTT_MAX_LEVEL))
-		return -EINVAL;
-
-	map_size = rme_rtt_level_mapsize(level - 1);
+	int level = RMM_RTT_MAX_LEVEL;
 
-	for (addr = start; addr < end; addr = next_addr) {
-		unsigned long protected_rtt, unprotected_rtt;
-		int ret;
-
-		next_addr = ALIGN(addr + 1, map_size);
-
-		ret = rmi_ccal_rtt_fold(virt_to_phys(realm->rd), addr, level,
-					&protected_rtt, &unprotected_rtt);
+	while (level > get_start_level(realm)) {
+		unsigned long map_size = rme_rtt_level_mapsize(level - 1);
 
-		switch (RMI_RETURN_STATUS(ret)) {
-		case RMI_SUCCESS:
-			ccal_free_delegated_rtt(protected_rtt, unprotected_rtt);
-			break;
-		case RMI_ERROR_RTT:
-			if (level == RMM_RTT_MAX_LEVEL ||
-			    RMI_RETURN_INDEX(ret) < level) {
-				not_folded++;
-				break;
-			}
-			/* Recurse a level deeper */
-			ret = ccal_fold_rtt_level(realm, level + 1, addr,
-						  next_addr);
-			if (ret < 0)
-				return ret;
-			else if (ret == 0)
-				/* Try again at this level */
-				next_addr = addr;
+		if (!IS_ALIGNED(start, map_size) ||
+		    (start + map_size) > end)
 			break;
-		default:
-			WARN_ON(1);
-			return -ENXIO;
-		}
-	}
-
-	return not_folded;
-}
+		level--;
+	}
 
-int ccal_fold_rtt(struct realm *realm, unsigned long addr, int level)
-{
-	unsigned long protected_rtt, unprotected_rtt;
-	int ret;
-
-	ret = rmi_ccal_rtt_fold(virt_to_phys(realm->rd), addr, level,
-				&protected_rtt, &unprotected_rtt);
-	if (ret)
-		return ret;
-
-	ccal_free_delegated_rtt(protected_rtt, unprotected_rtt);
+	return level;
 }
 
-static int ccal_create_data_page(struct realm *realm, unsigned long ipa,
-				 struct page *dst_page, struct page *src_page,
-				 unsigned long flags)
+static int ccal_create_data_page_unknown(struct realm *realm, unsigned long ipa,
+					 struct page *page)
 {
 	phys_addr_t rd = virt_to_phys(realm->rd);
-	phys_addr_t dst_phys, src_phys;
-	int ret;
+	phys_addr_t phys = page_to_phys(page);
+	int ret, offset;
+
+	for (offset = 0; offset < PAGE_SIZE; offset += RMM_PAGE_SIZE) {
+		if (rmi_granule_delegate(phys)) {
+			/*
+			 * It's likely we raced with another VCPU on the same
+			 * fault. Assume the other VCPU has handled the fault
+			 * and return to the guest.
+			 */
+			return 0;
+		}
 
-	copy_page(page_address(src_page), page_address(dst_page));
+		ret = rmi_data_create_unknown(rd, phys, ipa);
 
-	dst_phys = page_to_phys(dst_page);
-	src_phys = page_to_phys(src_page);
+		if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT) {
+			/* Create missing RTTs and retry */
+			int level = RMI_RETURN_INDEX(ret);
 
-	if (rmi_granule_delegate(dst_phys))
-		return -ENXIO;
+			WARN_ON(level == RMM_RTT_MAX_LEVEL);
 
-	ret = rmi_data_create(rd, dst_phys, ipa, src_phys, flags);
+			ret = realm_create_rtt_levels(realm, ipa, level,
+						      RMM_RTT_MAX_LEVEL,
+						      NULL);
+			if (ret)
+				goto err_undelegate;
 
-	if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT) {
-		/* Create missing RTTs and retry. */
-		int err_level = RMI_RETURN_INDEX(ret);
+			ret = rmi_data_create_unknown(rd, phys, ipa);
+		}
 
-		ret = ccal_create_rtt_levels(realm, ipa, err_level,
-					     RMM_RTT_MAX_LEVEL);
-		if (ret)
-			goto err;
+		if (WARN_ON(ret))
+			goto err_undelegate;
 
-		ret = rmi_data_create(rd, dst_phys, ipa, src_phys, flags);
+		phys += RMM_PAGE_SIZE;
+		ipa += RMM_PAGE_SIZE;
 	}
 
-	if (ret)
-		goto err;
-
 	return 0;
-err:
-	if (WARN_ON(rmi_granule_undelegate(dst_phys))) {
-		/* Page can't be returned to NS world so is lost. */
-		get_page(dst_page);
-	}
-	return -ENXIO;
-}
-
-static int ccal_create_data_page_unknown(struct realm *realm, unsigned long ipa,
-					 struct page *page)
-{
-	phys_addr_t rd = virt_to_phys(realm->rd);
-	phys_addr_t phys = page_to_phys(page);
-	int ret;
-
-	if (rmi_granule_delegate(phys)) {
-		/* Race with another thread. */
-		return 0;
+
+err_undelegate:
+	if (WARN_ON(rmi_granule_undelegate(phys))) {
+		/* Page can't be returned to NS world so is lost */
+		get_page(phys_to_page(phys));
 	}
 
-	ret = rmi_data_create_unknown(rd, phys, ipa);
-
-	if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT) {
-		/* Create missing RTTs and retry. */
-		int err_level = RMI_RETURN_INDEX(ret);
-
-		ret = ccal_create_rtt_levels(realm, ipa, err_level,
-					     RMM_RTT_MAX_LEVEL);
-		if (ret)
-			goto err;
+	while (offset > 0) {
+		unsigned long data, top;
 
-		ret = rmi_data_create_unknown(rd, phys, ipa);
-	}
+		phys -= RMM_PAGE_SIZE;
+		offset -= RMM_PAGE_SIZE;
+		ipa -= RMM_PAGE_SIZE;
 
-	if (ret)
-		goto err;
+		WARN_ON(rmi_data_destroy(rd, ipa, &data, &top));
 
-	return 0;
-err:
-	if (WARN_ON(rmi_granule_undelegate(phys))) {
-		/* Page can't be returned to NS world so is lost. */
-		get_page(phys_to_page(phys));
+		if (WARN_ON(rmi_granule_undelegate(phys))) {
+			/* Page can't be returned to NS world so is lost */
+			get_page(phys_to_page(phys));
+		}
 	}
 
 	return -ENXIO;
+
 }
 
 static int ccal_create_data_block(struct realm *realm, unsigned long ipa,
-				  struct page **dst_pages,
-				  struct page *tmp_block, unsigned long flags)
+				  struct page **dst_pages, struct page *tmp_block,
+				  unsigned long flags, unsigned long level)
 {
 	phys_addr_t dst_phys, tmp_phys;
 	int ret;
@@ -279,18 +157,18 @@ static int ccal_create_data_block(struct realm *realm, unsigned long ipa,
 		return -ENXIO;
 
 	ret = rmi_ccal_block_create(virt_to_phys(realm->rd), dst_phys, ipa,
-				    tmp_phys, flags);
+				    tmp_phys, flags, level);
 	if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT) {
 		/* Create missing RTTs and retry. */
 		int err_level = RMI_RETURN_INDEX(ret);
 
-		ret = ccal_create_rtt_levels(realm, ipa, err_level,
-					     RMM_RTT_BLOCK_LEVEL);
+		ret = realm_create_rtt_levels(realm, ipa, err_level,
+					      RMM_RTT_BLOCK_LEVEL, NULL);
 		if (ret)
 			goto err_undelegate;
 
 		ret = rmi_ccal_block_create(virt_to_phys(realm->rd), dst_phys,
-					    ipa, tmp_phys, flags);
+					    ipa, tmp_phys, flags, level);
 	}
 
 	if (ret)
@@ -311,7 +189,7 @@ static int ccal_create_data_block(struct realm *realm, unsigned long ipa,
 
 static int ccal_create_data_block_unknown(struct realm *realm,
 					  struct page **dst_pages,
-					  unsigned long ipa)
+					  unsigned long ipa, unsigned long level)
 {
 	phys_addr_t dst_phys;
 	int ret;
@@ -323,19 +201,16 @@ static int ccal_create_data_block_unknown(struct realm *realm,
 		return 0;
 	}
 
-	ret = rmi_ccal_block_create_unknown(virt_to_phys(realm->rd), dst_phys,
-					    ipa);
+	ret = rmi_ccal_block_create_unknown(virt_to_phys(realm->rd), dst_phys, ipa, level);
 	if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT) {
 		/* Create missing RTTs and retry. */
 		int err_level = RMI_RETURN_INDEX(ret);
 
-		ret = ccal_create_rtt_levels(realm, ipa, err_level,
-					     RMM_RTT_BLOCK_LEVEL);
+		ret = realm_create_rtt_levels(realm, ipa, err_level, RMM_RTT_BLOCK_LEVEL, NULL);
 		if (ret)
 			goto err_undelegate;
 
-		ret = rmi_ccal_block_create_unknown(virt_to_phys(realm->rd),
-						    dst_phys, ipa);
+		ret = rmi_ccal_block_create_unknown(virt_to_phys(realm->rd), dst_phys, ipa, level);
 	}
 	if (ret)
 		goto err_undelegate;
@@ -411,6 +286,11 @@ int realm_ccal_populate_region(struct kvm *kvm, phys_addr_t ipa_base,
 	}
 
 	pages = kmalloc(CCAL_RTT_ENTRY_NUM * sizeof(*pages), GFP_KERNEL);
+	if (!pages) {
+		ret = -ENOMEM;
+		goto out_srcu;
+	}
+
 	nr_pinned = pin_user_pages_fast(hva, nr_pages, FOLL_WRITE, pages);
 	if (nr_pinned != nr_pages) {
 		ret = -EFAULT;
@@ -436,16 +316,15 @@ int realm_ccal_populate_region(struct kvm *kvm, phys_addr_t ipa_base,
 
 	if (block_map) {
 		ret = ccal_create_data_block(realm, ipa_base, pages, tmp_pages,
-					     data_flags);
+					     data_flags, RMM_RTT_BLOCK_LEVEL);
 		if (ALIGN(ipa_base, RMM_L1_BLOCK_SIZE) ==
 		    (ipa_base + RMM_L2_BLOCK_SIZE))
-			ccal_fold_rtt(realm,
-				      ALIGN_DOWN(ipa_base, RMM_L1_BLOCK_SIZE),
-				      RMM_RTT_BLOCK_LEVEL);
+			fold_rtt(realm, ALIGN_DOWN(ipa_base, RMM_L1_BLOCK_SIZE),
+				 RMM_RTT_BLOCK_LEVEL);
 	} else {
 		for (int i = 0; i < nr_pinned; i++) {
-			ret = ccal_create_data_page(realm, ipa_base, pages[i],
-						    tmp_pages, data_flags);
+			ret = realm_create_protected_data_page(realm, ipa_base,
+					pages[i], tmp_pages, data_flags);
 			if (ret)
 				break;
 			ipa_base += RMM_PAGE_SIZE;
@@ -502,8 +381,13 @@ static int ccal_map_range(struct kvm *kvm, unsigned long ipa_base,
 		goto out_srcu;
 	}
 
-	hva = gfn_to_hva_memslot(memslot, gpa_to_gfn(ipa_base));
 	pages = kmalloc(CCAL_RTT_ENTRY_NUM * sizeof(*pages), GFP_KERNEL);
+	if (!pages) {
+		ret = -ENOMEM;
+		goto out_srcu;
+	}
+
+	hva = gfn_to_hva_memslot(memslot, gpa_to_gfn(ipa_base));
 	nr_pinned = pin_user_pages_fast(hva, nr_pages, FOLL_WRITE, pages);
 	if (nr_pinned != nr_pages) {
 		ret = -EFAULT;
@@ -517,12 +401,12 @@ static int ccal_map_range(struct kvm *kvm, unsigned long ipa_base,
 		block_map = false;
 
 	if (block_map) {
-		ret = ccal_create_data_block_unknown(realm, pages, ipa_base);
+		ret = ccal_create_data_block_unknown(realm, pages, ipa_base,
+						     RMM_RTT_BLOCK_LEVEL);
 		if (ALIGN(ipa_base, RMM_L1_BLOCK_SIZE) ==
 		    (ipa_base + RMM_L2_BLOCK_SIZE))
-			ccal_fold_rtt(realm,
-				      ALIGN_DOWN(ipa_base, RMM_L1_BLOCK_SIZE),
-				      RMM_RTT_BLOCK_LEVEL);
+			fold_rtt(realm, ALIGN_DOWN(ipa_base, RMM_L1_BLOCK_SIZE),
+				 RMM_RTT_BLOCK_LEVEL);
 	} else {
 		for (int i = 0; i < nr_pinned; i++) {
 			ret = ccal_create_data_page_unknown(realm, ipa_base,
@@ -627,4 +511,79 @@ void realm_ccal_destroy_data_range(struct kvm *kvm, unsigned long start,
 			break;
 		cond_resched_rwlock_write(&kvm->mmu_lock);
 	}
-}
\ No newline at end of file
+}
+
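+/*
+ * Bring the RTTs for both halves of the IPA space up to @level. The
+ * walk result packs the protected walk depth in the low nibble of
+ * @walk_level and the unprotected depth in the high nibble; the
+ * unprotected alias of @ipa is selected by setting the top IPA bit.
+ */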
+static int ccal_rtt_complement(struct realm *realm, unsigned long ipa,
+			       int walk_level, int level)
+{
+	int protected_level = walk_level & 0xF;
+	int unprotected_level = (walk_level >> 4) & 0xF;
+	int ret = 0;
+
+	level = max(max(protected_level, unprotected_level), level);
+	if (protected_level < level)
+		ret = realm_create_rtt_levels(realm, ipa, protected_level,
+					      level, NULL);
+
+	if (ret)
+		return ret;
+
+	ipa = (1UL << (realm->ia_bits - 1)) | ipa;
+	if (unprotected_level < level)
+		ret = realm_create_rtt_levels(realm, ipa, unprotected_level,
+					      level, NULL);
+
+	return ret;
+}
+
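+/*
+ * Apply a RIPAS change to [start, end). On RMI_ERROR_RTT, create the
+ * missing RTTs at the level reported by the walk and retry; any other
+ * error aborts the loop. Progress is reported back through @top_ipa.
+ */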
+int realm_ccal_set_ipa_state(struct kvm_vcpu *vcpu, unsigned long start,
+			     unsigned long end, unsigned long ripas,
+			     unsigned long *top_ipa)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct realm *realm = &kvm->arch.realm;
+	struct realm_rec *rec = vcpu->arch.rec;
+	phys_addr_t rd_phys = virt_to_phys(realm->rd);
+	phys_addr_t rec_phys = virt_to_phys(rec->rec_page);
+	unsigned long ipa = start;
+	int ret = 0;
+
+	while (ipa < end) {
+		unsigned long next;
+
+		ret = rmi_rtt_set_ripas(rd_phys, rec_phys, ipa, end, &next);
+
+		if (RMI_RETURN_STATUS(ret) == RMI_SUCCESS) {
+			ipa = next;
+		} else if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT) {
+			int walk_level = RMI_RETURN_INDEX(ret);
+			int level = find_map_level(realm, ipa, end);
+
+			ret = ccal_rtt_complement(realm, ipa, walk_level, level);
+
+			if (ret)
+				break;
+			/* Retry with RTTs created */
+		} else {
+			WARN(1, "Unexpected error in %s: %#x\n", __func__,
+			     ret);
+			ret = -ENXIO;
+			break;
+		}
+	}
+
+	*top_ipa = ipa;
+
+	return ret;
+}
diff --git a/arch/arm64/kvm/rme.c b/arch/arm64/kvm/rme.c
index a7f6a2a2ecb9..15b2714adc64 100644
--- a/arch/arm64/kvm/rme.c
+++ b/arch/arm64/kvm/rme.c
@@ -122,30 +122,6 @@ u64 kvm_realm_reset_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
 	return val;
 }
 
-static int get_start_level(struct realm *realm)
-{
-	return 4 - ((realm->ia_bits - 8) / (RMM_PAGE_SHIFT - 3));
-}
-
-static int find_map_level(struct realm *realm,
-			  unsigned long start,
-			  unsigned long end)
-{
-	int level = RMM_RTT_MAX_LEVEL;
-
-	while (level > get_start_level(realm)) {
-		unsigned long map_size = rme_rtt_level_mapsize(level - 1);
-
-		if (!IS_ALIGNED(start, map_size) ||
-		    (start + map_size) > end)
-			break;
-
-		level--;
-	}
-
-	return level;
-}
-
 static phys_addr_t alloc_delegated_granule(struct kvm_mmu_memory_cache *mc)
 {
 	phys_addr_t phys;
@@ -560,15 +536,12 @@ static int realm_rtt_destroy(struct realm *realm, unsigned long addr,
 	return ret;
 }
 
-static int realm_create_rtt_levels(struct realm *realm,
+int realm_create_rtt_levels(struct realm *realm,
 			    unsigned long ipa,
 			    int level,
 			    int max_level,
 			    struct kvm_mmu_memory_cache *mc)
 {
-	if (is_ccal_rvm(realm))
-		return ccal_create_rtt_levels(realm, ipa, level, max_level);
-
 	if (level == max_level)
 		return 0;
 
@@ -769,7 +742,7 @@ static int realm_create_protected_data_granule(struct realm *realm,
 	return 0;
 }
 
-static int realm_create_protected_data_page(struct realm *realm,
+int realm_create_protected_data_page(struct realm *realm,
 				     unsigned long ipa,
 				     struct page *dst_page,
 				     struct page *src_page,
@@ -829,7 +802,7 @@ static int realm_create_protected_data_page(struct realm *realm,
 	return -ENXIO;
 }
 
-static int fold_rtt(struct realm *realm, unsigned long addr, int level)
+int fold_rtt(struct realm *realm, unsigned long addr, int level)
 {
 	phys_addr_t rtt_addr;
 	int ret;
@@ -1491,7 +1464,10 @@ static void kvm_complete_ripas_change(struct kvm_vcpu *vcpu)
 	kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_cache,
 				   kvm_mmu_cache_min_pages(kvm));
 	write_lock(&kvm->mmu_lock);
-	ret = realm_set_ipa_state(vcpu, base, top, ripas, &top_ipa);
+	if (is_ccal_rvm(&kvm->arch.realm))
+		ret = realm_ccal_set_ipa_state(vcpu, base, top, ripas, &top_ipa);
+	else
+		ret = realm_set_ipa_state(vcpu, base, top, ripas, &top_ipa);
 	write_unlock(&kvm->mmu_lock);
 
 	if (WARN_RATELIMIT(ret && ret != -ENOMEM,
-- 
Gitee