From 29f0270143f3d3387ae5bdad1584f935e3a52989 Mon Sep 17 00:00:00 2001
From: carmincol
Date: Fri, 8 Dec 2023 10:45:20 +0800
Subject: [PATCH] Init

Change-Id: I17de0ac0d2108623be9c2a1d641cc555b4435dcb
---
 include/v8-platform.h               |  5 +++++
 src/base/page-allocator.cc          | 13 ++++++++-----
 src/base/page-allocator.h           |  3 +++
 src/base/platform/platform-posix.cc |  4 ++--
 src/utils/allocation.cc             |  4 +---
 src/wasm/wasm-code-manager.cc       |  2 +-
 6 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/include/v8-platform.h b/include/v8-platform.h
index 788122955..dd85cf4fe 100644
--- a/include/v8-platform.h
+++ b/include/v8-platform.h
@@ -375,6 +375,11 @@ class PageAllocator {
    */
  virtual size_t CommitPageSize() = 0;
 
+  /**
+   * Gets the cookie generated when the PageAllocator is initialized; it
+   * serves as an identifier for the current isolate.
+   */
+  virtual unsigned long Cookie() { return 0; }
  /**
   * Sets the random seed so that GetRandomMmapAddr() will generate repeatable
   * sequences of random mmap addresses.
diff --git a/src/base/page-allocator.cc b/src/base/page-allocator.cc
index 2956bf147..7066880dc 100644
--- a/src/base/page-allocator.cc
+++ b/src/base/page-allocator.cc
@@ -1,7 +1,7 @@
 // Copyright 2017 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
-
+#include <time.h>
 #include "src/base/page-allocator.h"
 
 #include "src/base/platform/platform.h"
@@ -33,7 +33,8 @@ STATIC_ASSERT_ENUM(PageAllocator::kNoAccessWillJitLater,
 PageAllocator::PageAllocator()
     : allocate_page_size_(base::OS::AllocatePageSize()),
-      commit_page_size_(base::OS::CommitPageSize()) {}
+      commit_page_size_(base::OS::CommitPageSize()),
+      cookie_((unsigned int) time(0)) {}
 
 void PageAllocator::SetRandomMmapSeed(int64_t seed) {
   base::OS::SetRandomMmapSeed(seed);
 }
@@ -49,9 +50,11 @@ void* PageAllocator::AllocatePages(void* hint, size_t size, size_t alignment,
   // kNoAccessWillJitLater is only used on Apple Silicon. Map it to regular
   // kNoAccess on other platforms, so code doesn't have to handle both enum
   // values.
-  if (access == PageAllocator::kNoAccessWillJitLater) {
-    access = PageAllocator::kNoAccess;
-  }
+
+  // Commented out so the JIT path is not bypassed on HarmonyOS.
+  // if (access == PageAllocator::kNoAccessWillJitLater) {
+  //   access = PageAllocator::kNoAccess;
+  // }
 #endif
   return base::OS::Allocate(hint, size, alignment,
                             static_cast<base::OS::MemoryPermission>(access));
diff --git a/src/base/page-allocator.h b/src/base/page-allocator.h
index 7374c6783..f46c973c8 100644
--- a/src/base/page-allocator.h
+++ b/src/base/page-allocator.h
@@ -26,6 +26,8 @@ class V8_BASE_EXPORT PageAllocator
 
   size_t CommitPageSize() override { return commit_page_size_; }
 
+  unsigned long Cookie() override { return cookie_; }
+
   void SetRandomMmapSeed(int64_t seed) override;
 
   void* GetRandomMmapAddr() override;
@@ -56,6 +58,7 @@ class V8_BASE_EXPORT PageAllocator
 
   const size_t allocate_page_size_;
   const size_t commit_page_size_;
+  const size_t cookie_;
 };
 
 }  // namespace base
diff --git a/src/base/platform/platform-posix.cc b/src/base/platform/platform-posix.cc
index 651a538cd..c4c049b73 100644
--- a/src/base/platform/platform-posix.cc
+++ b/src/base/platform/platform-posix.cc
@@ -153,7 +153,7 @@ int GetFlagsForMemoryPermission(OS::MemoryPermission access,
     flags |= MAP_LAZY;
 #endif  // V8_OS_QNX
   }
-#if V8_OS_MACOSX
+// #if V8_OS_MACOSX
   // MAP_JIT is required to obtain writable and executable pages when the
   // hardened runtime/memory protection is enabled, which is optional (via code
   // signing) on Intel-based Macs but mandatory on Apple silicon ones. See also
@@ -161,7 +161,7 @@ int GetFlagsForMemoryPermission(OS::MemoryPermission access,
   if (access == OS::MemoryPermission::kNoAccessWillJitLater) {
     flags |= MAP_JIT;
   }
-#endif  // V8_OS_MACOSX
+// #endif  // V8_OS_MACOSX
   return flags;
 }
 
diff --git a/src/utils/allocation.cc b/src/utils/allocation.cc
index 033cdc32f..667f13d01 100644
--- a/src/utils/allocation.cc
+++ b/src/utils/allocation.cc
@@ -200,9 +200,7 @@ void* AllocatePages(v8::PageAllocator* page_allocator, void* hint, size_t size,
   DCHECK_NOT_NULL(page_allocator);
   DCHECK_EQ(hint, AlignedAddress(hint, alignment));
   DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
-  if (FLAG_randomize_all_allocations) {
-    hint = AlignedAddress(page_allocator->GetRandomMmapAddr(), alignment);
-  }
+  hint = reinterpret_cast<void*>(page_allocator->Cookie());
   void* result = nullptr;
   for (int i = 0; i < kAllocationTries; ++i) {
     result = page_allocator->AllocatePages(hint, size, alignment, access);
diff --git a/src/wasm/wasm-code-manager.cc b/src/wasm/wasm-code-manager.cc
index ba18fddf6..dd4e80170 100644
--- a/src/wasm/wasm-code-manager.cc
+++ b/src/wasm/wasm-code-manager.cc
@@ -1965,7 +1965,7 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
   DCHECK_GT(size, 0);
   size_t allocate_page_size = page_allocator->AllocatePageSize();
   size = RoundUp(size, allocate_page_size);
-  if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr();
+  if (hint == nullptr) hint = reinterpret_cast<void*>(page_allocator->Cookie());
 
   // When we start exposing Wasm in jitless mode, then the jitless flag
   // will have to determine whether we set kMapAsJittable or not.
-- 
Gitee
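
For reference, a minimal standalone sketch of the allocation-hint pattern this patch introduces: a page allocator seeds a cookie from time(0) at construction, exposes it via Cookie(), and callers pass that value as the mmap placement hint instead of a random address. This is not V8 code and not part of the patch; it assumes a POSIX mmap, the names SimplePageAllocator and AllocateWithCookieHint are illustrative only, and the page-boundary rounding is added here for safety (the patch passes the raw cookie value).

// Illustrative sketch only; not part of the patch above.
#include <sys/mman.h>
#include <time.h>

#include <cstddef>
#include <cstdio>

class SimplePageAllocator {
 public:
  // Mirrors the patch: the cookie is generated once, at construction time.
  SimplePageAllocator() : cookie_(static_cast<size_t>(time(nullptr))) {}

  // Counterpart of the patched PageAllocator::Cookie() accessor.
  unsigned long Cookie() const { return cookie_; }

 private:
  const size_t cookie_;
};

// Uses the cookie as the placement hint for mmap, the way the patched
// AllocatePages()/TryAllocate() call sites do. The hint is rounded down to a
// page boundary here; the kernel may still ignore it, so the result is checked.
void* AllocateWithCookieHint(const SimplePageAllocator& allocator, size_t size,
                             size_t page_size) {
  size_t raw = static_cast<size_t>(allocator.Cookie());
  void* hint = reinterpret_cast<void*>(raw & ~(page_size - 1));
  void* result =
      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return result == MAP_FAILED ? nullptr : result;
}

int main() {
  SimplePageAllocator allocator;
  const size_t kPageSize = 4096;
  void* region = AllocateWithCookieHint(allocator, kPageSize, kPageSize);
  if (region != nullptr) {
    std::printf("mapped at %p (cookie 0x%lx)\n", region, allocator.Cookie());
    munmap(region, kPageSize);
  }
  return 0;
}

Note that because the hint is derived from time(0) rather than from GetRandomMmapAddr(), repeated runs started in the same second request the same placement, which trades address randomization for a stable, per-process identifier.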