diff --git a/include/v8-platform.h b/include/v8-platform.h
index 788122955fda9188a50e97b81a4432aebeb87371..dd85cf4feb09232961bb250be3b359da52bcf2d2 100644
--- a/include/v8-platform.h
+++ b/include/v8-platform.h
@@ -375,6 +375,11 @@ class PageAllocator {
    */
   virtual size_t CommitPageSize() = 0;
 
+  /**
+   * Returns the cookie generated when the PageAllocator was initialized;
+   * it serves as an identifier for the current isolate.
+   */
+  virtual unsigned long Cookie() { return 0; }
   /**
    * Sets the random seed so that GetRandomMmapAddr() will generate repeatable
    * sequences of random mmap addresses.
diff --git a/src/base/page-allocator.cc b/src/base/page-allocator.cc
index 2956bf1475519760149feadcda2caf29812f22f8..7066880dc3f6140993260a054ea3815d2bd2d8d7 100644
--- a/src/base/page-allocator.cc
+++ b/src/base/page-allocator.cc
@@ -1,7 +1,7 @@
 // Copyright 2017 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
-
+#include <time.h>
 #include "src/base/page-allocator.h"
 
 #include "src/base/platform/platform.h"
@@ -33,7 +33,8 @@ STATIC_ASSERT_ENUM(PageAllocator::kNoAccessWillJitLater,
 
 PageAllocator::PageAllocator()
     : allocate_page_size_(base::OS::AllocatePageSize()),
-      commit_page_size_(base::OS::CommitPageSize()) {}
+      commit_page_size_(base::OS::CommitPageSize()),
+      cookie_((unsigned int) time(0)) {}
 
 void PageAllocator::SetRandomMmapSeed(int64_t seed) {
   base::OS::SetRandomMmapSeed(seed);
@@ -49,9 +50,11 @@ void* PageAllocator::AllocatePages(void* hint, size_t size, size_t alignment,
   // kNoAccessWillJitLater is only used on Apple Silicon. Map it to regular
   // kNoAccess on other platforms, so code doesn't have to handle both enum
   // values.
-  if (access == PageAllocator::kNoAccessWillJitLater) {
-    access = PageAllocator::kNoAccess;
-  }
+
+  // The following downgrade is disabled so the JIT mapping is not bypassed on HarmonyOS.
+  // if (access == PageAllocator::kNoAccessWillJitLater) {
+  //   access = PageAllocator::kNoAccess;
+  // }
 #endif
   return base::OS::Allocate(hint, size, alignment,
                             static_cast<OS::MemoryPermission>(access));
diff --git a/src/base/page-allocator.h b/src/base/page-allocator.h
index 7374c67837717adebb3df03c45a585caa6ad605c..f46c973c89f9794810eac3c44a018eebefa8fb63 100644
--- a/src/base/page-allocator.h
+++ b/src/base/page-allocator.h
@@ -26,6 +26,8 @@ class V8_BASE_EXPORT PageAllocator
 
   size_t CommitPageSize() override { return commit_page_size_; }
 
+  unsigned long Cookie() override { return cookie_; }
+
   void SetRandomMmapSeed(int64_t seed) override;
 
   void* GetRandomMmapAddr() override;
@@ -56,6 +58,7 @@ class V8_BASE_EXPORT PageAllocator
 
   const size_t allocate_page_size_;
   const size_t commit_page_size_;
+  const size_t cookie_;
 };
 
 }  // namespace base
diff --git a/src/base/platform/platform-posix.cc b/src/base/platform/platform-posix.cc
index 651a538cd08916697e8d80173a8641cd0f67d186..c4c049b73114386bf6d4178347a3c38f2d4419f1 100644
--- a/src/base/platform/platform-posix.cc
+++ b/src/base/platform/platform-posix.cc
@@ -153,7 +153,7 @@ int GetFlagsForMemoryPermission(OS::MemoryPermission access,
     flags |= MAP_LAZY;
 #endif  // V8_OS_QNX
   }
-#if V8_OS_MACOSX
+// #if V8_OS_MACOSX
   // MAP_JIT is required to obtain writable and executable pages when the
   // hardened runtime/memory protection is enabled, which is optional (via code
   // signing) on Intel-based Macs but mandatory on Apple silicon ones. See also
@@ -161,7 +161,7 @@ int GetFlagsForMemoryPermission(OS::MemoryPermission access,
   if (access == OS::MemoryPermission::kNoAccessWillJitLater) {
     flags |= MAP_JIT;
   }
-#endif  // V8_OS_MACOSX
+// #endif  // V8_OS_MACOSX
   return flags;
 }
 
diff --git a/src/utils/allocation.cc b/src/utils/allocation.cc
index 033cdc32f0d29f1a261265918e5e0e6de956dba7..667f13d01e80de7ff50747446f12d8b6289296e3 100644
--- a/src/utils/allocation.cc
+++ b/src/utils/allocation.cc
@@ -200,9 +200,7 @@ void* AllocatePages(v8::PageAllocator* page_allocator, void* hint, size_t size,
   DCHECK_NOT_NULL(page_allocator);
   DCHECK_EQ(hint, AlignedAddress(hint, alignment));
   DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
-  if (FLAG_randomize_all_allocations) {
-    hint = AlignedAddress(page_allocator->GetRandomMmapAddr(), alignment);
-  }
+  hint = reinterpret_cast<void*>(page_allocator->Cookie());
   void* result = nullptr;
   for (int i = 0; i < kAllocationTries; ++i) {
     result = page_allocator->AllocatePages(hint, size, alignment, access);
diff --git a/src/wasm/wasm-code-manager.cc b/src/wasm/wasm-code-manager.cc
index ba18fddf6cb30cde9e83c5e61bfb11902c295020..dd4e80170bca350c4fdef0dbdce5eee6ce6c64df 100644
--- a/src/wasm/wasm-code-manager.cc
+++ b/src/wasm/wasm-code-manager.cc
@@ -1965,7 +1965,7 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
   DCHECK_GT(size, 0);
   size_t allocate_page_size = page_allocator->AllocatePageSize();
   size = RoundUp(size, allocate_page_size);
-  if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr();
+  if (hint == nullptr) hint = reinterpret_cast<void*>(page_allocator->Cookie());
   // When we start exposing Wasm in jitless mode, then the jitless flag
   // will have to determine whether we set kMapAsJittable or not.