Refactor OSROptimizedCodeCache
Tweak a few names, remove a few GetIsolate calls, other minor usability refactors. It may be worth taking a closer look at the impl in the future; currently the design choices don't seem ideal (see the added TODO on top of the class).

Bug: v8:12161
Change-Id: Ib34e372aa58a30c68c9c5cdd0d1da0ec3e86717c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3560447
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Jakob Linke <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79687}
This commit is contained in:
parent
dc9b48e406
commit
d368dcf4ae
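
Before the diff itself, the renames are easiest to see side by side. The sketch below is not lifted from any single file; `function`, `shared`, `code`, `native_context`, and `osr_offset` simply stand in for whatever the call site already has in scope, and `isolate` must now be threaded in by the caller instead of being re-derived through GetIsolate():

    // Before this CL:
    CodeT cached = function->native_context()
                       .GetOSROptimizedCodeCache()
                       .GetOptimizedCode(shared, osr_offset, isolate);
    OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
                                            osr_offset);

    // After this CL:
    CodeT cached = function->native_context().osr_code_cache().TryGet(
        shared, osr_offset, isolate);
    OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
                                  osr_offset);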
@@ -903,9 +903,8 @@ class OptimizedCodeCache : public AllStatic {
     CodeT code;
     if (IsOSR(osr_offset)) {
       // For OSR, check the OSR optimized code cache.
-      code = function->native_context()
-                 .GetOSROptimizedCodeCache()
-                 .GetOptimizedCode(shared, osr_offset, isolate);
+      code = function->native_context().osr_code_cache().TryGet(
+          shared, osr_offset, isolate);
     } else {
       // Non-OSR code may be cached on the feedback vector.
       if (function->has_feedback_vector()) {
@@ -943,8 +942,8 @@ class OptimizedCodeCache : public AllStatic {
     DCHECK(CodeKindCanOSR(kind));
     Handle<SharedFunctionInfo> shared(function->shared(), isolate);
     Handle<NativeContext> native_context(function->native_context(), isolate);
-    OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
-                                            osr_offset);
+    OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
+                                  osr_offset);
     return;
   }

@@ -377,8 +377,7 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(NativeContext native_context) {
     isolate->heap()->InvalidateCodeDeoptimizationData(code);
   }

-  native_context.GetOSROptimizedCodeCache().EvictMarkedCode(
-      native_context.GetIsolate());
+  native_context.osr_code_cache().EvictDeoptimizedCode(isolate);
 }

 void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
@@ -393,7 +392,7 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
   while (!context.IsUndefined(isolate)) {
     NativeContext native_context = NativeContext::cast(context);
     MarkAllCodeForContext(native_context);
-    OSROptimizedCodeCache::Clear(native_context);
+    OSROptimizedCodeCache::Clear(isolate, native_context);
     DeoptimizeMarkedCodeForContext(native_context);
     context = native_context.next_context_link();
   }
@@ -452,7 +451,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction function, Code code) {
     // pointers. Update DeoptimizeMarkedCodeForContext to use handles and remove
     // this call from here.
     OSROptimizedCodeCache::Compact(
-        Handle<NativeContext>(function.native_context(), isolate));
+        isolate, Handle<NativeContext>(function.native_context(), isolate));
   }
 }

@@ -156,8 +156,7 @@ bool HaveCachedOSRCodeForCurrentBytecodeOffset(UnoptimizedFrame* frame,
   BytecodeArray bytecode = frame->GetBytecodeArray();
   const int bytecode_offset = frame->GetBytecodeOffset();
   if (V8_UNLIKELY(function.shared().osr_code_cache_state() != kNotCached)) {
-    OSROptimizedCodeCache cache =
-        function.native_context().GetOSROptimizedCodeCache();
+    OSROptimizedCodeCache cache = function.native_context().osr_code_cache();
     interpreter::BytecodeArrayIterator iterator(
         handle(bytecode, frame->isolate()));
     for (int jump_offset : cache.GetBytecodeOffsetsFromSFI(function.shared())) {
@@ -1184,7 +1184,7 @@ Handle<NativeContext> Factory::NewNativeContext() {
   context.set_math_random_index(Smi::zero());
   context.set_serialized_objects(*empty_fixed_array());
   context.set_microtask_queue(isolate(), nullptr);
-  context.set_osr_code_cache(*empty_weak_fixed_array());
+  context.set_osr_code_cache(*OSROptimizedCodeCache::Empty(isolate()));
   context.set_retained_maps(*empty_weak_array_list());
   return handle(context, isolate());
 }
@@ -296,10 +296,6 @@ ScriptContextTable NativeContext::synchronized_script_context_table() const {
       get(SCRIPT_CONTEXT_TABLE_INDEX, kAcquireLoad));
 }

-OSROptimizedCodeCache NativeContext::GetOSROptimizedCodeCache() {
-  return OSROptimizedCodeCache::cast(osr_code_cache());
-}
-
 void NativeContext::SetOptimizedCodeListHead(Object head) {
   set(OPTIMIZED_CODE_LIST, head, UPDATE_WEAK_WRITE_BARRIER, kReleaseStore);
 }
@@ -369,7 +369,7 @@ enum ContextLookupFlags {
   V(WEAKSET_ADD_INDEX, JSFunction, weakset_add)                             \
   V(WRAPPED_FUNCTION_MAP_INDEX, Map, wrapped_function_map)                  \
   V(RETAINED_MAPS, Object, retained_maps)                                   \
-  V(OSR_CODE_CACHE_INDEX, WeakFixedArray, osr_code_cache)
+  V(OSR_CODE_CACHE_INDEX, OSROptimizedCodeCache, osr_code_cache)

 #include "torque-generated/src/objects/contexts-tq.inc"

@@ -777,8 +777,6 @@ class NativeContext : public Context {
   inline void SetDeoptimizedCodeListHead(Object head);
   inline Object DeoptimizedCodeListHead();

-  inline OSROptimizedCodeCache GetOSROptimizedCodeCache();
-
   void ResetErrorsThrown();
   void IncrementErrorsThrown();
   int GetErrorsThrown();
@@ -12,22 +12,28 @@
 namespace v8 {
 namespace internal {

-const int OSROptimizedCodeCache::kInitialLength;
-const int OSROptimizedCodeCache::kMaxLength;
+// static
+Handle<OSROptimizedCodeCache> OSROptimizedCodeCache::Empty(Isolate* isolate) {
+  return Handle<OSROptimizedCodeCache>::cast(
+      isolate->factory()->empty_weak_fixed_array());
+}

-void OSROptimizedCodeCache::AddOptimizedCode(
-    Handle<NativeContext> native_context, Handle<SharedFunctionInfo> shared,
-    Handle<CodeT> code, BytecodeOffset osr_offset) {
+// static
+void OSROptimizedCodeCache::Insert(Isolate* isolate,
+                                   Handle<NativeContext> native_context,
+                                   Handle<SharedFunctionInfo> shared,
+                                   Handle<CodeT> code,
+                                   BytecodeOffset osr_offset) {
   DCHECK(!osr_offset.IsNone());
-  DCHECK(CodeKindIsOptimizedJSFunction(code->kind()));
-  STATIC_ASSERT(kEntryLength == 3);
-  Isolate* isolate = native_context->GetIsolate();
   DCHECK(!isolate->serializer_enabled());
+  DCHECK(CodeKindIsOptimizedJSFunction(code->kind()));

-  Handle<OSROptimizedCodeCache> osr_cache(
-      native_context->GetOSROptimizedCodeCache(), isolate);
+  Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
+                                          isolate);
+
+  DCHECK_EQ(osr_cache->FindEntry(*shared, osr_offset), -1);

+  STATIC_ASSERT(kEntryLength == 3);
   int entry = -1;
   for (int index = 0; index < osr_cache->length(); index += kEntryLength) {
     if (osr_cache->Get(index + kSharedOffset)->IsCleared() ||
@@ -37,28 +43,31 @@ void OSROptimizedCodeCache::AddOptimizedCode(
     }
   }

-  if (entry == -1 && osr_cache->length() + kEntryLength <= kMaxLength) {
-    entry = GrowOSRCache(native_context, &osr_cache);
-  } else if (entry == -1) {
-    // We reached max capacity and cannot grow further. Reuse an existing entry.
-    // TODO(mythria): We could use better mechanisms (like lru) to replace
-    // existing entries. Though we don't expect this to be a common case, so
-    // for now choosing to replace the first entry.
-    entry = 0;
+  if (entry == -1) {
+    if (osr_cache->length() + kEntryLength <= kMaxLength) {
+      entry = GrowOSRCache(isolate, native_context, &osr_cache);
+    } else {
+      // We reached max capacity and cannot grow further. Reuse an existing
+      // entry.
+      // TODO(mythria): We could use better mechanisms (like lru) to replace
+      // existing entries. Though we don't expect this to be a common case, so
+      // for now choosing to replace the first entry.
+      entry = 0;
+    }
   }

   osr_cache->InitializeEntry(entry, *shared, *code, osr_offset);
 }

-void OSROptimizedCodeCache::Clear(NativeContext native_context) {
-  native_context.set_osr_code_cache(
-      *native_context.GetIsolate()->factory()->empty_weak_fixed_array());
+void OSROptimizedCodeCache::Clear(Isolate* isolate,
+                                  NativeContext native_context) {
+  native_context.set_osr_code_cache(*OSROptimizedCodeCache::Empty(isolate));
 }

-void OSROptimizedCodeCache::Compact(Handle<NativeContext> native_context) {
-  Handle<OSROptimizedCodeCache> osr_cache(
-      native_context->GetOSROptimizedCodeCache(), native_context->GetIsolate());
-  Isolate* isolate = native_context->GetIsolate();
+void OSROptimizedCodeCache::Compact(Isolate* isolate,
+                                    Handle<NativeContext> native_context) {
+  Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
+                                          isolate);

   // Re-adjust the cache so all the valid entries are on one side. This will
   // enable us to compress the cache if needed.
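
Restructured this way, the slot-selection policy in Insert reads off directly: the preceding scan prefers a cleared (or matching) entry, otherwise the cache grows while still under kMaxLength, otherwise it falls back to overwriting slot 0, as the TODO notes. A condensed restatement of that policy (pseudocode only, not the literal V8 source):

    // entry == -1 here means the scan over existing tuples found no usable slot.
    if (entry == -1) {
      entry = (osr_cache->length() + kEntryLength <= kMaxLength)
                  ? GrowOSRCache(isolate, native_context, &osr_cache)
                  : 0;  // Overwrite the first entry; no LRU policy yet.
    }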
@@ -83,29 +92,31 @@ void OSROptimizedCodeCache::Compact(Handle<NativeContext> native_context) {
   DCHECK_LT(new_osr_cache->length(), osr_cache->length());
   {
     DisallowGarbageCollection no_gc;
-    new_osr_cache->CopyElements(native_context->GetIsolate(), 0, *osr_cache, 0,
+    new_osr_cache->CopyElements(isolate, 0, *osr_cache, 0,
                                 new_osr_cache->length(),
                                 new_osr_cache->GetWriteBarrierMode(no_gc));
   }
   native_context->set_osr_code_cache(*new_osr_cache);
 }

-CodeT OSROptimizedCodeCache::GetOptimizedCode(SharedFunctionInfo shared,
-                                              BytecodeOffset osr_offset,
-                                              Isolate* isolate) {
+CodeT OSROptimizedCodeCache::TryGet(SharedFunctionInfo shared,
+                                    BytecodeOffset osr_offset,
+                                    Isolate* isolate) {
   DisallowGarbageCollection no_gc;
   int index = FindEntry(shared, osr_offset);
-  if (index == -1) return CodeT();
+  if (index == -1) return {};

   CodeT code = GetCodeFromEntry(index);
   if (code.is_null()) {
     ClearEntry(index, isolate);
-    return CodeT();
+    return {};
   }

   DCHECK(code.is_optimized_code() && !code.marked_for_deoptimization());
   return code;
 }

-void OSROptimizedCodeCache::EvictMarkedCode(Isolate* isolate) {
+void OSROptimizedCodeCache::EvictDeoptimizedCode(Isolate* isolate) {
   // This is called from DeoptimizeMarkedCodeForContext that uses raw pointers
   // and hence the DisallowGarbageCollection scope here.
   DisallowGarbageCollection no_gc;
@@ -135,9 +146,8 @@ std::vector<int> OSROptimizedCodeCache::GetBytecodeOffsetsFromSFI(
 }

 int OSROptimizedCodeCache::GrowOSRCache(
-    Handle<NativeContext> native_context,
+    Isolate* isolate, Handle<NativeContext> native_context,
     Handle<OSROptimizedCodeCache>* osr_cache) {
-  Isolate* isolate = native_context->GetIsolate();
   int old_length = (*osr_cache)->length();
   int grow_by = CapacityForLength(old_length) - old_length;
   DCHECK_GT(grow_by, kEntryLength);
@@ -256,5 +266,13 @@ bool OSROptimizedCodeCache::NeedsTrimming(int num_valid_entries,
   return curr_length > kInitialLength && curr_length > num_valid_entries * 3;
 }

+MaybeObject OSROptimizedCodeCache::RawGetForTesting(int index) const {
+  return WeakFixedArray::Get(index);
+}
+
+void OSROptimizedCodeCache::RawSetForTesting(int index, MaybeObject value) {
+  WeakFixedArray::Set(index, value);
+}
+
 }  // namespace internal
 }  // namespace v8
@@ -6,6 +6,7 @@
 #define V8_OBJECTS_OSR_OPTIMIZED_CODE_CACHE_H_

+#include "src/objects/fixed-array.h"

 // Has to be the last include (doesn't have include guards):
 #include "src/objects/object-macros.h"

@@ -21,10 +22,55 @@ enum OSRCodeCacheStateOfSFI : uint8_t {
   kCachedMultiple,  // Very unlikely state, multiple entries.
 };

+// TODO(jgruber): There are a few issues with the current implementation:
+//
+// - The cache is a flat list, thus any search operation is O(N). This resulted
+//   in optimization attempts, see OSRCodeCacheStateOfSFI.
+// - We always iterate up to `length` (== capacity).
+// - We essentially reimplement WeakArrayList, i.e. growth and shrink logic.
+// - On overflow, new entries always pick slot 0.
+//
+// There are a few alternatives:
+//
+// 1) we could reuse WeakArrayList logic (but then we'd still have to
+//    implement custom compaction due to our entry tuple structure).
+// 2) we could reuse CompilationCacheTable (but then we lose weakness and have
+//    to deal with aging).
+// 3) we could try to base on a weak HashTable variant (EphemeronHashTable?).
 class V8_EXPORT OSROptimizedCodeCache : public WeakFixedArray {
  public:
   DECL_CAST(OSROptimizedCodeCache)

+  static Handle<OSROptimizedCodeCache> Empty(Isolate* isolate);
+
+  // Caches the optimized code |code| corresponding to the shared function
+  // |shared| and bailout id |osr_offset| in the OSROptimized code cache.
+  // If the OSR code cache wasn't created before it creates a code cache with
+  // kOSRCodeCacheInitialLength entries.
+  static void Insert(Isolate* isolate, Handle<NativeContext> context,
+                     Handle<SharedFunctionInfo> shared, Handle<CodeT> code,
+                     BytecodeOffset osr_offset);
+
+  // Returns the code corresponding to the shared function |shared| and
+  // BytecodeOffset |offset| if an entry exists in the cache. Returns an empty
+  // object otherwise.
+  CodeT TryGet(SharedFunctionInfo shared, BytecodeOffset osr_offset,
+               Isolate* isolate);
+
+  // Remove all code objects marked for deoptimization from OSR code cache.
+  void EvictDeoptimizedCode(Isolate* isolate);
+
+  // Reduces the size of the OSR code cache if the number of valid entries are
+  // less than the current capacity of the cache.
+  static void Compact(Isolate* isolate, Handle<NativeContext> context);
+
+  // Sets the OSR optimized code cache to an empty array.
+  static void Clear(Isolate* isolate, NativeContext context);
+
+  // Returns vector of bytecode offsets corresponding to the shared function
+  // |shared|
+  std::vector<int> GetBytecodeOffsetsFromSFI(SharedFunctionInfo shared);
+
   enum OSRCodeCacheConstants {
     kSharedOffset,
     kCachedCodeOffset,
@@ -32,40 +78,23 @@ class V8_EXPORT OSROptimizedCodeCache : public WeakFixedArray {
     kEntryLength
   };

-  static const int kInitialLength = OSRCodeCacheConstants::kEntryLength * 4;
-  static const int kMaxLength = OSRCodeCacheConstants::kEntryLength * 1024;
+  static constexpr int kInitialLength = OSRCodeCacheConstants::kEntryLength * 4;
+  static constexpr int kMaxLength = OSRCodeCacheConstants::kEntryLength * 1024;

-  // Caches the optimized code |code| corresponding to the shared function
-  // |shared| and bailout id |osr_offset| in the OSROptimized code cache.
-  // If the OSR code cache wasn't created before it creates a code cache with
-  // kOSRCodeCacheInitialLength entries.
-  static void AddOptimizedCode(Handle<NativeContext> context,
-                               Handle<SharedFunctionInfo> shared,
-                               Handle<CodeT> code, BytecodeOffset osr_offset);
-  // Reduces the size of the OSR code cache if the number of valid entries are
-  // less than the current capacity of the cache.
-  static void Compact(Handle<NativeContext> context);
-  // Sets the OSR optimized code cache to an empty array.
-  static void Clear(NativeContext context);
-
-  // Returns the code corresponding to the shared function |shared| and
-  // BytecodeOffset |offset| if an entry exists in the cache. Returns an empty
-  // object otherwise.
-  CodeT GetOptimizedCode(SharedFunctionInfo shared, BytecodeOffset osr_offset,
-                         Isolate* isolate);
-
-  // Remove all code objects marked for deoptimization from OSR code cache.
-  void EvictMarkedCode(Isolate* isolate);
-
-  // Returns vector of bytecode offsets corresponding to the shared function
-  // |shared|
-  std::vector<int> GetBytecodeOffsetsFromSFI(SharedFunctionInfo shared);
+  // For osr-code-cache-unittest.cc.
+  MaybeObject RawGetForTesting(int index) const;
+  void RawSetForTesting(int index, MaybeObject value);

  private:
+  // Hide raw accessors to avoid terminology confusion.
+  using WeakFixedArray::Get;
+  using WeakFixedArray::Set;
+
   // Functions that implement heuristics on when to grow / shrink the cache.
   static int CapacityForLength(int curr_capacity);
   static bool NeedsTrimming(int num_valid_entries, int curr_capacity);
-  static int GrowOSRCache(Handle<NativeContext> native_context,
+  static int GrowOSRCache(Isolate* isolate,
+                          Handle<NativeContext> native_context,
                           Handle<OSROptimizedCodeCache>* osr_cache);

   // Helper functions to get individual items from an entry in the cache.
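
The TODO above is easier to picture with the storage layout in mind: the cache is a single flat WeakFixedArray in which every logical entry occupies kEntryLength (= 3) consecutive slots, so any lookup walks the whole array. A rough sketch of what the constants imply (illustrative only; the real FindEntry, not shown in this diff, also deals with cleared weak slots):

    // Slots repeat per entry; for an entry starting at index i:
    //   [i + kSharedOffset]      weak reference to the SharedFunctionInfo
    //   [i + kCachedCodeOffset]  weak reference to the cached CodeT
    //   [i + kOsrIdOffset]       Smi-encoded BytecodeOffset
    //
    // Lookup is therefore O(length), which is what the TODO's
    // "any search operation is O(N)" refers to:
    //   for (int i = 0; i < length(); i += kEntryLength) {
    //     if (entry i matches (shared, osr_offset)) return i;
    //   }
    //   return -1;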
@@ -52,23 +52,24 @@ TEST_F(TestWithNativeContext, AddCodeToEmptyCache) {
   Handle<SharedFunctionInfo> shared(function->shared(), isolate);
   Handle<CodeT> code(function->code(), isolate);
   BytecodeOffset bailout_id(1);
-  OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
-                                          bailout_id);
+  OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
+                                bailout_id);

-  Handle<OSROptimizedCodeCache> osr_cache(
-      native_context->GetOSROptimizedCodeCache(), isolate);
+  Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
+                                          isolate);
   EXPECT_EQ(osr_cache->length(), kInitialLength);

   HeapObject sfi_entry;
-  osr_cache->Get(OSROptimizedCodeCache::kSharedOffset)
+  osr_cache->RawGetForTesting(OSROptimizedCodeCache::kSharedOffset)
       ->GetHeapObject(&sfi_entry);
   EXPECT_EQ(sfi_entry, *shared);
   HeapObject code_entry;
-  osr_cache->Get(OSROptimizedCodeCache::kCachedCodeOffset)
+  osr_cache->RawGetForTesting(OSROptimizedCodeCache::kCachedCodeOffset)
       ->GetHeapObject(&code_entry);
   EXPECT_EQ(code_entry, *code);
   Smi osr_offset_entry;
-  osr_cache->Get(OSROptimizedCodeCache::kOsrIdOffset)->ToSmi(&osr_offset_entry);
+  osr_cache->RawGetForTesting(OSROptimizedCodeCache::kOsrIdOffset)
+      ->ToSmi(&osr_offset_entry);
   EXPECT_EQ(osr_offset_entry.value(), bailout_id.ToInt());
 }

@@ -87,30 +88,30 @@ TEST_F(TestWithNativeContext, GrowCodeCache) {

   int bailout_id = 0;
   for (bailout_id = 0; bailout_id < kInitialEntries; bailout_id++) {
-    OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
-                                            BytecodeOffset(bailout_id));
+    OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
+                                  BytecodeOffset(bailout_id));
   }
-  Handle<OSROptimizedCodeCache> osr_cache(
-      native_context->GetOSROptimizedCodeCache(), isolate);
+  Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
+                                          isolate);
   EXPECT_EQ(osr_cache->length(), kInitialLength);

-  OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
-                                          BytecodeOffset(bailout_id));
-  osr_cache = Handle<OSROptimizedCodeCache>(
-      native_context->GetOSROptimizedCodeCache(), isolate);
+  OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
+                                BytecodeOffset(bailout_id));
+  osr_cache =
+      Handle<OSROptimizedCodeCache>(native_context->osr_code_cache(), isolate);
   EXPECT_EQ(osr_cache->length(), kInitialLength * 2);

   int index = kInitialLength;
   HeapObject sfi_entry;
-  osr_cache->Get(index + OSROptimizedCodeCache::kSharedOffset)
+  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kSharedOffset)
       ->GetHeapObject(&sfi_entry);
   EXPECT_EQ(sfi_entry, *shared);
   HeapObject code_entry;
-  osr_cache->Get(index + OSROptimizedCodeCache::kCachedCodeOffset)
+  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kCachedCodeOffset)
       ->GetHeapObject(&code_entry);
   EXPECT_EQ(code_entry, *code);
   Smi osr_offset_entry;
-  osr_cache->Get(index + OSROptimizedCodeCache::kOsrIdOffset)
+  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kOsrIdOffset)
       ->ToSmi(&osr_offset_entry);
   EXPECT_EQ(osr_offset_entry.value(), bailout_id);
 }
@@ -130,8 +131,8 @@ TEST_F(TestWithNativeContext, FindCachedEntry) {

   int bailout_id = 0;
   for (bailout_id = 0; bailout_id < kInitialEntries; bailout_id++) {
-    OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
-                                            BytecodeOffset(bailout_id));
+    OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
+                                  BytecodeOffset(bailout_id));
   }

   base::ScopedVector<char> source1(1024);
@@ -139,26 +140,22 @@ TEST_F(TestWithNativeContext, FindCachedEntry) {
   Handle<JSFunction> function1 = RunJS<JSFunction>(source1.begin());
   Handle<SharedFunctionInfo> shared1(function1->shared(), isolate);
   Handle<CodeT> code1(function1->code(), isolate);
-  OSROptimizedCodeCache::AddOptimizedCode(native_context, shared1, code1,
-                                          BytecodeOffset(bailout_id));
+  OSROptimizedCodeCache::Insert(isolate, native_context, shared1, code1,
+                                BytecodeOffset(bailout_id));

-  Handle<OSROptimizedCodeCache> osr_cache(
-      native_context->GetOSROptimizedCodeCache(), isolate);
-  EXPECT_EQ(osr_cache->GetOptimizedCode(*shared, BytecodeOffset(0), isolate),
-            *code);
-  EXPECT_EQ(osr_cache->GetOptimizedCode(*shared1, BytecodeOffset(bailout_id),
-                                        isolate),
+  Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
+                                          isolate);
+  EXPECT_EQ(osr_cache->TryGet(*shared, BytecodeOffset(0), isolate), *code);
+  EXPECT_EQ(osr_cache->TryGet(*shared1, BytecodeOffset(bailout_id), isolate),
             *code1);

   RunJS("%DeoptimizeFunction(f1)");
-  EXPECT_TRUE(
-      osr_cache->GetOptimizedCode(*shared1, BytecodeOffset(bailout_id), isolate)
-          .is_null());
-
-  osr_cache->Set(OSROptimizedCodeCache::kCachedCodeOffset,
-                 HeapObjectReference::ClearedValue(isolate));
-  EXPECT_TRUE(osr_cache->GetOptimizedCode(*shared, BytecodeOffset(0), isolate)
-                  .is_null());
+  EXPECT_TRUE(osr_cache->TryGet(*shared1, BytecodeOffset(bailout_id), isolate)
+                  .is_null());
+
+  osr_cache->RawSetForTesting(OSROptimizedCodeCache::kCachedCodeOffset,
+                              HeapObjectReference::ClearedValue(isolate));
+  EXPECT_TRUE(osr_cache->TryGet(*shared, BytecodeOffset(0), isolate).is_null());
 }

 TEST_F(TestWithNativeContext, MaxCapacityCache) {
@@ -177,11 +174,11 @@ TEST_F(TestWithNativeContext, MaxCapacityCache) {
   int bailout_id = 0;
   // Add max_capacity - 1 entries.
   for (bailout_id = 0; bailout_id < kMaxEntries - 1; bailout_id++) {
-    OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
-                                            BytecodeOffset(bailout_id));
+    OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
+                                  BytecodeOffset(bailout_id));
   }
-  Handle<OSROptimizedCodeCache> osr_cache(
-      native_context->GetOSROptimizedCodeCache(), isolate);
+  Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
+                                          isolate);
   EXPECT_EQ(osr_cache->length(), kMaxLength);

   // Add an entry to reach max capacity.
@@ -190,22 +187,23 @@ TEST_F(TestWithNativeContext, MaxCapacityCache) {
   Handle<JSFunction> function1 = RunJS<JSFunction>(source1.begin());
   Handle<SharedFunctionInfo> shared1(function1->shared(), isolate);
   Handle<CodeT> code1(function1->code(), isolate);
-  OSROptimizedCodeCache::AddOptimizedCode(native_context, shared1, code1,
-                                          BytecodeOffset(bailout_id));
-  osr_cache = Handle<OSROptimizedCodeCache>(
-      native_context->GetOSROptimizedCodeCache(), isolate);
+  OSROptimizedCodeCache::Insert(isolate, native_context, shared1, code1,
+                                BytecodeOffset(bailout_id));
+  osr_cache =
+      Handle<OSROptimizedCodeCache>(native_context->osr_code_cache(), isolate);
   EXPECT_EQ(osr_cache->length(), kMaxLength);

   int index = (kMaxEntries - 1) * OSROptimizedCodeCache::kEntryLength;
   HeapObject object;
   Smi smi;
-  osr_cache->Get(index + OSROptimizedCodeCache::kSharedOffset)
+  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kSharedOffset)
       ->GetHeapObject(&object);
   EXPECT_EQ(object, *shared1);
-  osr_cache->Get(index + OSROptimizedCodeCache::kCachedCodeOffset)
+  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kCachedCodeOffset)
       ->GetHeapObject(&object);
   EXPECT_EQ(object, *code1);
-  osr_cache->Get(index + OSROptimizedCodeCache::kOsrIdOffset)->ToSmi(&smi);
+  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kOsrIdOffset)
+      ->ToSmi(&smi);
   EXPECT_EQ(smi.value(), bailout_id);

   // Add an entry beyond max capacity.
@@ -215,20 +213,21 @@ TEST_F(TestWithNativeContext, MaxCapacityCache) {
   Handle<SharedFunctionInfo> shared2(function2->shared(), isolate);
   Handle<CodeT> code2(function2->code(), isolate);
   bailout_id++;
-  OSROptimizedCodeCache::AddOptimizedCode(native_context, shared2, code2,
-                                          BytecodeOffset(bailout_id));
-  osr_cache = Handle<OSROptimizedCodeCache>(
-      native_context->GetOSROptimizedCodeCache(), isolate);
+  OSROptimizedCodeCache::Insert(isolate, native_context, shared2, code2,
+                                BytecodeOffset(bailout_id));
+  osr_cache =
+      Handle<OSROptimizedCodeCache>(native_context->osr_code_cache(), isolate);
   EXPECT_EQ(osr_cache->length(), kMaxLength);

   index = 0;
-  osr_cache->Get(index + OSROptimizedCodeCache::kSharedOffset)
+  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kSharedOffset)
       ->GetHeapObject(&object);
   EXPECT_EQ(object, *shared2);
-  osr_cache->Get(index + OSROptimizedCodeCache::kCachedCodeOffset)
+  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kCachedCodeOffset)
       ->GetHeapObject(&object);
   EXPECT_EQ(object, *code2);
-  osr_cache->Get(index + OSROptimizedCodeCache::kOsrIdOffset)->ToSmi(&smi);
+  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kOsrIdOffset)
+      ->ToSmi(&smi);
   EXPECT_EQ(smi.value(), bailout_id);
 }

@@ -249,41 +248,44 @@ TEST_F(TestWithNativeContext, ReuseClearedEntry) {
   int expected_length = kInitialLength * 2;
   int bailout_id = 0;
   for (bailout_id = 0; bailout_id < num_entries; bailout_id++) {
-    OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
-                                            BytecodeOffset(bailout_id));
+    OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
+                                  BytecodeOffset(bailout_id));
   }
-  Handle<OSROptimizedCodeCache> osr_cache(
-      native_context->GetOSROptimizedCodeCache(), isolate);
+  Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
+                                          isolate);
   EXPECT_EQ(osr_cache->length(), expected_length);

   int clear_index1 = 0;
   int clear_index2 = (num_entries - 1) * OSROptimizedCodeCache::kEntryLength;
-  osr_cache->Set(clear_index1 + OSROptimizedCodeCache::kSharedOffset,
-                 HeapObjectReference::ClearedValue(isolate));
-  osr_cache->Set(clear_index2 + OSROptimizedCodeCache::kCachedCodeOffset,
-                 HeapObjectReference::ClearedValue(isolate));
+  osr_cache->RawSetForTesting(
+      clear_index1 + OSROptimizedCodeCache::kSharedOffset,
+      HeapObjectReference::ClearedValue(isolate));
+  osr_cache->RawSetForTesting(
+      clear_index2 + OSROptimizedCodeCache::kCachedCodeOffset,
+      HeapObjectReference::ClearedValue(isolate));

   base::ScopedVector<char> source1(1024);
   GetSource(&source1, 1);
   Handle<JSFunction> function1 = RunJS<JSFunction>(source1.begin());
   Handle<SharedFunctionInfo> shared1(function1->shared(), isolate);
   Handle<CodeT> code1(function1->code(), isolate);
-  OSROptimizedCodeCache::AddOptimizedCode(native_context, shared1, code1,
-                                          BytecodeOffset(bailout_id));
-  osr_cache = Handle<OSROptimizedCodeCache>(
-      native_context->GetOSROptimizedCodeCache(), isolate);
+  OSROptimizedCodeCache::Insert(isolate, native_context, shared1, code1,
+                                BytecodeOffset(bailout_id));
+  osr_cache =
+      Handle<OSROptimizedCodeCache>(native_context->osr_code_cache(), isolate);
   EXPECT_EQ(osr_cache->length(), expected_length);

   int index = clear_index1;
   HeapObject object;
   Smi smi;
-  osr_cache->Get(index + OSROptimizedCodeCache::kSharedOffset)
+  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kSharedOffset)
       ->GetHeapObject(&object);
   EXPECT_EQ(object, *shared1);
-  osr_cache->Get(index + OSROptimizedCodeCache::kCachedCodeOffset)
+  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kCachedCodeOffset)
       ->GetHeapObject(&object);
   EXPECT_EQ(object, *code1);
-  osr_cache->Get(index + OSROptimizedCodeCache::kOsrIdOffset)->ToSmi(&smi);
+  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kOsrIdOffset)
+      ->ToSmi(&smi);
   EXPECT_EQ(smi.value(), bailout_id);

   base::ScopedVector<char> source2(1024);
@@ -292,20 +294,21 @@ TEST_F(TestWithNativeContext, ReuseClearedEntry) {
   Handle<SharedFunctionInfo> shared2(function2->shared(), isolate);
   Handle<CodeT> code2(function2->code(), isolate);
   bailout_id++;
-  OSROptimizedCodeCache::AddOptimizedCode(native_context, shared2, code2,
-                                          BytecodeOffset(bailout_id));
-  osr_cache = Handle<OSROptimizedCodeCache>(
-      native_context->GetOSROptimizedCodeCache(), isolate);
+  OSROptimizedCodeCache::Insert(isolate, native_context, shared2, code2,
+                                BytecodeOffset(bailout_id));
+  osr_cache =
+      Handle<OSROptimizedCodeCache>(native_context->osr_code_cache(), isolate);
   EXPECT_EQ(osr_cache->length(), expected_length);

   index = clear_index2;
-  osr_cache->Get(index + OSROptimizedCodeCache::kSharedOffset)
+  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kSharedOffset)
       ->GetHeapObject(&object);
   EXPECT_EQ(object, *shared2);
-  osr_cache->Get(index + OSROptimizedCodeCache::kCachedCodeOffset)
+  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kCachedCodeOffset)
       ->GetHeapObject(&object);
   EXPECT_EQ(object, *code2);
-  osr_cache->Get(index + OSROptimizedCodeCache::kOsrIdOffset)->ToSmi(&smi);
+  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kOsrIdOffset)
+      ->ToSmi(&smi);
   EXPECT_EQ(smi.value(), bailout_id);
 }

@@ -335,37 +338,45 @@ TEST_F(TestWithNativeContext, EvictDeoptedEntriesNoCompact) {
   int bailout_id = 0;
   for (bailout_id = 0; bailout_id < num_entries; bailout_id++) {
     if (bailout_id == deopt_id1 || bailout_id == deopt_id2) {
-      OSROptimizedCodeCache::AddOptimizedCode(
-          native_context, deopt_shared, deopt_code, BytecodeOffset(bailout_id));
+      OSROptimizedCodeCache::Insert(isolate, native_context, deopt_shared,
+                                    deopt_code, BytecodeOffset(bailout_id));
     } else {
-      OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
-                                              BytecodeOffset(bailout_id));
+      OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
+                                    BytecodeOffset(bailout_id));
     }
   }
-  Handle<OSROptimizedCodeCache> osr_cache(
-      native_context->GetOSROptimizedCodeCache(), isolate);
+  Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
+                                          isolate);
   EXPECT_EQ(osr_cache->length(), expected_length);

   RunJS("%DeoptimizeFunction(f1)");
-  osr_cache = Handle<OSROptimizedCodeCache>(
-      native_context->GetOSROptimizedCodeCache(), isolate);
+  osr_cache =
+      Handle<OSROptimizedCodeCache>(native_context->osr_code_cache(), isolate);
   EXPECT_EQ(osr_cache->length(), expected_length);

   int index = (num_entries - 2) * OSROptimizedCodeCache::kEntryLength;
-  EXPECT_TRUE(osr_cache->Get(index + OSROptimizedCodeCache::kSharedOffset)
-                  ->IsCleared());
-  EXPECT_TRUE(osr_cache->Get(index + OSROptimizedCodeCache::kCachedCodeOffset)
-                  ->IsCleared());
   EXPECT_TRUE(
-      osr_cache->Get(index + OSROptimizedCodeCache::kOsrIdOffset)->IsCleared());
+      osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kSharedOffset)
+          ->IsCleared());
+  EXPECT_TRUE(
+      osr_cache
+          ->RawGetForTesting(index + OSROptimizedCodeCache::kCachedCodeOffset)
+          ->IsCleared());
+  EXPECT_TRUE(
+      osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kOsrIdOffset)
+          ->IsCleared());

   index = (num_entries - 1) * OSROptimizedCodeCache::kEntryLength;
-  EXPECT_TRUE(osr_cache->Get(index + OSROptimizedCodeCache::kSharedOffset)
-                  ->IsCleared());
-  EXPECT_TRUE(osr_cache->Get(index + OSROptimizedCodeCache::kCachedCodeOffset)
-                  ->IsCleared());
   EXPECT_TRUE(
-      osr_cache->Get(index + OSROptimizedCodeCache::kOsrIdOffset)->IsCleared());
+      osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kSharedOffset)
+          ->IsCleared());
+  EXPECT_TRUE(
+      osr_cache
+          ->RawGetForTesting(index + OSROptimizedCodeCache::kCachedCodeOffset)
+          ->IsCleared());
+  EXPECT_TRUE(
+      osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kOsrIdOffset)
+          ->IsCleared());
 }

 TEST_F(TestWithNativeContext, EvictDeoptedEntriesCompact) {
@@ -392,20 +403,20 @@ TEST_F(TestWithNativeContext, EvictDeoptedEntriesCompact) {
   int bailout_id = 0;
   for (bailout_id = 0; bailout_id < num_entries; bailout_id++) {
     if (bailout_id % 2 == 0) {
-      OSROptimizedCodeCache::AddOptimizedCode(
-          native_context, deopt_shared, deopt_code, BytecodeOffset(bailout_id));
+      OSROptimizedCodeCache::Insert(isolate, native_context, deopt_shared,
+                                    deopt_code, BytecodeOffset(bailout_id));
     } else {
-      OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
-                                              BytecodeOffset(bailout_id));
+      OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
+                                    BytecodeOffset(bailout_id));
     }
   }
-  Handle<OSROptimizedCodeCache> osr_cache(
-      native_context->GetOSROptimizedCodeCache(), isolate);
+  Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
+                                          isolate);
   EXPECT_EQ(osr_cache->length(), expected_length);

   RunJS("%DeoptimizeFunction(f1)");
-  osr_cache = Handle<OSROptimizedCodeCache>(
-      native_context->GetOSROptimizedCodeCache(), isolate);
+  osr_cache =
+      Handle<OSROptimizedCodeCache>(native_context->osr_code_cache(), isolate);
   EXPECT_EQ(osr_cache->length(), kInitialLength);
 }