diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index dc1f185985..f66a9e3a27 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -1194,9 +1194,14 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
     Label install_optimized;
     HValue* first_context_slot = Add<HLoadNamedField>(optimized_map,
         HObjectAccess::ForFirstContextSlot());
+    HValue* first_osr_ast_slot = Add<HLoadNamedField>(optimized_map,
+        HObjectAccess::ForFirstOsrAstIdSlot());
+    HValue* osr_ast_id_none = Add<HConstant>(BailoutId::None().ToInt());
     IfBuilder already_in(this);
     already_in.If<HCompareObjectEqAndBranch>(native_context,
                                              first_context_slot);
+    already_in.AndIf<HCompareObjectEqAndBranch>(first_osr_ast_slot,
+                                                osr_ast_id_none);
     already_in.Then();
     {
       HValue* code_object = Add<HLoadNamedField>(optimized_map,
@@ -1213,7 +1218,7 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
         shared_function_entry_length);
     HValue* array_length = Add<HLoadNamedField>(optimized_map,
         HObjectAccess::ForFixedArrayLength());
-    HValue* key = loop_builder.BeginBody(array_length,
+    HValue* slot_iterator = loop_builder.BeginBody(array_length,
                                          graph()->GetConstant0(),
                                          Token::GT);
     {
@@ -1222,8 +1227,8 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
       HValue* second_entry_index =
           Add<HConstant>(SharedFunctionInfo::kSecondEntryIndex);
       IfBuilder restore_check(this);
-      restore_check.If<HCompareNumericAndBranch>(key, second_entry_index,
-                                                 Token::EQ);
+      restore_check.If<HCompareNumericAndBranch>(
+          slot_iterator, second_entry_index, Token::EQ);
       restore_check.Then();
       {
         // Store the unoptimized code
@@ -1232,20 +1237,29 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
       }
       restore_check.Else();
       {
-        HValue* keyed_minus = AddUncasted<HSub>(
-            key, shared_function_entry_length);
-        HInstruction* keyed_lookup = Add<HLoadKeyed>(optimized_map,
-            keyed_minus, static_cast<HValue*>(NULL), FAST_ELEMENTS);
+        STATIC_ASSERT(SharedFunctionInfo::kContextOffset == 0);
+        STATIC_ASSERT(SharedFunctionInfo::kEntryLength -
+                      SharedFunctionInfo::kOsrAstIdOffset == 1);
+        HValue* native_context_slot = AddUncasted<HSub>(
+            slot_iterator, shared_function_entry_length);
+        HValue* osr_ast_id_slot = AddUncasted<HSub>(
+            slot_iterator, graph()->GetConstant1());
+        HInstruction* native_context_entry = Add<HLoadKeyed>(optimized_map,
+            native_context_slot, static_cast<HValue*>(NULL), FAST_ELEMENTS);
+        HInstruction* osr_ast_id_entry = Add<HLoadKeyed>(optimized_map,
+            osr_ast_id_slot, static_cast<HValue*>(NULL), FAST_ELEMENTS);
         IfBuilder done_check(this);
         done_check.If<HCompareObjectEqAndBranch>(native_context,
-                                                 keyed_lookup);
+                                                 native_context_entry);
+        done_check.AndIf<HCompareObjectEqAndBranch>(osr_ast_id_entry,
+                                                    osr_ast_id_none);
         done_check.Then();
         {
           // Hit: fetch the optimized code.
-          HValue* keyed_plus = AddUncasted<HAdd>(
-              keyed_minus, graph()->GetConstant1());
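+          // The cached code occupies the slot right after the entry's
+          // native context (kCachedCodeOffset == kContextOffset + 1).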
+          HValue* code_slot = AddUncasted<HAdd>(
+              native_context_slot, graph()->GetConstant1());
           HValue* code_object = Add<HLoadKeyed>(optimized_map,
-              keyed_plus, static_cast<HValue*>(NULL), FAST_ELEMENTS);
+              code_slot, static_cast<HValue*>(NULL), FAST_ELEMENTS);
           BuildInstallOptimizedCode(js_function, native_context, code_object);

           // Fall out of the loop
diff --git a/src/compiler.cc b/src/compiler.cc
index 82bec65d85..65bccf2122 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -1028,16 +1028,20 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
 }


-static Handle<Code> GetCodeFromOptimizedCodeMap(Handle<JSFunction> function) {
+static Handle<Code> GetCodeFromOptimizedCodeMap(Handle<JSFunction> function,
+                                                BailoutId osr_ast_id) {
   if (FLAG_cache_optimized_code) {
     Handle<SharedFunctionInfo> shared(function->shared());
     DisallowHeapAllocation no_gc;
     int index = shared->SearchOptimizedCodeMap(
-        function->context()->native_context());
+        function->context()->native_context(), osr_ast_id);
     if (index > 0) {
       if (FLAG_trace_opt) {
         PrintF("[found optimized code for ");
         function->ShortPrint();
+        if (!osr_ast_id.IsNone()) {
+          PrintF(" at OSR AST id %d", osr_ast_id.ToInt());
+        }
         PrintF("]\n");
       }
       FixedArray* literals = shared->GetLiteralsFromOptimizedCodeMap(index);
@@ -1053,14 +1057,14 @@ static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
   Handle<Code> code = info->code();
   if (code->kind() != Code::OPTIMIZED_FUNCTION) return;  // Nothing to do.

-  // Cache non-OSR optimized code.
-  if (FLAG_cache_optimized_code && !info->is_osr()) {
+  // Cache optimized code.
+  if (FLAG_cache_optimized_code) {
     Handle<JSFunction> function = info->closure();
     Handle<SharedFunctionInfo> shared(function->shared());
     Handle<FixedArray> literals(function->literals());
     Handle<Context> native_context(function->context()->native_context());
     SharedFunctionInfo::AddToOptimizedCodeMap(
-        shared, native_context, code, literals);
+        shared, native_context, code, literals, info->osr_ast_id());
   }
 }

@@ -1137,10 +1141,8 @@ Handle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
                                         Handle<Code> current_code,
                                         ConcurrencyMode mode,
                                         BailoutId osr_ast_id) {
-  if (osr_ast_id.IsNone()) {  // No cache for OSR.
-    Handle<Code> cached_code = GetCodeFromOptimizedCodeMap(function);
-    if (!cached_code.is_null()) return cached_code;
-  }
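+  // Look up cached code for OSR compiles as well; map entries are now
+  // keyed on both the native context and the OSR AST id.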
+  Handle<Code> cached_code = GetCodeFromOptimizedCodeMap(function, osr_ast_id);
+  if (!cached_code.is_null()) return cached_code;

   SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(function));
   Isolate* isolate = info->isolate();
@@ -1210,7 +1212,7 @@ Handle<Code> Compiler::GetConcurrentlyOptimizedCode(OptimizedCompileJob* job) {
     Compiler::RecordFunctionCompilation(
         Logger::LAZY_COMPILE_TAG, info.get(), shared);
     if (info->shared_info()->SearchOptimizedCodeMap(
-            info->context()->native_context()) == -1) {
+            info->context()->native_context(), info->osr_ast_id()) == -1) {
       InsertCodeIntoOptimizedCodeMap(info.get());
     }
diff --git a/src/factory.cc b/src/factory.cc
index 1a70f6c601..9f1f085fc7 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -924,7 +924,8 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(

   result->set_context(*context);

-  int index = function_info->SearchOptimizedCodeMap(context->native_context());
+  int index = function_info->SearchOptimizedCodeMap(context->native_context(),
+                                                    BailoutId::None());
   if (!function_info->bound() && index < 0) {
     int number_of_literals = function_info->num_literals();
     Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 1e270ab2c5..25ac2df121 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -6028,6 +6028,10 @@ class HObjectAccess V8_FINAL {
     return HObjectAccess(kInobject, SharedFunctionInfo::kFirstContextSlot);
   }

+  static HObjectAccess ForFirstOsrAstIdSlot() {
+    return HObjectAccess(kInobject, SharedFunctionInfo::kFirstOsrAstIdSlot);
+  }
+
   static HObjectAccess ForOptimizedCodeMap() {
     return HObjectAccess(kInobject,
                          SharedFunctionInfo::kOptimizedCodeMapOffset);
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 0a6793503b..0594c0792a 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -1065,55 +1065,40 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {


 void CodeFlusher::ProcessOptimizedCodeMaps() {
-  static const int kEntriesStart = SharedFunctionInfo::kEntriesStart;
-  static const int kEntryLength = SharedFunctionInfo::kEntryLength;
-  static const int kContextOffset = 0;
-  static const int kCodeOffset = 1;
-  static const int kLiteralsOffset = 2;
-  STATIC_ASSERT(kEntryLength == 3);
+  STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);

   SharedFunctionInfo* holder = optimized_code_map_holder_head_;
   SharedFunctionInfo* next_holder;
+
   while (holder != NULL) {
     next_holder = GetNextCodeMap(holder);
     ClearNextCodeMap(holder);

     FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
-    int new_length = kEntriesStart;
+    int new_length = SharedFunctionInfo::kEntriesStart;
     int old_length = code_map->length();
-    for (int i = kEntriesStart; i < old_length; i += kEntryLength) {
-      Code* code = Code::cast(code_map->get(i + kCodeOffset));
-      MarkBit code_mark = Marking::MarkBitFrom(code);
-      if (!code_mark.Get()) {
-        continue;
+    for (int i = SharedFunctionInfo::kEntriesStart;
+         i < old_length;
+         i += SharedFunctionInfo::kEntryLength) {
+      Code* code =
+          Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
+      if (!Marking::MarkBitFrom(code).Get()) continue;
+
+      // Move every slot in the entry.
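+      // Only the OSR AST id slot holds a Smi; every other slot holds a
+      // heap object whose slot must be re-recorded for pointer updating.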
+      for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) {
+        int dst_index = new_length++;
+        Object** slot = code_map->RawFieldOfElementAt(dst_index);
+        Object* object = code_map->get(i + j);
+        code_map->set(dst_index, object);
+        if (j == SharedFunctionInfo::kOsrAstIdOffset) {
+          ASSERT(object->IsSmi());
+        } else {
+          ASSERT(Marking::IsBlack(
+              Marking::MarkBitFrom(HeapObject::cast(*slot))));
+          isolate_->heap()->mark_compact_collector()->
+              RecordSlot(slot, slot, *slot);
+        }
       }
-
-      // Update and record the context slot in the optimized code map.
-      Object** context_slot = HeapObject::RawField(code_map,
-          FixedArray::OffsetOfElementAt(new_length));
-      code_map->set(new_length++, code_map->get(i + kContextOffset));
-      ASSERT(Marking::IsBlack(
-          Marking::MarkBitFrom(HeapObject::cast(*context_slot))));
-      isolate_->heap()->mark_compact_collector()->
-          RecordSlot(context_slot, context_slot, *context_slot);
-
-      // Update and record the code slot in the optimized code map.
-      Object** code_slot = HeapObject::RawField(code_map,
-          FixedArray::OffsetOfElementAt(new_length));
-      code_map->set(new_length++, code_map->get(i + kCodeOffset));
-      ASSERT(Marking::IsBlack(
-          Marking::MarkBitFrom(HeapObject::cast(*code_slot))));
-      isolate_->heap()->mark_compact_collector()->
-          RecordSlot(code_slot, code_slot, *code_slot);
-
-      // Update and record the literals slot in the optimized code map.
-      Object** literals_slot = HeapObject::RawField(code_map,
-          FixedArray::OffsetOfElementAt(new_length));
-      code_map->set(new_length++, code_map->get(i + kLiteralsOffset));
-      ASSERT(Marking::IsBlack(
-          Marking::MarkBitFrom(HeapObject::cast(*literals_slot))));
-      isolate_->heap()->mark_compact_collector()->
-          RecordSlot(literals_slot, literals_slot, *literals_slot);
     }

     // Trim the optimized code map if entries have been removed.
@@ -2608,9 +2593,7 @@ void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
                                cached_map,
                                SKIP_WRITE_BARRIER);
       }
-      Object** slot =
-          HeapObject::RawField(prototype_transitions,
-                               FixedArray::OffsetOfElementAt(proto_index));
+      Object** slot = prototype_transitions->RawFieldOfElementAt(proto_index);
       RecordSlot(slot, slot, prototype);
       new_number_of_transitions++;
     }
@@ -2715,12 +2698,10 @@ void MarkCompactCollector::ProcessWeakCollections() {
       for (int i = 0; i < table->Capacity(); i++) {
         if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
           Object** key_slot =
-              HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
-                  ObjectHashTable::EntryToIndex(i)));
+              table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
           RecordSlot(anchor, key_slot, *key_slot);
           Object** value_slot =
-              HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
-                  ObjectHashTable::EntryToValueIndex(i)));
+              table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
           MarkCompactMarkingVisitor::MarkObjectByPointer(
               this, anchor, value_slot);
         }
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 185f9d0803..5ad0453a18 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -2364,9 +2364,7 @@ void Map::LookupTransition(JSObject* holder,

 Object** DescriptorArray::GetKeySlot(int descriptor_number) {
   ASSERT(descriptor_number < number_of_descriptors());
-  return HeapObject::RawField(
-      reinterpret_cast<HeapObject*>(this),
-      OffsetOfElementAt(ToKeyIndex(descriptor_number)));
+  return RawFieldOfElementAt(ToKeyIndex(descriptor_number));
 }


@@ -2421,9 +2419,7 @@ void DescriptorArray::InitializeRepresentations(Representation representation) {

 Object** DescriptorArray::GetValueSlot(int descriptor_number) {
   ASSERT(descriptor_number < number_of_descriptors());
-  return HeapObject::RawField(
-      reinterpret_cast<HeapObject*>(this),
-      OffsetOfElementAt(ToValueIndex(descriptor_number)));
+  return RawFieldOfElementAt(ToValueIndex(descriptor_number));
 }


@@ -3224,7 +3220,7 @@ void JSFunctionResultCache::MakeZeroSize() {

 void JSFunctionResultCache::Clear() {
   int cache_size = size();
-  Object** entries_start = RawField(this, OffsetOfElementAt(kEntriesIndex));
+  Object** entries_start = RawFieldOfElementAt(kEntriesIndex);
   MemsetPointer(entries_start,
                 GetHeap()->the_hole_value(),
                 cache_size - kEntriesIndex);
@@ -3830,8 +3826,7 @@ Object* DependentCode::object_at(int i) {


 Object** DependentCode::slot_at(int i) {
-  return HeapObject::RawField(
-      this, FixedArray::OffsetOfElementAt(kCodesStartIndex + i));
+  return RawFieldOfElementAt(kCodesStartIndex + i);
 }

diff --git a/src/objects-visiting-inl.h b/src/objects-visiting-inl.h
index 1a68344b26..a7fc84f24d 100644
--- a/src/objects-visiting-inl.h
+++ b/src/objects-visiting-inl.h
@@ -331,8 +331,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitNativeContext(
   for (int idx = Context::FIRST_WEAK_SLOT;
        idx < Context::NATIVE_CONTEXT_SLOTS;
        ++idx) {
-    Object** slot =
-        HeapObject::RawField(object, FixedArray::OffsetOfElementAt(idx));
+    Object** slot = Context::cast(object)->RawFieldOfElementAt(idx);
     collector->RecordSlot(slot, slot, *slot);
   }
 }
diff --git a/src/objects.cc b/src/objects.cc
index b9dcaca3b8..0e2f4c1cbe 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -9526,42 +9526,48 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
     Handle<SharedFunctionInfo> shared,
     Handle<Context> native_context,
     Handle<Code> code,
-    Handle<FixedArray> literals) {
+    Handle<FixedArray> literals,
+    BailoutId osr_ast_id) {
   CALL_HEAP_FUNCTION_VOID(
       shared->GetIsolate(),
-      shared->AddToOptimizedCodeMap(*native_context, *code, *literals));
+      shared->AddToOptimizedCodeMap(
+          *native_context, *code, *literals, osr_ast_id));
 }


 MaybeObject* SharedFunctionInfo::AddToOptimizedCodeMap(Context* native_context,
                                                        Code* code,
-                                                       FixedArray* literals) {
+                                                       FixedArray* literals,
+                                                       BailoutId osr_ast_id) {
   ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
   ASSERT(native_context->IsNativeContext());
-  STATIC_ASSERT(kEntryLength == 3);
+  STATIC_ASSERT(kEntryLength == 4);
   Heap* heap = GetHeap();
   FixedArray* new_code_map;
   Object* value = optimized_code_map();
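+  // The OSR AST id is boxed as a Smi so it can be stored alongside the
+  // tagged context, code and literals slots.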
+  Smi* osr_ast_id_smi = Smi::FromInt(osr_ast_id.ToInt());
   if (value->IsSmi()) {
     // No optimized code map.
     ASSERT_EQ(0, Smi::cast(value)->value());
     // Create 3 entries per context {context, code, literals}.
     MaybeObject* maybe = heap->AllocateFixedArray(kInitialLength);
     if (!maybe->To(&new_code_map)) return maybe;
-    new_code_map->set(kEntriesStart + 0, native_context);
-    new_code_map->set(kEntriesStart + 1, code);
-    new_code_map->set(kEntriesStart + 2, literals);
+    new_code_map->set(kEntriesStart + kContextOffset, native_context);
+    new_code_map->set(kEntriesStart + kCachedCodeOffset, code);
+    new_code_map->set(kEntriesStart + kLiteralsOffset, literals);
+    new_code_map->set(kEntriesStart + kOsrAstIdOffset, osr_ast_id_smi);
   } else {
     // Copy old map and append one new entry.
     FixedArray* old_code_map = FixedArray::cast(value);
-    ASSERT_EQ(-1, SearchOptimizedCodeMap(native_context));
+    ASSERT_EQ(-1, SearchOptimizedCodeMap(native_context, osr_ast_id));
     int old_length = old_code_map->length();
     int new_length = old_length + kEntryLength;
     MaybeObject* maybe = old_code_map->CopySize(new_length);
     if (!maybe->To(&new_code_map)) return maybe;
-    new_code_map->set(old_length + 0, native_context);
-    new_code_map->set(old_length + 1, code);
-    new_code_map->set(old_length + 2, literals);
+    new_code_map->set(old_length + kContextOffset, native_context);
+    new_code_map->set(old_length + kCachedCodeOffset, code);
+    new_code_map->set(old_length + kLiteralsOffset, literals);
+    new_code_map->set(old_length + kOsrAstIdOffset, osr_ast_id_smi);

     // Zap the old map for the sake of the heap verifier.
     if (Heap::ShouldZapGarbage()) {
       Object** data = old_code_map->data_start();
@@ -9570,11 +9576,12 @@ MaybeObject* SharedFunctionInfo::AddToOptimizedCodeMap(Context* native_context,
   }
 #ifdef DEBUG
   for (int i = kEntriesStart; i < new_code_map->length(); i += kEntryLength) {
-    ASSERT(new_code_map->get(i)->IsNativeContext());
-    ASSERT(new_code_map->get(i + 1)->IsCode());
-    ASSERT(Code::cast(new_code_map->get(i + 1))->kind() ==
+    ASSERT(new_code_map->get(i + kContextOffset)->IsNativeContext());
+    ASSERT(new_code_map->get(i + kCachedCodeOffset)->IsCode());
+    ASSERT(Code::cast(new_code_map->get(i + kCachedCodeOffset))->kind() ==
            Code::OPTIMIZED_FUNCTION);
-    ASSERT(new_code_map->get(i + 2)->IsFixedArray());
+    ASSERT(new_code_map->get(i + kLiteralsOffset)->IsFixedArray());
+    ASSERT(new_code_map->get(i + kOsrAstIdOffset)->IsSmi());
   }
 #endif
   set_optimized_code_map(new_code_map);
@@ -9594,7 +9601,6 @@ FixedArray* SharedFunctionInfo::GetLiteralsFromOptimizedCodeMap(int index) {
 }


-
 Code* SharedFunctionInfo::GetCodeFromOptimizedCodeMap(int index) {
   ASSERT(index > kEntriesStart);
   FixedArray* code_map = FixedArray::cast(optimized_code_map());
@@ -9639,9 +9645,14 @@ void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
     }
   }
   while (i < (code_map->length() - kEntryLength)) {
-    code_map->set(i, code_map->get(i + kEntryLength));
-    code_map->set(i + 1, code_map->get(i + 1 + kEntryLength));
-    code_map->set(i + 2, code_map->get(i + 2 + kEntryLength));
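+    // Move each slot of the following entry down over the evicted one.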
+    code_map->set(i + kContextOffset,
+                  code_map->get(i + kContextOffset + kEntryLength));
+    code_map->set(i + kCachedCodeOffset,
+                  code_map->get(i + kCachedCodeOffset + kEntryLength));
+    code_map->set(i + kLiteralsOffset,
+                  code_map->get(i + kLiteralsOffset + kEntryLength));
+    code_map->set(i + kOsrAstIdOffset,
+                  code_map->get(i + kOsrAstIdOffset + kEntryLength));
     i += kEntryLength;
   }
   if (removed_entry) {
@@ -10203,16 +10214,19 @@ void SharedFunctionInfo::CompleteInobjectSlackTracking() {
 }


-int SharedFunctionInfo::SearchOptimizedCodeMap(Context* native_context) {
+int SharedFunctionInfo::SearchOptimizedCodeMap(Context* native_context,
+                                               BailoutId osr_ast_id) {
   ASSERT(native_context->IsNativeContext());
   if (!FLAG_cache_optimized_code) return -1;
   Object* value = optimized_code_map();
   if (!value->IsSmi()) {
     FixedArray* optimized_code_map = FixedArray::cast(value);
     int length = optimized_code_map->length();
+    Smi* osr_ast_id_smi = Smi::FromInt(osr_ast_id.ToInt());
     for (int i = kEntriesStart; i < length; i += kEntryLength) {
-      if (optimized_code_map->get(i) == native_context) {
-        return i + 1;
+      if (optimized_code_map->get(i + kContextOffset) == native_context &&
+          optimized_code_map->get(i + kOsrAstIdOffset) == osr_ast_id_smi) {
+        return i + kCachedCodeOffset;
       }
     }
     if (FLAG_trace_opt) {
diff --git a/src/objects.h b/src/objects.h
index a24f30ec69..7a557e910b 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -2957,6 +2957,11 @@ class FixedArray: public FixedArrayBase {
   // Code Generation support.
   static int OffsetOfElementAt(int index) { return SizeFor(index); }

+  // Garbage collection support.
+  Object** RawFieldOfElementAt(int index) {
+    return HeapObject::RawField(this, OffsetOfElementAt(index));
+  }
+
   // Casting.
   static inline FixedArray* cast(Object* obj);

@@ -6536,10 +6541,10 @@ class SharedFunctionInfo: public HeapObject {
   // and a shared literals array or Smi(0) if none.
   DECL_ACCESSORS(optimized_code_map, Object)

-  // Returns index i of the entry with the specified context. At position
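+  // Offsets of an entry's slots, relative to the start of the entry.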
-  // i - 1 is the context, position i the code, and i + 1 the literals array.
-  // Returns -1 when no matching entry is found.
-  int SearchOptimizedCodeMap(Context* native_context);
+  // Returns index i of the entry with the specified context and OSR entry.
+  // At position i - 1 is the context, position i the code, and i + 1 the
+  // literals array. Returns -1 when no matching entry is found.
+  int SearchOptimizedCodeMap(Context* native_context, BailoutId osr_ast_id);

   // Installs optimized code from the code map on the given closure. The
   // index has to be consistent with a search result as defined above.
@@ -6559,18 +6564,28 @@
   // Add a new entry to the optimized code map.
   MUST_USE_RESULT MaybeObject* AddToOptimizedCodeMap(Context* native_context,
                                                      Code* code,
-                                                     FixedArray* literals);
+                                                     FixedArray* literals,
+                                                     BailoutId osr_ast_id);
   static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
                                     Handle<Context> native_context,
                                     Handle<Code> code,
-                                    Handle<FixedArray> literals);
+                                    Handle<FixedArray> literals,
+                                    BailoutId osr_ast_id);

   // Layout description of the optimized code map.
   static const int kNextMapIndex = 0;
   static const int kEntriesStart = 1;
-  static const int kEntryLength = 3;
-  static const int kFirstContextSlot = FixedArray::kHeaderSize + kPointerSize;
-  static const int kFirstCodeSlot = FixedArray::kHeaderSize + 2 * kPointerSize;
+  static const int kContextOffset = 0;
+  static const int kCachedCodeOffset = 1;
+  static const int kLiteralsOffset = 2;
+  static const int kOsrAstIdOffset = 3;
+  static const int kEntryLength = 4;
+  static const int kFirstContextSlot = FixedArray::kHeaderSize +
+      (kEntriesStart + kContextOffset) * kPointerSize;
+  static const int kFirstCodeSlot = FixedArray::kHeaderSize +
+      (kEntriesStart + kCachedCodeOffset) * kPointerSize;
+  static const int kFirstOsrAstIdSlot = FixedArray::kHeaderSize +
+      (kEntriesStart + kOsrAstIdOffset) * kPointerSize;
   static const int kSecondEntryIndex = kEntryLength + kEntriesStart;
   static const int kInitialLength = kEntriesStart + kEntryLength;
diff --git a/src/transitions-inl.h b/src/transitions-inl.h
index c4825fcf73..7d8608b050 100644
--- a/src/transitions-inl.h
+++ b/src/transitions-inl.h
@@ -115,9 +115,7 @@ Object** TransitionArray::GetPrototypeTransitionsSlot() {


 Object** TransitionArray::GetKeySlot(int transition_number) {
   ASSERT(!IsSimpleTransition());
   ASSERT(transition_number < number_of_transitions());
-  return HeapObject::RawField(
-      reinterpret_cast<HeapObject*>(this),
-      OffsetOfElementAt(ToKeyIndex(transition_number)));
+  return RawFieldOfElementAt(ToKeyIndex(transition_number));
 }
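
Note on the resulting layout: each optimized code map now stores 4-slot
entries of the form {native context, code, literals, OSR AST id}, with
non-OSR code filed under BailoutId::None(). Below is a minimal standalone
sketch of the lookup that SharedFunctionInfo::SearchOptimizedCodeMap
performs after this patch; the types and names are illustrative stand-ins,
not code from the V8 tree.

#include <vector>

// Illustrative stand-ins for V8's tagged heap types.
struct Context;
struct Code;
struct FixedArray;

const int kNoOsrAstId = -1;  // stand-in for BailoutId::None().ToInt()

// One code map entry after this patch: four slots instead of three.
struct CodeMapEntry {
  Context* native_context;  // kContextOffset    == 0
  Code* code;               // kCachedCodeOffset == 1
  FixedArray* literals;     // kLiteralsOffset   == 2
  int osr_ast_id;           // kOsrAstIdOffset   == 3 (a Smi in the real map)
};

// A hit now requires both the native context and the OSR AST id to match,
// so OSR code for several loop entry points can coexist with the regular
// non-OSR entry for the same context.
Code* Lookup(const std::vector<CodeMapEntry>& map,
             Context* native_context,
             int osr_ast_id) {
  for (const CodeMapEntry& entry : map) {
    if (entry.native_context == native_context &&
        entry.osr_ast_id == osr_ast_id) {
      return entry.code;
    }
  }
  return nullptr;  // no cached code; the caller falls back to compiling
}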