// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/code-stub-assembler.h"
#include "src/code-factory.h"

namespace v8 {
namespace internal {

using compiler::Node;

CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
                                     const CallInterfaceDescriptor& descriptor,
                                     Code::Flags flags, const char* name,
                                     size_t result_size)
    : compiler::CodeAssembler(isolate, zone, descriptor, flags, name,
                              result_size) {}

CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
                                     int parameter_count, Code::Flags flags,
                                     const char* name)
    : compiler::CodeAssembler(isolate, zone, parameter_count, flags, name) {}

Node* CodeStubAssembler::BooleanMapConstant() {
  return HeapConstant(isolate()->factory()->boolean_map());
}

Node* CodeStubAssembler::EmptyStringConstant() {
  return LoadRoot(Heap::kempty_stringRootIndex);
}

Node* CodeStubAssembler::HeapNumberMapConstant() {
  return HeapConstant(isolate()->factory()->heap_number_map());
}

Node* CodeStubAssembler::NoContextConstant() {
  return SmiConstant(Smi::FromInt(0));
}

Node* CodeStubAssembler::NullConstant() {
  return LoadRoot(Heap::kNullValueRootIndex);
}

Node* CodeStubAssembler::UndefinedConstant() {
  return LoadRoot(Heap::kUndefinedValueRootIndex);
}

Node* CodeStubAssembler::Float64Round(Node* x) {
  Node* one = Float64Constant(1.0);
  Node* one_half = Float64Constant(0.5);

  Variable var_x(this, MachineRepresentation::kFloat64);
  Label return_x(this);

  // Round up {x} towards Infinity.
  var_x.Bind(Float64Ceil(x));

  GotoIf(Float64LessThanOrEqual(Float64Sub(var_x.value(), one_half), x),
         &return_x);
  var_x.Bind(Float64Sub(var_x.value(), one));
  Goto(&return_x);

  Bind(&return_x);
  return var_x.value();
}

Node* CodeStubAssembler::Float64Ceil(Node* x) {
  if (IsFloat64RoundUpSupported()) {
    return Float64RoundUp(x);
  }

  Node* one = Float64Constant(1.0);
  Node* zero = Float64Constant(0.0);
  Node* two_52 = Float64Constant(4503599627370496.0E0);
  Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);

  Variable var_x(this, MachineRepresentation::kFloat64);
  Label return_x(this), return_minus_x(this);
  var_x.Bind(x);

  // Check if {x} is greater than zero.
  Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
  Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
         &if_xnotgreaterthanzero);

  Bind(&if_xgreaterthanzero);
  {
    // Just return {x} unless it's in the range ]0,2^52[.
    GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);

    // Round positive {x} towards Infinity.
    var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
    GotoUnless(Float64LessThan(var_x.value(), x), &return_x);
    var_x.Bind(Float64Add(var_x.value(), one));
    Goto(&return_x);
  }

  Bind(&if_xnotgreaterthanzero);
  {
    // Just return {x} unless it's in the range ]-2^52,0[.
    GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
    GotoUnless(Float64LessThan(x, zero), &return_x);

    // Round negated {x} towards Infinity and return the result negated.
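    // Worked example (assuming IEEE 754 round-to-nearest-even): for x = -1.5,
    // minus_x is 1.5 and (2^52 + 1.5) - 2^52 yields 2.0; since 2.0 > 1.5 the
    // code below subtracts 1.0, giving floor(1.5) == 1.0, which is negated to
    // -1.0 == ceil(-1.5).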
    Node* minus_x = Float64Neg(x);
    var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
    GotoUnless(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
    var_x.Bind(Float64Sub(var_x.value(), one));
    Goto(&return_minus_x);
  }

  Bind(&return_minus_x);
  var_x.Bind(Float64Neg(var_x.value()));
  Goto(&return_x);

  Bind(&return_x);
  return var_x.value();
}

Node* CodeStubAssembler::Float64Floor(Node* x) {
  if (IsFloat64RoundDownSupported()) {
    return Float64RoundDown(x);
  }

  Node* one = Float64Constant(1.0);
  Node* zero = Float64Constant(0.0);
  Node* two_52 = Float64Constant(4503599627370496.0E0);
  Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);

  Variable var_x(this, MachineRepresentation::kFloat64);
  Label return_x(this), return_minus_x(this);
  var_x.Bind(x);

  // Check if {x} is greater than zero.
  Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
  Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
         &if_xnotgreaterthanzero);

  Bind(&if_xgreaterthanzero);
  {
    // Just return {x} unless it's in the range ]0,2^52[.
    GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);

    // Round positive {x} towards -Infinity.
    var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
    GotoUnless(Float64GreaterThan(var_x.value(), x), &return_x);
    var_x.Bind(Float64Sub(var_x.value(), one));
    Goto(&return_x);
  }

  Bind(&if_xnotgreaterthanzero);
  {
    // Just return {x} unless it's in the range ]-2^52,0[.
    GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
    GotoUnless(Float64LessThan(x, zero), &return_x);

    // Round negated {x} towards -Infinity and return the result negated.
    Node* minus_x = Float64Neg(x);
    var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
    GotoUnless(Float64LessThan(var_x.value(), minus_x), &return_minus_x);
    var_x.Bind(Float64Add(var_x.value(), one));
    Goto(&return_minus_x);
  }

  Bind(&return_minus_x);
  var_x.Bind(Float64Neg(var_x.value()));
  Goto(&return_x);

  Bind(&return_x);
  return var_x.value();
}

Node* CodeStubAssembler::Float64Trunc(Node* x) {
  if (IsFloat64RoundTruncateSupported()) {
    return Float64RoundTruncate(x);
  }

  Node* one = Float64Constant(1.0);
  Node* zero = Float64Constant(0.0);
  Node* two_52 = Float64Constant(4503599627370496.0E0);
  Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);

  Variable var_x(this, MachineRepresentation::kFloat64);
  Label return_x(this), return_minus_x(this);
  var_x.Bind(x);

  // Check if {x} is greater than 0.
  Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
  Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
         &if_xnotgreaterthanzero);

  Bind(&if_xgreaterthanzero);
  {
    if (IsFloat64RoundDownSupported()) {
      var_x.Bind(Float64RoundDown(x));
    } else {
      // Just return {x} unless it's in the range ]0,2^52[.
      GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);

      // Round positive {x} towards -Infinity.
      var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
      GotoUnless(Float64GreaterThan(var_x.value(), x), &return_x);
      var_x.Bind(Float64Sub(var_x.value(), one));
    }
    Goto(&return_x);
  }

  Bind(&if_xnotgreaterthanzero);
  {
    if (IsFloat64RoundUpSupported()) {
      var_x.Bind(Float64RoundUp(x));
      Goto(&return_x);
    } else {
      // Just return {x} unless it's in the range ]-2^52,0[.
      GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
      GotoUnless(Float64LessThan(x, zero), &return_x);

      // Round negated {x} towards -Infinity and return the result negated.
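      // Worked example (assuming IEEE 754 round-to-nearest-even): for
      // x = -0.5, minus_x is 0.5 and (2^52 + 0.5) - 2^52 yields 0.0 (the tie
      // rounds to the even value 2^52); 0.0 is not greater than 0.5, so the
      // result is Float64Neg(0.0) == -0.0 == trunc(-0.5).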
      Node* minus_x = Float64Neg(x);
      var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
      GotoUnless(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
      var_x.Bind(Float64Sub(var_x.value(), one));
      Goto(&return_minus_x);
    }
  }

  Bind(&return_minus_x);
  var_x.Bind(Float64Neg(var_x.value()));
  Goto(&return_x);

  Bind(&return_x);
  return var_x.value();
}

Node* CodeStubAssembler::SmiFromWord32(Node* value) {
  if (Is64()) {
    value = ChangeInt32ToInt64(value);
  }
  return WordShl(value, SmiShiftBitsConstant());
}

Node* CodeStubAssembler::SmiTag(Node* value) {
  return WordShl(value, SmiShiftBitsConstant());
}

Node* CodeStubAssembler::SmiUntag(Node* value) {
  return WordSar(value, SmiShiftBitsConstant());
}

Node* CodeStubAssembler::SmiToWord32(Node* value) {
  Node* result = WordSar(value, SmiShiftBitsConstant());
  if (Is64()) {
    result = TruncateInt64ToInt32(result);
  }
  return result;
}

Node* CodeStubAssembler::SmiToFloat64(Node* value) {
  return ChangeInt32ToFloat64(SmiUntag(value));
}

Node* CodeStubAssembler::SmiAdd(Node* a, Node* b) { return IntPtrAdd(a, b); }

Node* CodeStubAssembler::SmiAddWithOverflow(Node* a, Node* b) {
  return IntPtrAddWithOverflow(a, b);
}

Node* CodeStubAssembler::SmiSub(Node* a, Node* b) { return IntPtrSub(a, b); }

Node* CodeStubAssembler::SmiSubWithOverflow(Node* a, Node* b) {
  return IntPtrSubWithOverflow(a, b);
}

Node* CodeStubAssembler::SmiEqual(Node* a, Node* b) { return WordEqual(a, b); }

Node* CodeStubAssembler::SmiAboveOrEqual(Node* a, Node* b) {
  return UintPtrGreaterThanOrEqual(a, b);
}

Node* CodeStubAssembler::SmiLessThan(Node* a, Node* b) {
  return IntPtrLessThan(a, b);
}

Node* CodeStubAssembler::SmiLessThanOrEqual(Node* a, Node* b) {
  return IntPtrLessThanOrEqual(a, b);
}

Node* CodeStubAssembler::SmiMin(Node* a, Node* b) {
  // TODO(bmeurer): Consider using Select once available.
  Variable min(this, MachineRepresentation::kTagged);
  Label if_a(this), if_b(this), join(this);
  BranchIfSmiLessThan(a, b, &if_a, &if_b);
  Bind(&if_a);
  min.Bind(a);
  Goto(&join);
  Bind(&if_b);
  min.Bind(b);
  Goto(&join);
  Bind(&join);
  return min.value();
}

Node* CodeStubAssembler::WordIsSmi(Node* a) {
  return WordEqual(WordAnd(a, IntPtrConstant(kSmiTagMask)), IntPtrConstant(0));
}

Node* CodeStubAssembler::WordIsPositiveSmi(Node* a) {
  return WordEqual(WordAnd(a, IntPtrConstant(kSmiTagMask | kSmiSignMask)),
                   IntPtrConstant(0));
}

Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
                                              AllocationFlags flags,
                                              Node* top_address,
                                              Node* limit_address) {
  Node* top = Load(MachineType::Pointer(), top_address);
  Node* limit = Load(MachineType::Pointer(), limit_address);

  // If there's not enough space, call the runtime.
  Variable result(this, MachineRepresentation::kTagged);
  Label runtime_call(this, Label::kDeferred), no_runtime_call(this);
  Label merge_runtime(this, &result);

  Node* new_top = IntPtrAdd(top, size_in_bytes);
  Branch(UintPtrGreaterThanOrEqual(new_top, limit), &runtime_call,
         &no_runtime_call);

  Bind(&runtime_call);
  // AllocateInTargetSpace does not use the context.
  Node* context = IntPtrConstant(0);
  Node* runtime_flags = SmiTag(Int32Constant(
      AllocateDoubleAlignFlag::encode(false) |
      AllocateTargetSpace::encode(flags & kPretenured
                                      ? AllocationSpace::OLD_SPACE
                                      : AllocationSpace::NEW_SPACE)));
  Node* runtime_result = CallRuntime(Runtime::kAllocateInTargetSpace, context,
                                     SmiTag(size_in_bytes), runtime_flags);
  result.Bind(runtime_result);
  Goto(&merge_runtime);

  // When there is enough space, return `top' and bump it up.
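  // The allocation address is an untagged pointer; adding kHeapObjectTag below
  // turns it into a tagged HeapObject reference before it is returned.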
  Bind(&no_runtime_call);
  Node* no_runtime_result = top;
  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
                      new_top);
  no_runtime_result = BitcastWordToTagged(
      IntPtrAdd(no_runtime_result, IntPtrConstant(kHeapObjectTag)));
  result.Bind(no_runtime_result);
  Goto(&merge_runtime);

  Bind(&merge_runtime);
  return result.value();
}

Node* CodeStubAssembler::AllocateRawAligned(Node* size_in_bytes,
                                            AllocationFlags flags,
                                            Node* top_address,
                                            Node* limit_address) {
  Node* top = Load(MachineType::Pointer(), top_address);
  Node* limit = Load(MachineType::Pointer(), limit_address);
  Variable adjusted_size(this, MachineType::PointerRepresentation());
  adjusted_size.Bind(size_in_bytes);
  if (flags & kDoubleAlignment) {
    // TODO(epertoso): Simd128 alignment.
    Label aligned(this), not_aligned(this), merge(this, &adjusted_size);
    Branch(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)), &not_aligned,
           &aligned);

    Bind(&not_aligned);
    Node* not_aligned_size =
        IntPtrAdd(size_in_bytes, IntPtrConstant(kPointerSize));
    adjusted_size.Bind(not_aligned_size);
    Goto(&merge);

    Bind(&aligned);
    Goto(&merge);

    Bind(&merge);
  }

  Variable address(this, MachineRepresentation::kTagged);
  address.Bind(AllocateRawUnaligned(adjusted_size.value(), kNone, top, limit));

  Label needs_filler(this), doesnt_need_filler(this),
      merge_address(this, &address);
  Branch(IntPtrEqual(adjusted_size.value(), size_in_bytes),
         &doesnt_need_filler, &needs_filler);

  Bind(&needs_filler);
  // Store a filler and increase the address by kPointerSize.
  // TODO(epertoso): this code assumes that we only align to kDoubleSize. Change
  // it when Simd128 alignment is supported.
  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top,
                      LoadRoot(Heap::kOnePointerFillerMapRootIndex));
  address.Bind(BitcastWordToTagged(
      IntPtrAdd(address.value(), IntPtrConstant(kPointerSize))));
  Goto(&merge_address);

  Bind(&doesnt_need_filler);
  Goto(&merge_address);

  Bind(&merge_address);
  // Update the top.
  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
                      IntPtrAdd(top, adjusted_size.value()));
  return address.value();
}

Node* CodeStubAssembler::Allocate(Node* size_in_bytes, AllocationFlags flags) {
  bool const new_space = !(flags & kPretenured);
  Node* top_address = ExternalConstant(
      new_space ?
          ExternalReference::new_space_allocation_top_address(isolate()) :
          ExternalReference::old_space_allocation_top_address(isolate()));
  Node* limit_address = ExternalConstant(
      new_space ?
          ExternalReference::new_space_allocation_limit_address(isolate()) :
          ExternalReference::old_space_allocation_limit_address(isolate()));

#ifdef V8_HOST_ARCH_32_BIT
  if (flags & kDoubleAlignment) {
    return AllocateRawAligned(size_in_bytes, flags, top_address,
                              limit_address);
  }
#endif

  return AllocateRawUnaligned(size_in_bytes, flags, top_address,
                              limit_address);
}

Node* CodeStubAssembler::Allocate(int size_in_bytes, AllocationFlags flags) {
  return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags);
}

Node* CodeStubAssembler::InnerAllocate(Node* previous, int offset) {
  return BitcastWordToTagged(IntPtrAdd(previous, IntPtrConstant(offset)));
}

Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset,
                                          MachineType rep) {
  return Load(rep, buffer, IntPtrConstant(offset));
}

Node* CodeStubAssembler::LoadObjectField(Node* object, int offset,
                                         MachineType rep) {
  return Load(rep, object, IntPtrConstant(offset - kHeapObjectTag));
}

Node* CodeStubAssembler::LoadHeapNumberValue(Node* object) {
  return Load(MachineType::Float64(), object,
              IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag));
}

Node* CodeStubAssembler::LoadMap(Node* object) {
  return LoadObjectField(object, HeapObject::kMapOffset);
}

Node* CodeStubAssembler::LoadInstanceType(Node* object) {
  return LoadMapInstanceType(LoadMap(object));
}

Node* CodeStubAssembler::LoadElements(Node* object) {
  return LoadObjectField(object, JSObject::kElementsOffset);
}

Node* CodeStubAssembler::LoadFixedArrayBaseLength(Node* array) {
  return LoadObjectField(array, FixedArrayBase::kLengthOffset);
}

Node* CodeStubAssembler::LoadMapBitField(Node* map) {
  return Load(MachineType::Uint8(), map,
              IntPtrConstant(Map::kBitFieldOffset - kHeapObjectTag));
}

Node* CodeStubAssembler::LoadMapBitField2(Node* map) {
  return Load(MachineType::Uint8(), map,
              IntPtrConstant(Map::kBitField2Offset - kHeapObjectTag));
}

Node* CodeStubAssembler::LoadMapBitField3(Node* map) {
  return Load(MachineType::Uint32(), map,
              IntPtrConstant(Map::kBitField3Offset - kHeapObjectTag));
}

Node* CodeStubAssembler::LoadMapInstanceType(Node* map) {
  return Load(MachineType::Uint8(), map,
              IntPtrConstant(Map::kInstanceTypeOffset - kHeapObjectTag));
}

Node* CodeStubAssembler::LoadMapDescriptors(Node* map) {
  return LoadObjectField(map, Map::kDescriptorsOffset);
}

Node* CodeStubAssembler::LoadNameHash(Node* name) {
  return Load(MachineType::Uint32(), name,
              IntPtrConstant(Name::kHashFieldOffset - kHeapObjectTag));
}

Node* CodeStubAssembler::AllocateUninitializedFixedArray(Node* length) {
  Node* header_size = IntPtrConstant(FixedArray::kHeaderSize);
  Node* data_size = WordShl(length, IntPtrConstant(kPointerSizeLog2));
  Node* total_size = IntPtrAdd(data_size, header_size);

  Node* result = Allocate(total_size, kNone);
  StoreMapNoWriteBarrier(result, LoadRoot(Heap::kFixedArrayMapRootIndex));
  StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset,
                                 SmiTag(length));

  return result;
}

Node* CodeStubAssembler::LoadFixedArrayElementInt32Index(
    Node* object, Node* index, int additional_offset) {
  Node* header_size = IntPtrConstant(additional_offset +
                                     FixedArray::kHeaderSize - kHeapObjectTag);
  if (Is64()) {
    index = ChangeInt32ToInt64(index);
  }
  Node* scaled_index = WordShl(index, IntPtrConstant(kPointerSizeLog2));
  Node* offset = IntPtrAdd(scaled_index, header_size);
  return Load(MachineType::AnyTagged(), object, offset);
}

Node* CodeStubAssembler::LoadMapInstanceSize(Node* map) {
  return Load(MachineType::Uint8(), map,
              IntPtrConstant(Map::kInstanceSizeOffset - kHeapObjectTag));
}

Node*
CodeStubAssembler::LoadFixedArrayElementSmiIndex(Node* object, Node* smi_index,
                                                 int additional_offset) {
  int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
  Node* header_size = IntPtrConstant(additional_offset +
                                     FixedArray::kHeaderSize - kHeapObjectTag);
  Node* scaled_index =
      (kSmiShiftBits > kPointerSizeLog2)
          ? WordSar(smi_index,
                    IntPtrConstant(kSmiShiftBits - kPointerSizeLog2))
          : WordShl(smi_index,
                    IntPtrConstant(kPointerSizeLog2 - kSmiShiftBits));
  Node* offset = IntPtrAdd(scaled_index, header_size);
  return Load(MachineType::AnyTagged(), object, offset);
}

Node* CodeStubAssembler::LoadFixedArrayElementConstantIndex(Node* object,
                                                            int index) {
  Node* offset = IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag +
                                index * kPointerSize);
  return Load(MachineType::AnyTagged(), object, offset);
}

Node* CodeStubAssembler::StoreHeapNumberValue(Node* object, Node* value) {
  return StoreNoWriteBarrier(
      MachineRepresentation::kFloat64, object,
      IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag), value);
}

Node* CodeStubAssembler::StoreObjectField(Node* object, int offset,
                                          Node* value) {
  return Store(MachineRepresentation::kTagged, object,
               IntPtrConstant(offset - kHeapObjectTag), value);
}

Node* CodeStubAssembler::StoreObjectFieldNoWriteBarrier(
    Node* object, int offset, Node* value, MachineRepresentation rep) {
  return StoreNoWriteBarrier(rep, object,
                             IntPtrConstant(offset - kHeapObjectTag), value);
}

Node* CodeStubAssembler::StoreMapNoWriteBarrier(Node* object, Node* map) {
  return StoreNoWriteBarrier(
      MachineRepresentation::kTagged, object,
      IntPtrConstant(HeapNumber::kMapOffset - kHeapObjectTag), map);
}

Node* CodeStubAssembler::StoreFixedArrayElementNoWriteBarrier(Node* object,
                                                              Node* index,
                                                              Node* value) {
  Node* offset =
      IntPtrAdd(WordShl(index, IntPtrConstant(kPointerSizeLog2)),
                IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag));
  return StoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset,
                             value);
}

Node* CodeStubAssembler::StoreFixedArrayElementInt32Index(Node* object,
                                                          Node* index,
                                                          Node* value) {
  if (Is64()) {
    index = ChangeInt32ToInt64(index);
  }
  Node* offset =
      IntPtrAdd(WordShl(index, IntPtrConstant(kPointerSizeLog2)),
                IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag));
  return Store(MachineRepresentation::kTagged, object, offset, value);
}

Node* CodeStubAssembler::AllocateHeapNumber() {
  Node* result = Allocate(HeapNumber::kSize, kNone);
  StoreMapNoWriteBarrier(result, HeapNumberMapConstant());
  return result;
}

Node* CodeStubAssembler::AllocateHeapNumberWithValue(Node* value) {
  Node* result = AllocateHeapNumber();
  StoreHeapNumberValue(result, value);
  return result;
}

Node* CodeStubAssembler::AllocateSeqOneByteString(int length) {
  Node* result = Allocate(SeqOneByteString::SizeFor(length));
  StoreMapNoWriteBarrier(result, LoadRoot(Heap::kOneByteStringMapRootIndex));
  StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
                                 SmiConstant(Smi::FromInt(length)));
  StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldSlot,
                                 IntPtrConstant(String::kEmptyHashField));
  return result;
}

Node* CodeStubAssembler::AllocateSeqTwoByteString(int length) {
  Node* result = Allocate(SeqTwoByteString::SizeFor(length));
  StoreMapNoWriteBarrier(result, LoadRoot(Heap::kStringMapRootIndex));
  StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
                                 SmiConstant(Smi::FromInt(length)));
  StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldSlot,
                                 IntPtrConstant(String::kEmptyHashField));
  return result;
}

Node*
CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) {
  // We might need to loop once due to ToNumber conversion.
  Variable var_value(this, MachineRepresentation::kTagged),
      var_result(this, MachineRepresentation::kFloat64);
  Label loop(this, &var_value), done_loop(this, &var_result);
  var_value.Bind(value);
  Goto(&loop);
  Bind(&loop);
  {
    // Load the current {value}.
    value = var_value.value();

    // Check if the {value} is a Smi or a HeapObject.
    Label if_valueissmi(this), if_valueisnotsmi(this);
    Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);

    Bind(&if_valueissmi);
    {
      // Convert the Smi {value}.
      var_result.Bind(SmiToFloat64(value));
      Goto(&done_loop);
    }

    Bind(&if_valueisnotsmi);
    {
      // Check if {value} is a HeapNumber.
      Label if_valueisheapnumber(this),
          if_valueisnotheapnumber(this, Label::kDeferred);
      Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
             &if_valueisheapnumber, &if_valueisnotheapnumber);

      Bind(&if_valueisheapnumber);
      {
        // Load the floating point value.
        var_result.Bind(LoadHeapNumberValue(value));
        Goto(&done_loop);
      }

      Bind(&if_valueisnotheapnumber);
      {
        // Convert the {value} to a Number first.
        Callable callable = CodeFactory::NonNumberToNumber(isolate());
        var_value.Bind(CallStub(callable, context, value));
        Goto(&loop);
      }
    }
  }
  Bind(&done_loop);
  return var_result.value();
}

Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) {
  // We might need to loop once due to ToNumber conversion.
  Variable var_value(this, MachineRepresentation::kTagged),
      var_result(this, MachineRepresentation::kWord32);
  Label loop(this, &var_value), done_loop(this, &var_result);
  var_value.Bind(value);
  Goto(&loop);
  Bind(&loop);
  {
    // Load the current {value}.
    value = var_value.value();

    // Check if the {value} is a Smi or a HeapObject.
    Label if_valueissmi(this), if_valueisnotsmi(this);
    Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);

    Bind(&if_valueissmi);
    {
      // Convert the Smi {value}.
      var_result.Bind(SmiToWord32(value));
      Goto(&done_loop);
    }

    Bind(&if_valueisnotsmi);
    {
      // Check if {value} is a HeapNumber.
      Label if_valueisheapnumber(this),
          if_valueisnotheapnumber(this, Label::kDeferred);
      Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
             &if_valueisheapnumber, &if_valueisnotheapnumber);

      Bind(&if_valueisheapnumber);
      {
        // Truncate the floating point value.
        var_result.Bind(TruncateHeapNumberValueToWord32(value));
        Goto(&done_loop);
      }

      Bind(&if_valueisnotheapnumber);
      {
        // Convert the {value} to a Number first.
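        // The conversion yields either a Smi or a HeapNumber, so loop back
        // and dispatch on the converted {value} again.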
        Callable callable = CodeFactory::NonNumberToNumber(isolate());
        var_value.Bind(CallStub(callable, context, value));
        Goto(&loop);
      }
    }
  }
  Bind(&done_loop);
  return var_result.value();
}

Node* CodeStubAssembler::TruncateHeapNumberValueToWord32(Node* object) {
  Node* value = LoadHeapNumberValue(object);
  return TruncateFloat64ToWord32(value);
}

Node* CodeStubAssembler::ChangeFloat64ToTagged(Node* value) {
  Node* value32 = RoundFloat64ToInt32(value);
  Node* value64 = ChangeInt32ToFloat64(value32);

  Label if_valueisint32(this), if_valueisheapnumber(this), if_join(this);

  Label if_valueisequal(this), if_valueisnotequal(this);
  Branch(Float64Equal(value, value64), &if_valueisequal, &if_valueisnotequal);
  Bind(&if_valueisequal);
  {
    GotoUnless(Word32Equal(value32, Int32Constant(0)), &if_valueisint32);
    BranchIfInt32LessThan(Float64ExtractHighWord32(value), Int32Constant(0),
                          &if_valueisheapnumber, &if_valueisint32);
  }
  Bind(&if_valueisnotequal);
  Goto(&if_valueisheapnumber);

  Variable var_result(this, MachineRepresentation::kTagged);
  Bind(&if_valueisint32);
  {
    if (Is64()) {
      Node* result = SmiTag(ChangeInt32ToInt64(value32));
      var_result.Bind(result);
      Goto(&if_join);
    } else {
      Node* pair = Int32AddWithOverflow(value32, value32);
      Node* overflow = Projection(1, pair);
      Label if_overflow(this, Label::kDeferred), if_notoverflow(this);
      Branch(overflow, &if_overflow, &if_notoverflow);
      Bind(&if_overflow);
      Goto(&if_valueisheapnumber);
      Bind(&if_notoverflow);
      {
        Node* result = Projection(0, pair);
        var_result.Bind(result);
        Goto(&if_join);
      }
    }
  }
  Bind(&if_valueisheapnumber);
  {
    Node* result = AllocateHeapNumberWithValue(value);
    var_result.Bind(result);
    Goto(&if_join);
  }
  Bind(&if_join);
  return var_result.value();
}

Node* CodeStubAssembler::ChangeInt32ToTagged(Node* value) {
  if (Is64()) {
    return SmiTag(ChangeInt32ToInt64(value));
  }
  Variable var_result(this, MachineRepresentation::kTagged);
  Node* pair = Int32AddWithOverflow(value, value);
  Node* overflow = Projection(1, pair);
  Label if_overflow(this, Label::kDeferred), if_notoverflow(this),
      if_join(this);
  Branch(overflow, &if_overflow, &if_notoverflow);
  Bind(&if_overflow);
  {
    Node* value64 = ChangeInt32ToFloat64(value);
    Node* result = AllocateHeapNumberWithValue(value64);
    var_result.Bind(result);
  }
  Goto(&if_join);
  Bind(&if_notoverflow);
  {
    Node* result = Projection(0, pair);
    var_result.Bind(result);
  }
  Goto(&if_join);
  Bind(&if_join);
  return var_result.value();
}

Node* CodeStubAssembler::ChangeUint32ToTagged(Node* value) {
  Label if_overflow(this, Label::kDeferred), if_not_overflow(this),
      if_join(this);
  Variable var_result(this, MachineRepresentation::kTagged);
  // If {value} > 2^31 - 1, we need to store it in a HeapNumber.
  Branch(Int32LessThan(value, Int32Constant(0)), &if_overflow,
         &if_not_overflow);
  Bind(&if_not_overflow);
  {
    if (Is64()) {
      var_result.Bind(SmiTag(ChangeUint32ToUint64(value)));
    } else {
      // If tagging {value} results in an overflow, we need to use a HeapNumber
      // to represent it.
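      // Note: on 32-bit targets a Smi is the value shifted left by one bit,
      // so Int32AddWithOverflow(value, value) both performs the tagging and
      // detects the overflow in a single operation.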
      Node* pair = Int32AddWithOverflow(value, value);
      Node* overflow = Projection(1, pair);
      GotoIf(overflow, &if_overflow);

      Node* result = Projection(0, pair);
      var_result.Bind(result);
    }
  }
  Goto(&if_join);

  Bind(&if_overflow);
  {
    Node* float64_value = ChangeUint32ToFloat64(value);
    var_result.Bind(AllocateHeapNumberWithValue(float64_value));
  }
  Goto(&if_join);

  Bind(&if_join);
  return var_result.value();
}

Node* CodeStubAssembler::ToThisString(Node* context, Node* value,
                                      char const* method_name) {
  Variable var_value(this, MachineRepresentation::kTagged);
  var_value.Bind(value);

  // Check if the {value} is a Smi or a HeapObject.
  Label if_valueissmi(this, Label::kDeferred), if_valueisnotsmi(this),
      if_valueisstring(this);
  Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
  Bind(&if_valueisnotsmi);
  {
    // Load the instance type of the {value}.
    Node* value_instance_type = LoadInstanceType(value);

    // Check if the {value} is already a String.
    Label if_valueisnotstring(this, Label::kDeferred);
    Branch(Int32LessThan(value_instance_type,
                         Int32Constant(FIRST_NONSTRING_TYPE)),
           &if_valueisstring, &if_valueisnotstring);
    Bind(&if_valueisnotstring);
    {
      // Check if the {value} is null.
      Label if_valueisnullorundefined(this, Label::kDeferred),
          if_valueisnotnullorundefined(this, Label::kDeferred),
          if_valueisnotnull(this, Label::kDeferred);
      Branch(WordEqual(value, NullConstant()), &if_valueisnullorundefined,
             &if_valueisnotnull);
      Bind(&if_valueisnotnull);
      {
        // Check if the {value} is undefined.
        Branch(WordEqual(value, UndefinedConstant()),
               &if_valueisnullorundefined, &if_valueisnotnullorundefined);
        Bind(&if_valueisnotnullorundefined);
        {
          // Convert the {value} to a String.
          Callable callable = CodeFactory::ToString(isolate());
          var_value.Bind(CallStub(callable, context, value));
          Goto(&if_valueisstring);
        }
      }

      Bind(&if_valueisnullorundefined);
      {
        // The {value} is either null or undefined.
        CallRuntime(Runtime::kThrowCalledOnNullOrUndefined, context,
                    HeapConstant(factory()->NewStringFromAsciiChecked(
                        method_name, TENURED)));
        Goto(&if_valueisstring);  // Never reached.
      }
    }
  }
  Bind(&if_valueissmi);
  {
    // The {value} is a Smi, convert it to a String.
    Callable callable = CodeFactory::NumberToString(isolate());
    var_value.Bind(CallStub(callable, context, value));
    Goto(&if_valueisstring);
  }
  Bind(&if_valueisstring);
  return var_value.value();
}

Node* CodeStubAssembler::StringCharCodeAt(Node* string, Node* index) {
  // Translate the {index} into a Word.
  index = SmiToWord(index);

  // We may need to loop in case of cons or sliced strings.
  Variable var_index(this, MachineType::PointerRepresentation());
  Variable var_result(this, MachineRepresentation::kWord32);
  Variable var_string(this, MachineRepresentation::kTagged);
  Variable* loop_vars[] = {&var_index, &var_string};
  Label done_loop(this, &var_result), loop(this, 2, loop_vars);
  var_string.Bind(string);
  var_index.Bind(index);
  Goto(&loop);
  Bind(&loop);
  {
    // Load the current {index}.
    index = var_index.value();

    // Load the current {string}.
    string = var_string.value();

    // Load the instance type of the {string}.
    Node* string_instance_type = LoadInstanceType(string);

    // Check if the {string} is a SeqString.
    Label if_stringissequential(this), if_stringisnotsequential(this);
    Branch(Word32Equal(Word32And(string_instance_type,
                                 Int32Constant(kStringRepresentationMask)),
                       Int32Constant(kSeqStringTag)),
           &if_stringissequential, &if_stringisnotsequential);

    Bind(&if_stringissequential);
    {
      // Check if the {string} is a TwoByteSeqString or a OneByteSeqString.
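      // Sequential one-byte strings store one character per byte after the
      // header, while two-byte strings store 16-bit code units, so the index
      // is used as-is for one-byte data and scaled by two for two-byte data.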
      Label if_stringistwobyte(this), if_stringisonebyte(this);
      Branch(Word32Equal(Word32And(string_instance_type,
                                   Int32Constant(kStringEncodingMask)),
                         Int32Constant(kTwoByteStringTag)),
             &if_stringistwobyte, &if_stringisonebyte);

      Bind(&if_stringisonebyte);
      {
        var_result.Bind(
            Load(MachineType::Uint8(), string,
                 IntPtrAdd(index,
                           IntPtrConstant(SeqOneByteString::kHeaderSize -
                                          kHeapObjectTag))));
        Goto(&done_loop);
      }

      Bind(&if_stringistwobyte);
      {
        var_result.Bind(
            Load(MachineType::Uint16(), string,
                 IntPtrAdd(WordShl(index, IntPtrConstant(1)),
                           IntPtrConstant(SeqTwoByteString::kHeaderSize -
                                          kHeapObjectTag))));
        Goto(&done_loop);
      }
    }

    Bind(&if_stringisnotsequential);
    {
      // Check if the {string} is a ConsString.
      Label if_stringiscons(this), if_stringisnotcons(this);
      Branch(Word32Equal(Word32And(string_instance_type,
                                   Int32Constant(kStringRepresentationMask)),
                         Int32Constant(kConsStringTag)),
             &if_stringiscons, &if_stringisnotcons);

      Bind(&if_stringiscons);
      {
        // Check whether the right hand side is the empty string (i.e. if
        // this is really a flat string in a cons string). If that is not
        // the case we flatten the string first.
        Label if_rhsisempty(this), if_rhsisnotempty(this, Label::kDeferred);
        Node* rhs = LoadObjectField(string, ConsString::kSecondOffset);
        Branch(WordEqual(rhs, EmptyStringConstant()), &if_rhsisempty,
               &if_rhsisnotempty);

        Bind(&if_rhsisempty);
        {
          // Just operate on the left hand side of the {string}.
          var_string.Bind(LoadObjectField(string, ConsString::kFirstOffset));
          Goto(&loop);
        }

        Bind(&if_rhsisnotempty);
        {
          // Flatten the {string} and look up in the resulting string.
          var_string.Bind(CallRuntime(Runtime::kFlattenString,
                                      NoContextConstant(), string));
          Goto(&loop);
        }
      }

      Bind(&if_stringisnotcons);
      {
        // Check if the {string} is an ExternalString.
        Label if_stringisexternal(this), if_stringisnotexternal(this);
        Branch(Word32Equal(Word32And(string_instance_type,
                                     Int32Constant(kStringRepresentationMask)),
                           Int32Constant(kExternalStringTag)),
               &if_stringisexternal, &if_stringisnotexternal);

        Bind(&if_stringisexternal);
        {
          // Check if the {string} is a short external string.
          Label if_stringisshort(this),
              if_stringisnotshort(this, Label::kDeferred);
          Branch(Word32Equal(Word32And(string_instance_type,
                                       Int32Constant(kShortExternalStringMask)),
                             Int32Constant(0)),
                 &if_stringisshort, &if_stringisnotshort);

          Bind(&if_stringisshort);
          {
            // Load the actual resource data from the {string}.
            Node* string_resource_data =
                LoadObjectField(string, ExternalString::kResourceDataOffset,
                                MachineType::Pointer());

            // Check if the {string} is a TwoByteExternalString or a
            // OneByteExternalString.
            Label if_stringistwobyte(this), if_stringisonebyte(this);
            Branch(Word32Equal(Word32And(string_instance_type,
                                         Int32Constant(kStringEncodingMask)),
                               Int32Constant(kTwoByteStringTag)),
                   &if_stringistwobyte, &if_stringisonebyte);

            Bind(&if_stringisonebyte);
            {
              var_result.Bind(
                  Load(MachineType::Uint8(), string_resource_data, index));
              Goto(&done_loop);
            }

            Bind(&if_stringistwobyte);
            {
              var_result.Bind(Load(MachineType::Uint16(),
                                   string_resource_data,
                                   WordShl(index, IntPtrConstant(1))));
              Goto(&done_loop);
            }
          }

          Bind(&if_stringisnotshort);
          {
            // The {string} might be compressed, call the runtime.
            var_result.Bind(SmiToWord32(
                CallRuntime(Runtime::kExternalStringGetChar,
                            NoContextConstant(), string, SmiTag(index))));
            Goto(&done_loop);
          }
        }

        Bind(&if_stringisnotexternal);
        {
          // The {string} is a SlicedString, continue with its parent.
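          // A SlicedString is a (parent, offset) view into another string, so
          // add the slice offset to the index and retry on the parent.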
          Node* string_offset =
              SmiToWord(LoadObjectField(string, SlicedString::kOffsetOffset));
          Node* string_parent =
              LoadObjectField(string, SlicedString::kParentOffset);
          var_index.Bind(IntPtrAdd(index, string_offset));
          var_string.Bind(string_parent);
          Goto(&loop);
        }
      }
    }
  }

  Bind(&done_loop);
  return var_result.value();
}

Node* CodeStubAssembler::StringFromCharCode(Node* code) {
  Variable var_result(this, MachineRepresentation::kTagged);

  // Check if the {code} is a one-byte char code.
  Label if_codeisonebyte(this), if_codeistwobyte(this, Label::kDeferred),
      if_done(this);
  Branch(Int32LessThanOrEqual(code,
                              Int32Constant(String::kMaxOneByteCharCode)),
         &if_codeisonebyte, &if_codeistwobyte);
  Bind(&if_codeisonebyte);
  {
    // Load the isolate wide single character string cache.
    Node* cache = LoadRoot(Heap::kSingleCharacterStringCacheRootIndex);

    // Check if we have an entry for the {code} in the single character string
    // cache already.
    Label if_entryisundefined(this, Label::kDeferred),
        if_entryisnotundefined(this);
    Node* entry = LoadFixedArrayElementInt32Index(cache, code);
    Branch(WordEqual(entry, UndefinedConstant()), &if_entryisundefined,
           &if_entryisnotundefined);

    Bind(&if_entryisundefined);
    {
      // Allocate a new SeqOneByteString for {code} and store it in the
      // {cache}.
      Node* result = AllocateSeqOneByteString(1);
      StoreNoWriteBarrier(
          MachineRepresentation::kWord8, result,
          IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag),
          code);
      StoreFixedArrayElementInt32Index(cache, code, result);
      var_result.Bind(result);
      Goto(&if_done);
    }

    Bind(&if_entryisnotundefined);
    {
      // Return the entry from the {cache}.
      var_result.Bind(entry);
      Goto(&if_done);
    }
  }

  Bind(&if_codeistwobyte);
  {
    // Allocate a new SeqTwoByteString for {code}.
    Node* result = AllocateSeqTwoByteString(1);
    StoreNoWriteBarrier(
        MachineRepresentation::kWord16, result,
        IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag), code);
    var_result.Bind(result);
    Goto(&if_done);
  }

  Bind(&if_done);
  return var_result.value();
}

Node* CodeStubAssembler::BitFieldDecode(Node* word32, uint32_t shift,
                                        uint32_t mask) {
  return Word32Shr(Word32And(word32, Int32Constant(mask)),
                   Int32Constant(shift));
}

}  // namespace internal
}  // namespace v8