X64: Extract all smi operations into MacroAssembler macros.

First step in changing Smi representation.

Review URL: http://codereview.chromium.org/196077
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@2869 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Parent: cf37189c65
Commit: 158dcbc39d
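The change is mechanical but wide: every open-coded use of the smi representation (tag checks against kSmiTagMask, tagging via shifts or lea, untagging via sar) is routed through new MacroAssembler macros, so that the representation itself can later be changed in one place. A minimal sketch of the recurring pattern in this diff (register and label names are illustrative):

    // Before: the smi tag layout is hard-coded at every call site.
    __ testl(rdi, Immediate(kSmiTagMask));
    __ j(zero, &non_function);

    // After: one macro owns the representation.
    __ JumpIfSmi(rdi, &non_function);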
@@ -852,7 +852,7 @@ class Assembler : public Malloced {
 class EnsureSpace BASE_EMBEDDED {
  public:
   explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
-    if (assembler_->overflow()) assembler_->GrowBuffer();
+    if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
 #ifdef DEBUG
     space_before_ = assembler_->available_space();
 #endif
@@ -366,7 +366,7 @@ void Assembler::bind(Label* L) {


 void Assembler::GrowBuffer() {
-  ASSERT(overflow());  // should not call this otherwise
+  ASSERT(buffer_overflow());  // should not call this otherwise
   if (!own_buffer_) FATAL("external code buffer is too small");

   // compute new buffer size
@@ -428,7 +428,7 @@ void Assembler::GrowBuffer() {
     }
   }

-  ASSERT(!overflow());
+  ASSERT(!buffer_overflow());
 }

@@ -1410,6 +1410,15 @@ void Assembler::neg(Register dst) {
 }


+void Assembler::negl(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  emit(0xF7);
+  emit_modrm(0x3, dst);
+}
+
+
 void Assembler::neg(const Operand& dst) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -721,6 +721,7 @@ class Assembler : public Malloced {

   void neg(Register dst);
   void neg(const Operand& dst);
+  void negl(Register dst);

   void not_(Register dst);
   void not_(const Operand& dst);
@@ -729,6 +730,10 @@ class Assembler : public Malloced {
     arithmetic_op(0x0B, dst, src);
   }

+  void orl(Register dst, Register src) {
+    arithmetic_op_32(0x0B, dst, src);
+  }
+
   void or_(Register dst, const Operand& src) {
     arithmetic_op(0x0B, dst, src);
   }
@@ -860,6 +865,10 @@ class Assembler : public Malloced {
     arithmetic_op(0x33, dst, src);
   }

+  void xorl(Register dst, Register src) {
+    arithmetic_op_32(0x33, dst, src);
+  }
+
   void xor_(Register dst, const Operand& src) {
     arithmetic_op(0x33, dst, src);
   }
@@ -1049,7 +1058,9 @@ class Assembler : public Malloced {
   // Check if there is less than kGap bytes available in the buffer.
   // If this is the case, we need to grow the buffer before emitting
   // an instruction or relocation information.
-  inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
+  inline bool buffer_overflow() const {
+    return pc_ >= reloc_info_writer.pos() - kGap;
+  }

   // Get the number of bytes available in the buffer.
   inline int available_space() const { return reloc_info_writer.pos() - pc_; }
@@ -1279,7 +1290,7 @@ class Assembler : public Malloced {
 class EnsureSpace BASE_EMBEDDED {
  public:
   explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
-    if (assembler_->overflow()) assembler_->GrowBuffer();
+    if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
 #ifdef DEBUG
     space_before_ = assembler_->available_space();
 #endif
@@ -61,8 +61,7 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   // Preserve the number of arguments on the stack. Must preserve both
   // rax and rbx because these registers are used when copying the
   // arguments and the receiver.
-  ASSERT(kSmiTagSize == 1);
-  __ lea(rcx, Operand(rax, rax, times_1, kSmiTag));
+  __ Integer32ToSmi(rcx, rax);
   __ push(rcx);
 }

@@ -77,10 +76,13 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {

   // Remove caller arguments from the stack.
-  // rbx holds a Smi, so we convert to dword offset by multiplying by 4.
-  ASSERT_EQ(kSmiTagSize, 1 && kSmiTag == 0);
-  ASSERT_EQ(kPointerSize, (1 << kSmiTagSize) * 4);
+  // TODO(smi): Find a way to abstract indexing by a smi.
   __ pop(rcx);
-  __ lea(rsp, Operand(rsp, rbx, times_4, 1 * kPointerSize));  // 1 ~ receiver
+  // 1 * kPointerSize is offset of receiver.
+  __ lea(rsp, Operand(rsp, rbx, times_half_pointer_size, 1 * kPointerSize));
   __ push(rcx);
 }

@@ -192,8 +194,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   { Label done, non_function, function;
     // The function to call is at position n+1 on the stack.
     __ movq(rdi, Operand(rsp, rax, times_pointer_size, +1 * kPointerSize));
-    __ testl(rdi, Immediate(kSmiTagMask));
-    __ j(zero, &non_function);
+    __ JumpIfSmi(rdi, &non_function);
     __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
     __ j(equal, &function);

@@ -213,8 +214,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   { Label call_to_object, use_global_receiver, patch_receiver, done;
     __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));

-    __ testl(rbx, Immediate(kSmiTagMask));
-    __ j(zero, &call_to_object);
+    __ JumpIfSmi(rbx, &call_to_object);

     __ CompareRoot(rbx, Heap::kNullValueRootIndex);
     __ j(equal, &use_global_receiver);
@@ -230,8 +230,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
     __ EnterInternalFrame();  // preserves rax, rbx, rdi

     // Store the arguments count on the stack (smi tagged).
-    ASSERT(kSmiTag == 0);
-    __ shl(rax, Immediate(kSmiTagSize));
+    __ Integer32ToSmi(rax, rax);
     __ push(rax);

     __ push(rdi);  // save edi across the call
@@ -242,7 +241,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {

     // Get the arguments count and untag it.
     __ pop(rax);
-    __ shr(rax, Immediate(kSmiTagSize));
+    __ SmiToInteger32(rax, rax);

     __ LeaveInternalFrame();
     __ jmp(&patch_receiver);
@@ -355,8 +354,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
   Label okay;
   // Make rdx the space we need for the array when it is unrolled onto the
   // stack.
-  __ movq(rdx, rax);
-  __ shl(rdx, Immediate(kPointerSizeLog2 - kSmiTagSize));
+  __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
   __ cmpq(rcx, rdx);
   __ j(greater, &okay);

@@ -382,8 +380,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
   // Compute the receiver.
   Label call_to_object, use_global_receiver, push_receiver;
   __ movq(rbx, Operand(rbp, kReceiverOffset));
-  __ testl(rbx, Immediate(kSmiTagMask));
-  __ j(zero, &call_to_object);
+  __ JumpIfSmi(rbx, &call_to_object);
   __ CompareRoot(rbx, Heap::kNullValueRootIndex);
   __ j(equal, &use_global_receiver);
   __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
@@ -446,7 +443,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {

   // Invoke the function.
   ParameterCount actual(rax);
-  __ shr(rax, Immediate(kSmiTagSize));
+  __ SmiToInteger32(rax, rax);
   __ movq(rdi, Operand(rbp, kFunctionOffset));
   __ InvokeFunction(rdi, actual, CALL_FUNCTION);

@@ -463,8 +460,7 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {

   Label non_function_call;
   // Check that function is not a smi.
-  __ testl(rdi, Immediate(kSmiTagMask));
-  __ j(zero, &non_function_call);
+  __ JumpIfSmi(rdi, &non_function_call);
   // Check that function is a JSFunction.
   __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
   __ j(not_equal, &non_function_call);
@@ -492,7 +488,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   __ EnterConstructFrame();

   // Store a smi-tagged arguments count on the stack.
-  __ shl(rax, Immediate(kSmiTagSize));
+  __ Integer32ToSmi(rax, rax);
   __ push(rax);

   // Push the function to invoke on the stack.
@@ -517,8 +513,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
     // rdi: constructor
     __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
     // Will both indicate a NULL and a Smi
-    __ testl(rax, Immediate(kSmiTagMask));
-    __ j(zero, &rt_call);
+    __ JumpIfSmi(rax, &rt_call);
     // rdi: constructor
     // rax: initial map (if proven valid below)
     __ CmpObjectType(rax, MAP_TYPE, rbx);
@@ -668,7 +663,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {

   // Retrieve smi-tagged arguments count from the stack.
   __ movq(rax, Operand(rsp, 0));
-  __ shr(rax, Immediate(kSmiTagSize));
+  __ SmiToInteger32(rax, rax);

   // Push the allocated receiver to the stack. We need two copies
   // because we may have to return the original one and the calling
@@ -701,8 +696,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   // on page 74.
   Label use_receiver, exit;
   // If the result is a smi, it is *not* an object in the ECMA sense.
-  __ testl(rax, Immediate(kSmiTagMask));
-  __ j(zero, &use_receiver);
+  __ JumpIfSmi(rax, &use_receiver);

   // If the type of the result (stored in its map) is less than
   // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
@@ -721,8 +715,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {

   // Remove caller arguments from the stack and return.
   ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  // TODO(smi): Find a way to abstract indexing by a smi.
   __ pop(rcx);
-  __ lea(rsp, Operand(rsp, rbx, times_4, 1 * kPointerSize));  // 1 ~ receiver
+  // 1 * kPointerSize is offset of receiver.
+  __ lea(rsp, Operand(rsp, rbx, times_half_pointer_size, 1 * kPointerSize));
   __ push(rcx);
   __ IncrementCounter(&Counters::constructed_objects, 1);
   __ ret(0);
[Diff of one file suppressed because it is too large.]
@@ -95,7 +95,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
       StringDictionary::kHeaderSize +
       StringDictionary::kCapacityIndex * kPointerSize;
   __ movq(r2, FieldOperand(r0, kCapacityOffset));
-  __ shrl(r2, Immediate(kSmiTagSize));  // convert smi to int
+  __ SmiToInteger32(r2, r2);
   __ decl(r2);

   // Generate an unrolled loop that performs a few probes before
@@ -132,7 +132,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
   __ bind(&done);
   const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
   __ testl(Operand(r0, r1, times_pointer_size, kDetailsOffset - kHeapObjectTag),
-           Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
+           Immediate(Smi::FromInt(PropertyDetails::TypeField::mask())));
   __ j(not_zero, miss_label);

   // Get the value at the masked, scaled index.
@@ -148,8 +148,7 @@ static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
                                            Register value) {
   Label done;
   // Check if the value is a Smi.
-  __ testl(value, Immediate(kSmiTagMask));
-  __ j(zero, &done);
+  __ JumpIfSmi(value, &done);
   // Check if the object has been loaded.
   __ movq(kScratchRegister, FieldOperand(value, JSFunction::kMapOffset));
   __ testb(FieldOperand(kScratchRegister, Map::kBitField2Offset),
@@ -265,8 +264,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   __ movq(rcx, Operand(rsp, 2 * kPointerSize));

   // Check that the object isn't a smi.
-  __ testl(rcx, Immediate(kSmiTagMask));
-  __ j(zero, &slow);
+  __ JumpIfSmi(rcx, &slow);

   // Check that the object is some kind of JS object EXCEPT JS Value type.
   // In the case that the object is a value-wrapper object,
@@ -283,9 +281,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   __ j(not_zero, &slow);

   // Check that the key is a smi.
-  __ testl(rax, Immediate(kSmiTagMask));
-  __ j(not_zero, &check_string);
-  __ sarl(rax, Immediate(kSmiTagSize));
+  __ JumpIfNotSmi(rax, &check_string);
+  __ SmiToInteger32(rax, rax);
   // Get the elements array of the object.
   __ bind(&index_int);
   __ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset));
@@ -410,8 +407,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   // Get the receiver from the stack.
   __ movq(rdx, Operand(rsp, 2 * kPointerSize));  // 2 ~ return address, key
   // Check that the object isn't a smi.
-  __ testl(rdx, Immediate(kSmiTagMask));
-  __ j(zero, &slow);
+  __ JumpIfSmi(rdx, &slow);
   // Get the map from the receiver.
   __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
   // Check that the receiver does not require access checks. We need
@@ -422,8 +418,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   // Get the key from the stack.
   __ movq(rbx, Operand(rsp, 1 * kPointerSize));  // 1 ~ return address
   // Check that the key is a smi.
-  __ testl(rbx, Immediate(kSmiTagMask));
-  __ j(not_zero, &slow);
+  __ JumpIfNotSmi(rbx, &slow);
   // If it is a smi, make sure it is zero-extended, so it can be
   // used as an index in a memory operand.
   __ movl(rbx, rbx);  // Clear the high bits of rbx.
@@ -443,8 +438,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map());
   __ j(not_equal, &slow);
   // Untag the key (for checking against untagged length in the fixed array).
-  __ movl(rdx, rbx);
-  __ sarl(rdx, Immediate(kSmiTagSize));
+  __ SmiToInteger32(rdx, rbx);
   __ cmpl(rdx, FieldOperand(rcx, Array::kLengthOffset));
   // rax: value
   // rcx: FixedArray
@@ -473,13 +467,13 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   // rbx: index (as a smi)
   // flags: compare (rbx, rdx.length())
   __ j(not_equal, &slow);  // do not leave holes in the array
-  __ sarl(rbx, Immediate(kSmiTagSize));  // untag
+  __ SmiToInteger64(rbx, rbx);
   __ cmpl(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
   __ j(above_equal, &slow);
-  // Restore tag and increment.
-  __ lea(rbx, Operand(rbx, rbx, times_1, 1 << kSmiTagSize));
+  // Increment and restore smi-tag.
+  __ Integer64AddToSmi(rbx, rbx, 1);
   __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rbx);
-  __ subl(rbx, Immediate(1 << kSmiTagSize));  // decrement rbx again
+  __ SmiSubConstant(rbx, rbx, 1, NULL);
   __ jmp(&fast);

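Integer64AddToSmi, used for the length update above, takes an untagged integer, adds the constant, and produces the tagged sum with a single lea (see its definition later in this diff). A sketch of the arithmetic at this call site, assuming the current representation of value << 1 with tag 0:

    int64_t n = untagged_index;         // rbx after SmiToInteger64
    int64_t tagged = n + n + (1 << 1);  // lea rbx, [rbx + rbx + (1 << kSmiTagSize)]
    // tagged == (n + 1) << 1, i.e. the smi for n + 1.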
@@ -544,8 +538,7 @@ void CallIC::Generate(MacroAssembler* masm,
   // Check if the receiver is a global object of some sort.
   Label invoke, global;
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));  // receiver
-  __ testl(rdx, Immediate(kSmiTagMask));
-  __ j(zero, &invoke);
+  __ JumpIfSmi(rdx, &invoke);
   __ CmpObjectType(rdx, JS_GLOBAL_OBJECT_TYPE, rcx);
   __ j(equal, &global);
   __ CmpInstanceType(rcx, JS_BUILTINS_OBJECT_TYPE);
@@ -594,8 +587,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
   // to probe.
   //
   // Check for number.
-  __ testl(rdx, Immediate(kSmiTagMask));
-  __ j(zero, &number);
+  __ JumpIfSmi(rdx, &number);
   __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rbx);
   __ j(not_equal, &non_number);
   __ bind(&number);
@@ -640,8 +632,7 @@ static void GenerateNormalHelper(MacroAssembler* masm,

   // Move the result to register rdi and check that it isn't a smi.
   __ movq(rdi, rdx);
-  __ testl(rdx, Immediate(kSmiTagMask));
-  __ j(zero, miss);
+  __ JumpIfSmi(rdx, miss);

   // Check that the value is a JavaScript function.
   __ CmpObjectType(rdx, JS_FUNCTION_TYPE, rdx);
@@ -683,8 +674,7 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
   __ movq(rcx, Operand(rsp, (argc + 2) * kPointerSize));

   // Check that the receiver isn't a smi.
-  __ testl(rdx, Immediate(kSmiTagMask));
-  __ j(zero, &miss);
+  __ JumpIfSmi(rdx, &miss);

   // Check that the receiver is a valid JS object.
   // Because there are so many map checks and type checks, do not
@@ -844,8 +834,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
   __ movq(rax, Operand(rsp, kPointerSize));

   // Check that the receiver isn't a smi.
-  __ testl(rax, Immediate(kSmiTagMask));
-  __ j(zero, &miss);
+  __ JumpIfSmi(rax, &miss);

   // Check that the receiver is a valid JS object.
   __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
@@ -412,6 +412,687 @@ void MacroAssembler::Set(const Operand& dst, int64_t x) {
 }


+// ----------------------------------------------------------------------------
+// Smi tagging, untagging and tag detection.
+
+
+void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
+  ASSERT_EQ(1, kSmiTagSize);
+  ASSERT_EQ(0, kSmiTag);
+#ifdef DEBUG
+  cmpq(src, Immediate(0xC0000000u));
+  Check(positive, "Smi conversion overflow");
+#endif
+  if (dst.is(src)) {
+    addl(dst, src);
+  } else {
+    lea(dst, Operand(src, src, times_1, 0));
+  }
+}
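With kSmiTag == 0 and kSmiTagSize == 1, a tagged smi is simply the integer value doubled, which is why the macro can tag with either addl(dst, src) when dst aliases src, or a single lea otherwise. The equivalent arithmetic as a sketch (helper names are illustrative, not V8 API):

    static inline int32_t TagSmi(int32_t value) { return value + value; }  // addl dst,src == value << 1
    static inline int32_t UntagSmi(int32_t smi) { return smi >> 1; }       // sarl dst, kSmiTagSize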
+
+
+void MacroAssembler::Integer32ToSmi(Register dst,
+                                    Register src,
+                                    Label* on_overflow) {
+  ASSERT_EQ(1, kSmiTagSize);
+  ASSERT_EQ(0, kSmiTag);
+  if (!dst.is(src)) {
+    movl(dst, src);
+  }
+  addl(dst, src);
+  j(overflow, on_overflow);
+}
+
+
+void MacroAssembler::Integer64AddToSmi(Register dst,
+                                       Register src,
+                                       int constant) {
+#ifdef DEBUG
+  movl(kScratchRegister, src);
+  addl(kScratchRegister, Immediate(constant));
+  Check(no_overflow, "Add-and-smi-convert overflow");
+  Condition valid = CheckInteger32ValidSmiValue(kScratchRegister);
+  Check(valid, "Add-and-smi-convert overflow");
+#endif
+  lea(dst, Operand(src, src, times_1, constant << kSmiTagSize));
+}
+
+
+void MacroAssembler::SmiToInteger32(Register dst, Register src) {
+  ASSERT_EQ(1, kSmiTagSize);
+  ASSERT_EQ(0, kSmiTag);
+  if (!dst.is(src)) {
+    movl(dst, src);
+  }
+  sarl(dst, Immediate(kSmiTagSize));
+}
+
+
+void MacroAssembler::SmiToInteger64(Register dst, Register src) {
+  ASSERT_EQ(1, kSmiTagSize);
+  ASSERT_EQ(0, kSmiTag);
+  movsxlq(dst, src);
+  sar(dst, Immediate(kSmiTagSize));
+}
+
+
+void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
+                                                           Register src,
+                                                           int power) {
+  ASSERT(power >= 0);
+  ASSERT(power < 64);
+  if (power == 0) {
+    SmiToInteger64(dst, src);
+    return;
+  }
+  movsxlq(dst, src);
+  shl(dst, Immediate(power - 1));
+}
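The shift by power - 1 in PositiveSmiTimesPowerOfTwoToInteger64 is not an off-by-one: the sign-extended tagged value already carries one factor of two, so one fewer shift is needed. Illustrative arithmetic only:

    int64_t tagged = (int64_t)value << 1;    // what movsxlq(dst, src) loads
    int64_t result = tagged << (power - 1);  // shl(dst, power - 1)
    // result == (int64_t)value << power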
+
+
+void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) {
+  ASSERT_EQ(0, kSmiTag);
+  testl(src, Immediate(kSmiTagMask));
+  j(zero, on_smi);
+}
+
+
+void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) {
+  Condition not_smi = CheckNotSmi(src);
+  j(not_smi, on_not_smi);
+}
+
+
+void MacroAssembler::JumpIfNotPositiveSmi(Register src,
+                                          Label* on_not_positive_smi) {
+  Condition not_positive_smi = CheckNotPositiveSmi(src);
+  j(not_positive_smi, on_not_positive_smi);
+}
+
+
+void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
+                                             int constant,
+                                             Label* on_equals) {
+  if (Smi::IsValid(constant)) {
+    Condition are_equal = CheckSmiEqualsConstant(src, constant);
+    j(are_equal, on_equals);
+  }
+}
+
+
+void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
+  Condition is_valid = CheckInteger32ValidSmiValue(src);
+  j(ReverseCondition(is_valid), on_invalid);
+}
+
+
+void MacroAssembler::JumpIfNotBothSmi(Register src1,
+                                      Register src2,
+                                      Label* on_not_both_smi) {
+  Condition not_both_smi = CheckNotBothSmi(src1, src2);
+  j(not_both_smi, on_not_both_smi);
+}
+
+
+Condition MacroAssembler::CheckSmi(Register src) {
+  testb(src, Immediate(kSmiTagMask));
+  return zero;
+}
+
+
+Condition MacroAssembler::CheckNotSmi(Register src) {
+  ASSERT_EQ(0, kSmiTag);
+  testb(src, Immediate(kSmiTagMask));
+  return not_zero;
+}
+
+
+Condition MacroAssembler::CheckPositiveSmi(Register src) {
+  ASSERT_EQ(0, kSmiTag);
+  testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
+  return zero;
+}
+
+
+Condition MacroAssembler::CheckNotPositiveSmi(Register src) {
+  ASSERT_EQ(0, kSmiTag);
+  testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
+  return not_zero;
+}
+
+
+Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
+  if (first.is(second)) {
+    return CheckSmi(first);
+  }
+  movl(kScratchRegister, first);
+  orl(kScratchRegister, second);
+  return CheckSmi(kScratchRegister);
+}
+
+
+Condition MacroAssembler::CheckNotBothSmi(Register first, Register second) {
+  ASSERT_EQ(0, kSmiTag);
+  if (first.is(second)) {
+    return CheckNotSmi(first);
+  }
+  movl(kScratchRegister, first);
+  or_(kScratchRegister, second);
+  return CheckNotSmi(kScratchRegister);
+}
+
+
+Condition MacroAssembler::CheckIsMinSmi(Register src) {
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  cmpl(src, Immediate(0x40000000));
+  return equal;
+}
+
+
+Condition MacroAssembler::CheckSmiEqualsConstant(Register src, int constant) {
+  if (constant == 0) {
+    testl(src, src);
+    return zero;
+  }
+  if (Smi::IsValid(constant)) {
+    cmpl(src, Immediate(Smi::FromInt(constant)));
+    return zero;
+  }
+  // Can't be equal.
+  UNREACHABLE();
+  return no_condition;
+}
+
+
+Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
+  // A 32-bit integer value can be converted to a smi if it is in the
+  // range [-2^30 .. 2^30-1]. That is equivalent to bits 30 and 31 of
+  // its 32-bit representation being equal.
+  cmpl(src, Immediate(0xC0000000u));
+  return positive;
+}
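The cmpl against 0xC0000000u in CheckInteger32ValidSmiValue works because 0xC0000000 is -2^30 as a signed 32-bit value: the subtraction src - (-2^30) has its sign bit clear exactly when src lies in the smi range. A C restatement of the same test (a sketch, not V8 code):

    static inline bool IsValidSmi32(int32_t v) {
      // Shift the valid range [-2^30, 2^30) up to [0, 2^31); the sign bit
      // of the shifted value is clear iff v fits in a smi.
      return ((uint32_t)v + 0x40000000u) < 0x80000000u;
    }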
+
+
+void MacroAssembler::SmiNeg(Register dst,
+                            Register src,
+                            Label* on_not_smi_result) {
+  if (!dst.is(src)) {
+    movl(dst, src);
+  }
+  negl(dst);
+  testl(dst, Immediate(0x7fffffff));
+  // If the result is zero or 0x80000000, negation failed to create a smi.
+  j(equal, on_not_smi_result);
+}
+
+
+void MacroAssembler::SmiAdd(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result) {
+  ASSERT(!dst.is(src2));
+  if (!dst.is(src1)) {
+    movl(dst, src1);
+  }
+  addl(dst, src2);
+  if (!dst.is(src1)) {
+    j(overflow, on_not_smi_result);
+  } else {
+    Label smi_result;
+    j(no_overflow, &smi_result);
+    // Restore src1.
+    subl(src1, src2);
+    jmp(on_not_smi_result);
+    bind(&smi_result);
+  }
+}
+
+
+void MacroAssembler::SmiSub(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result) {
+  ASSERT(!dst.is(src2));
+  if (!dst.is(src1)) {
+    movl(dst, src1);
+  }
+  subl(dst, src2);
+  if (!dst.is(src1)) {
+    j(overflow, on_not_smi_result);
+  } else {
+    Label smi_result;
+    j(no_overflow, &smi_result);
+    // Restore src1.
+    addl(src1, src2);
+    jmp(on_not_smi_result);
+    bind(&smi_result);
+  }
+}
+
+
+void MacroAssembler::SmiMul(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result) {
+  ASSERT(!dst.is(src2));
+
+  if (dst.is(src1)) {
+    movq(kScratchRegister, src1);
+  }
+  SmiToInteger32(dst, src1);
+
+  imull(dst, src2);
+  j(overflow, on_not_smi_result);
+
+  // Check for negative zero result. If product is zero, and one
+  // argument is negative, go to slow case. The frame is unchanged
+  // in this block, so local control flow can use a Label rather
+  // than a JumpTarget.
+  Label non_zero_result;
+  testl(dst, dst);
+  j(not_zero, &non_zero_result);
+
+  // Test whether either operand is negative (the other must be zero).
+  orl(kScratchRegister, src2);
+  j(negative, on_not_smi_result);
+  bind(&non_zero_result);
+}
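SmiAdd and SmiSub above share an optimistic protocol for the aliased case: when dst is src1, the operation is performed first and undone on overflow, so the slow path always observes the original operands. The same idea in portable C (a sketch with a hypothetical helper name, not V8 code):

    // Returns false and leaves *acc unchanged when the sum overflows.
    static bool OptimisticAdd(int32_t* acc, int32_t rhs) {
      uint32_t a = (uint32_t)*acc;
      uint32_t sum = a + (uint32_t)rhs;
      // Overflow iff the operands share a sign that differs from the result.
      if ((~(a ^ (uint32_t)rhs) & (a ^ sum)) >> 31) {
        return false;  // the "subl(src1, src2)" restore path
      }
      *acc = (int32_t)sum;
      return true;
    }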
+
+
+void MacroAssembler::SmiTryAddConstant(Register dst,
+                                       Register src,
+                                       int32_t constant,
+                                       Label* on_not_smi_result) {
+  // Does not assume that src is a smi.
+  ASSERT_EQ(1, kSmiTagMask);
+  ASSERT_EQ(0, kSmiTag);
+  ASSERT(Smi::IsValid(constant));
+
+  Register tmp = (src.is(dst) ? kScratchRegister : dst);
+  movl(tmp, src);
+  addl(tmp, Immediate(Smi::FromInt(constant)));
+  if (tmp.is(kScratchRegister)) {
+    j(overflow, on_not_smi_result);
+    testl(tmp, Immediate(kSmiTagMask));
+    j(not_zero, on_not_smi_result);
+    movl(dst, tmp);
+  } else {
+    movl(kScratchRegister, Immediate(kSmiTagMask));
+    cmovl(overflow, dst, kScratchRegister);
+    testl(dst, kScratchRegister);
+    j(not_zero, on_not_smi_result);
+  }
+}
+
+
+void MacroAssembler::SmiAddConstant(Register dst,
+                                    Register src,
+                                    int32_t constant,
+                                    Label* on_not_smi_result) {
+  ASSERT(Smi::IsValid(constant));
+  if (on_not_smi_result == NULL) {
+    if (dst.is(src)) {
+      movl(dst, src);
+    } else {
+      lea(dst, Operand(src, constant << kSmiTagSize));
+    }
+  } else {
+    if (!dst.is(src)) {
+      movl(dst, src);
+    }
+    addl(dst, Immediate(Smi::FromInt(constant)));
+    if (!dst.is(src)) {
+      j(overflow, on_not_smi_result);
+    } else {
+      Label result_ok;
+      j(no_overflow, &result_ok);
+      subl(dst, Immediate(Smi::FromInt(constant)));
+      jmp(on_not_smi_result);
+      bind(&result_ok);
+    }
+  }
+}
+
+
+void MacroAssembler::SmiSubConstant(Register dst,
+                                    Register src,
+                                    int32_t constant,
+                                    Label* on_not_smi_result) {
+  ASSERT(Smi::IsValid(constant));
+  Smi* smi_value = Smi::FromInt(constant);
+  if (dst.is(src)) {
+    // Optimistic subtract - may change value of dst register,
+    // if it has garbage bits in the higher half, but will not change
+    // the value as a tagged smi.
+    subl(dst, Immediate(smi_value));
+    if (on_not_smi_result != NULL) {
+      Label add_success;
+      j(no_overflow, &add_success);
+      addl(dst, Immediate(smi_value));
+      jmp(on_not_smi_result);
+      bind(&add_success);
+    }
+  } else {
+    UNIMPLEMENTED();  // Not used yet.
+  }
+}
+
+
+void MacroAssembler::SmiDiv(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result) {
+  ASSERT(!src2.is(rax));
+  ASSERT(!src2.is(rdx));
+  ASSERT(!src1.is(rdx));
+
+  // Check for 0 divisor (result is +/-Infinity).
+  Label positive_divisor;
+  testl(src2, src2);
+  j(zero, on_not_smi_result);
+  j(positive, &positive_divisor);
+  // Check for negative zero result. If the dividend is zero, and the
+  // divisor is negative, return a floating point negative zero.
+  testl(src1, src1);
+  j(zero, on_not_smi_result);
+  bind(&positive_divisor);
+
+  // Sign extend src1 into edx:eax.
+  if (!src1.is(rax)) {
+    movl(rax, src1);
+  }
+  cdq();
+
+  idivl(src2);
+  // Check for the corner case of dividing the most negative smi by
+  // -1. We cannot use the overflow flag, since it is not set by
+  // the idiv instruction.
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  cmpl(rax, Immediate(0x40000000));
+  j(equal, on_not_smi_result);
+  // Check that the remainder is zero.
+  testl(rdx, rdx);
+  j(not_zero, on_not_smi_result);
+  // Tag the result and store it in the destination register.
+  Integer32ToSmi(dst, rax);
+}
+
+
+void MacroAssembler::SmiMod(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!src2.is(rax));
+  ASSERT(!src2.is(rdx));
+  ASSERT(!src1.is(rdx));
+
+  testl(src2, src2);
+  j(zero, on_not_smi_result);
+
+  if (src1.is(rax)) {
+    // Must remember the value to see if a zero result should
+    // be a negative zero.
+    movl(kScratchRegister, rax);
+  } else {
+    movl(rax, src1);
+  }
+  // Sign extend eax into edx:eax.
+  cdq();
+  idivl(src2);
+  // Check for a negative zero result. If the result is zero, and the
+  // dividend is negative, return a floating point negative zero.
+  Label non_zero_result;
+  testl(rdx, rdx);
+  j(not_zero, &non_zero_result);
+  if (src1.is(rax)) {
+    testl(kScratchRegister, kScratchRegister);
+  } else {
+    testl(src1, src1);
+  }
+  j(negative, on_not_smi_result);
+  bind(&non_zero_result);
+  if (!dst.is(rdx)) {
+    movl(dst, rdx);
+  }
+}
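SmiDiv and SmiMod bail out on exactly the inputs whose JavaScript result is not a smi: division by zero, results that must be -0 (zero dividend with a negative divisor, or zero remainder with a negative dividend), and the one quotient that overflows the smi range. That overflow case is worth spelling out, since idivl sets no flag for it (a sketch, not V8 code):

    static const int32_t kMinSmiValue = -(1 << 30);
    // min_smi / -1 == 2^30, one past the largest smi (2^30 - 1); the code
    // detects it by comparing the quotient in rax against 0x40000000.
    static inline bool QuotientFitsSmi(int32_t lhs, int32_t rhs) {
      return !(lhs == kMinSmiValue && rhs == -1);
    }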
+
+
+void MacroAssembler::SmiNot(Register dst, Register src) {
+  if (dst.is(src)) {
+    not_(dst);
+    // Remove inverted smi-tag. The mask is sign-extended to 64 bits.
+    xor_(src, Immediate(kSmiTagMask));
+  } else {
+    ASSERT_EQ(0, kSmiTag);
+    lea(dst, Operand(src, kSmiTagMask));
+    not_(dst);
+  }
+}
+
+
+void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
+  if (!dst.is(src1)) {
+    movl(dst, src1);
+  }
+  and_(dst, src2);
+}
+
+
+void MacroAssembler::SmiAndConstant(Register dst, Register src, int constant) {
+  ASSERT(Smi::IsValid(constant));
+  if (!dst.is(src)) {
+    movl(dst, src);
+  }
+  and_(dst, Immediate(Smi::FromInt(constant)));
+}
+
+
+void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
+  if (!dst.is(src1)) {
+    movl(dst, src1);
+  }
+  or_(dst, src2);
+}
+
+
+void MacroAssembler::SmiOrConstant(Register dst, Register src, int constant) {
+  ASSERT(Smi::IsValid(constant));
+  if (!dst.is(src)) {
+    movl(dst, src);
+  }
+  or_(dst, Immediate(Smi::FromInt(constant)));
+}
+
+
+void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
+  if (!dst.is(src1)) {
+    movl(dst, src1);
+  }
+  xor_(dst, src2);
+}
+
+
+void MacroAssembler::SmiXorConstant(Register dst, Register src, int constant) {
+  ASSERT(Smi::IsValid(constant));
+  if (!dst.is(src)) {
+    movl(dst, src);
+  }
+  xor_(dst, Immediate(Smi::FromInt(constant)));
+}
+
+
+void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
+                                                     Register src,
+                                                     int shift_value) {
+  if (shift_value > 0) {
+    if (dst.is(src)) {
+      sarl(dst, Immediate(shift_value));
+      and_(dst, Immediate(~kSmiTagMask));
+    } else {
+      UNIMPLEMENTED();  // Not used.
+    }
+  }
+}
+
+
+void MacroAssembler::SmiShiftLogicalRightConstant(Register dst,
+                                                  Register src,
+                                                  int shift_value,
+                                                  Label* on_not_smi_result) {
+  // Logical right shift interprets its result as an *unsigned* number.
+  if (dst.is(src)) {
+    UNIMPLEMENTED();  // Not used.
+  } else {
+    movl(dst, src);
+    // Untag the smi.
+    sarl(dst, Immediate(kSmiTagSize));
+    if (shift_value < 2) {
+      // A negative smi shifted right two is in the positive smi range,
+      // but if shifted only by zero or one, it never is.
+      j(negative, on_not_smi_result);
+    }
+    if (shift_value > 0) {
+      // Do the right shift on the integer value.
+      shrl(dst, Immediate(shift_value));
+    }
+    // Re-tag the result.
+    addl(dst, dst);
+  }
+}
+
+
+void MacroAssembler::SmiShiftLeftConstant(Register dst,
+                                          Register src,
+                                          int shift_value,
+                                          Label* on_not_smi_result) {
+  if (dst.is(src)) {
+    UNIMPLEMENTED();  // Not used.
+  } else {
+    movl(dst, src);
+    if (shift_value > 0) {
+      // Treat dst as an untagged integer value equal to two times the
+      // smi value of src, i.e., already shifted left by one.
+      if (shift_value > 1) {
+        shll(dst, Immediate(shift_value - 1));
+      }
+      // Convert int result to smi, checking that it is in smi range.
+      ASSERT(kSmiTagSize == 1);  // adjust code if not the case
+      Integer32ToSmi(dst, dst, on_not_smi_result);
+    }
+  }
+}
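A numeric walk-through of SmiShiftLeftConstant's bookkeeping (the values are illustrative only):

    // src = smi 3, so the tagged word is 6 (= 3 << 1); shift_value = 4.
    int32_t dst = 6;  // movl(dst, src): two times the smi value
    dst <<= 4 - 1;    // shll(dst, shift_value - 1): dst == 48 == 3 << 4
    dst += dst;       // Integer32ToSmi: dst == 96 == tagged(3 << 4), range-checked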
+
+
+void MacroAssembler::SmiShiftLeft(Register dst,
+                                  Register src1,
+                                  Register src2,
+                                  Label* on_not_smi_result) {
+  ASSERT(!dst.is(rcx));
+  Label result_ok;
+  // Untag both operands.
+  SmiToInteger32(dst, src1);
+  SmiToInteger32(rcx, src2);
+  shll(dst);
+  // Check that the *signed* result fits in a smi.
+  Condition is_valid = CheckInteger32ValidSmiValue(dst);
+  j(is_valid, &result_ok);
+  // Restore the relevant bits of the source registers
+  // and call the slow version.
+  if (dst.is(src1)) {
+    shrl(dst);
+    Integer32ToSmi(dst, dst);
+  }
+  Integer32ToSmi(rcx, rcx);
+  jmp(on_not_smi_result);
+  bind(&result_ok);
+  Integer32ToSmi(dst, dst);
+}
+
+
+void MacroAssembler::SmiShiftLogicalRight(Register dst,
+                                          Register src1,
+                                          Register src2,
+                                          Label* on_not_smi_result) {
+  ASSERT(!dst.is(rcx));
+  Label result_ok;
+  // Untag both operands.
+  SmiToInteger32(dst, src1);
+  SmiToInteger32(rcx, src2);
+
+  shrl(dst);
+  // Check that the *unsigned* result fits in a smi.
+  // I.e., that it is a valid positive smi value. The positive smi
+  // values are 0..0x3fffffff, i.e., neither of the top-most two
+  // bits can be set.
+  //
+  // These two cases can only happen with shifts by 0 or 1 when
+  // handed a valid smi. If the answer cannot be represented by a
+  // smi, restore the left and right arguments, and jump to slow
+  // case. The low bit of the left argument may be lost, but only
+  // in a case where it is dropped anyway.
+  testl(dst, Immediate(0xc0000000));
+  j(zero, &result_ok);
+  if (dst.is(src1)) {
+    shll(dst);
+    Integer32ToSmi(dst, dst);
+  }
+  Integer32ToSmi(rcx, rcx);
+  jmp(on_not_smi_result);
+  bind(&result_ok);
+  // Smi-tag the result in answer.
+  Integer32ToSmi(dst, dst);
+}
+
+
+void MacroAssembler::SmiShiftArithmeticRight(Register dst,
+                                             Register src1,
+                                             Register src2) {
+  ASSERT(!dst.is(rcx));
+  // Untag both operands.
+  SmiToInteger32(dst, src1);
+  SmiToInteger32(rcx, src2);
+  // Shift as integer.
+  sarl(dst);
+  // Retag result.
+  Integer32ToSmi(dst, dst);
+}
+
+
+void MacroAssembler::SelectNonSmi(Register dst,
+                                  Register src1,
+                                  Register src2,
+                                  Label* on_not_smis) {
+  ASSERT(!dst.is(src1));
+  ASSERT(!dst.is(src2));
+  // Both operands must not be smis.
+#ifdef DEBUG
+  Condition not_both_smis = CheckNotBothSmi(src1, src2);
+  Check(not_both_smis, "Both registers were smis.");
+#endif
+  ASSERT_EQ(0, kSmiTag);
+  ASSERT_EQ(0, Smi::FromInt(0));
+  movq(kScratchRegister, Immediate(kSmiTagMask));
+  and_(kScratchRegister, src1);
+  testl(kScratchRegister, src2);
+  j(not_zero, on_not_smis);
+  // One operand is a smi.
+
+  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
+  // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
+  subq(kScratchRegister, Immediate(1));
+  // If src1 is a smi, then scratch register all 1s, else it is all 0s.
+  movq(dst, src1);
+  xor_(dst, src2);
+  and_(dst, kScratchRegister);
+  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
+  xor_(dst, src1);
+  // If src1 is a smi, dst is src2, else it is src1, i.e., a non-smi.
+}
+
+
 bool MacroAssembler::IsUnsafeSmi(Smi* value) {
   return false;
 }
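SelectNonSmi above relies on the tag being the low bit: masking src1 with kSmiTagMask and subtracting one yields an all-ones mask exactly when src1 is a smi, which turns the xor/and/xor sequence into a branch-free select. The same trick in C (a model, not V8 code):

    // Given that exactly one of a, b has its low (non-smi) bit set,
    // returns the one that is not a smi.
    static inline uint64_t SelectNonSmiWord(uint64_t a, uint64_t b) {
      uint64_t mask = (a & 1) - 1;  // all ones iff a is a smi (low bit 0)
      return ((a ^ b) & mask) ^ a;  // b when a is a smi, otherwise a
    }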
@@ -126,6 +126,215 @@ class MacroAssembler: public Assembler {
   // Store the code object for the given builtin in the target register.
   void GetBuiltinEntry(Register target, Builtins::JavaScript id);

+
+  // ---------------------------------------------------------------------------
+  // Smi tagging, untagging and operations on tagged smis.
+
+  // Conversions between tagged smi values and non-tagged integer values.
+
+  // Tag an integer value. The result must be known to be a valid smi value.
+  // Only uses the low 32 bits of the src register.
+  void Integer32ToSmi(Register dst, Register src);
+
+  // Tag an integer value if possible, or jump if the integer value cannot
+  // be represented as a smi. Only uses the low 32 bits of the src register.
+  void Integer32ToSmi(Register dst, Register src, Label* on_overflow);
+
+  // Adds a constant to src and tags the result as a smi.
+  // The result must be a valid smi.
+  void Integer64AddToSmi(Register dst, Register src, int constant);
+
+  // Convert smi to 32-bit integer. I.e., not sign extended into
+  // high 32 bits of destination.
+  void SmiToInteger32(Register dst, Register src);
+
+  // Convert smi to 64-bit integer (sign extended if necessary).
+  void SmiToInteger64(Register dst, Register src);
+
+  // Multiply a positive smi's integer value by a power of two.
+  // Provides result as 64-bit integer value.
+  void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
+                                             Register src,
+                                             int power);
+
+  // Functions performing a check on a known or potential smi. Returns
+  // a condition that is satisfied if the check is successful.
+
+  // Is the value a tagged smi.
+  Condition CheckSmi(Register src);
+
+  // Is the value not a tagged smi.
+  Condition CheckNotSmi(Register src);
+
+  // Is the value a positive tagged smi.
+  Condition CheckPositiveSmi(Register src);
+
+  // Is the value not a positive tagged smi.
+  Condition CheckNotPositiveSmi(Register src);
+
+  // Are both values tagged smis.
+  Condition CheckBothSmi(Register first, Register second);
+
+  // Is one of the values not a tagged smi.
+  Condition CheckNotBothSmi(Register first, Register second);
+
+  // Is the value the minimum smi value (since we are using
+  // two's complement numbers, negating the value is known to yield
+  // a non-smi value).
+  Condition CheckIsMinSmi(Register src);
+
+  // Check whether a tagged smi is equal to a constant.
+  Condition CheckSmiEqualsConstant(Register src, int constant);
+
+  // Checks whether a 32-bit integer value is valid for conversion
+  // to a smi.
+  Condition CheckInteger32ValidSmiValue(Register src);
+
+  // Test-and-jump functions. Typically combines a check function
+  // above with a conditional jump.
+
+  // Jump if the value cannot be represented by a smi.
+  void JumpIfNotValidSmiValue(Register src, Label* on_invalid);
+
+  // Jump to label if the value is a tagged smi.
+  void JumpIfSmi(Register src, Label* on_smi);
+
+  // Jump to label if the value is not a tagged smi.
+  void JumpIfNotSmi(Register src, Label* on_not_smi);
+
+  // Jump to label if the value is not a positive tagged smi.
+  void JumpIfNotPositiveSmi(Register src, Label* on_not_smi);
+
+  // Jump to label if the value is a tagged smi with value equal
+  // to the constant.
+  void JumpIfSmiEqualsConstant(Register src, int constant, Label* on_equals);
+
+  // Jump if either or both registers are not smi values.
+  void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);
+
+  // Operations on tagged smi values.
+
+  // Smis represent a subset of integers. The subset is always equivalent to
+  // a two's complement interpretation of a fixed number of bits.
+
+  // Optimistically adds an integer constant to a supposed smi.
+  // If the src is not a smi, or the result is not a smi, jump to
+  // the label.
+  void SmiTryAddConstant(Register dst,
+                         Register src,
+                         int32_t constant,
+                         Label* on_not_smi_result);
+
+  // Add an integer constant to a tagged smi, giving a tagged smi as result,
+  // or jumping to a label if the result cannot be represented by a smi.
+  // If the label is NULL, no testing on the result is done.
+  void SmiAddConstant(Register dst,
+                      Register src,
+                      int32_t constant,
+                      Label* on_not_smi_result);
+
+  // Subtract an integer constant from a tagged smi, giving a tagged smi as
+  // result, or jumping to a label if the result cannot be represented by a
+  // smi. If the label is NULL, no testing on the result is done.
+  void SmiSubConstant(Register dst,
+                      Register src,
+                      int32_t constant,
+                      Label* on_not_smi_result);
+
+  // Negating a smi can give a negative zero or a too-large positive value.
+  void SmiNeg(Register dst,
+              Register src,
+              Label* on_not_smi_result);
+
+  // Adds smi values and return the result as a smi.
+  // If dst is src1, then src1 will be destroyed, even if
+  // the operation is unsuccessful.
+  void SmiAdd(Register dst,
+              Register src1,
+              Register src2,
+              Label* on_not_smi_result);
+  // Subtracts smi values and return the result as a smi.
+  // If dst is src1, then src1 will be destroyed, even if
+  // the operation is unsuccessful.
+  void SmiSub(Register dst,
+              Register src1,
+              Register src2,
+              Label* on_not_smi_result);
+  // Multiplies smi values and return the result as a smi,
+  // if possible.
+  // If dst is src1, then src1 will be destroyed, even if
+  // the operation is unsuccessful.
+  void SmiMul(Register dst,
+              Register src1,
+              Register src2,
+              Label* on_not_smi_result);
+
+  // Divides one smi by another and returns the quotient.
+  // Clobbers rax and rdx registers.
+  void SmiDiv(Register dst,
+              Register src1,
+              Register src2,
+              Label* on_not_smi_result);
+
+  // Divides one smi by another and returns the remainder.
+  // Clobbers rax and rdx registers.
+  void SmiMod(Register dst,
+              Register src1,
+              Register src2,
+              Label* on_not_smi_result);
+
+  // Bitwise operations.
+  void SmiNot(Register dst, Register src);
+  void SmiAnd(Register dst, Register src1, Register src2);
+  void SmiOr(Register dst, Register src1, Register src2);
+  void SmiXor(Register dst, Register src1, Register src2);
+  void SmiAndConstant(Register dst, Register src1, int constant);
+  void SmiOrConstant(Register dst, Register src1, int constant);
+  void SmiXorConstant(Register dst, Register src1, int constant);
+
+  void SmiShiftLeftConstant(Register dst,
+                            Register src,
+                            int shift_value,
+                            Label* on_not_smi_result);
+  void SmiShiftLogicalRightConstant(Register dst,
+                                    Register src,
+                                    int shift_value,
+                                    Label* on_not_smi_result);
+  void SmiShiftArithmeticRightConstant(Register dst,
+                                       Register src,
+                                       int shift_value);
+
+  // Shifts a smi value to the left, and returns the result if that is a smi.
+  // Uses and clobbers rcx, so dst may not be rcx.
+  void SmiShiftLeft(Register dst,
+                    Register src1,
+                    Register src2,
+                    Label* on_not_smi_result);
+  // Shifts a smi value to the right, shifting in zero bits at the top, and
+  // returns the unsigned interpretation of the result if that is a smi.
+  // Uses and clobbers rcx, so dst may not be rcx.
+  void SmiShiftLogicalRight(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result);
+  // Shifts a smi value to the right, sign extending the top, and
+  // returns the signed interpretation of the result. That will always
+  // be a valid smi value, since it's numerically smaller than the
+  // original.
+  // Uses and clobbers rcx, so dst may not be rcx.
+  void SmiShiftArithmeticRight(Register dst,
+                               Register src1,
+                               Register src2);
+
+  // Specialized operations
+
+  // Select the non-smi register of two registers where exactly one is a
+  // smi. If neither is a smi, jump to the failure label.
+  void SelectNonSmi(Register dst,
+                    Register src1,
+                    Register src2,
+                    Label* on_not_smis);
+
   // ---------------------------------------------------------------------------
   // Macro instructions

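Taken together, the intended call-site shape is a guard followed by an operation that reuses the same bailout label; a sketch of a generated binary-op stub fragment (labels and registers illustrative):

    Label slow;
    __ JumpIfNotBothSmi(rax, rbx, &slow);  // both operands must be tagged smis
    __ SmiAdd(rax, rax, rbx, &slow);       // rax = rax + rbx, restored on overflow
    // Fall through with a valid smi result in rax; &slow re-dispatches.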
@@ -163,8 +163,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
   ASSERT(!scratch.is(name));

   // Check that the receiver isn't a smi.
-  __ testl(receiver, Immediate(kSmiTagMask));
-  __ j(zero, &miss);
+  __ JumpIfSmi(receiver, &miss);

   // Get the map of the receiver and compute the hash.
   __ movl(scratch, FieldOperand(name, String::kLengthOffset));
@@ -204,8 +203,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
                                       Register scratch,
                                       Label* miss_label) {
   // Check that the object isn't a smi.
-  __ testl(receiver_reg, Immediate(kSmiTagMask));
-  __ j(zero, miss_label);
+  __ JumpIfSmi(receiver_reg, miss_label);

   // Check that the map of the object hasn't changed.
   __ Cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
@@ -275,8 +273,7 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
                                            Register scratch,
                                            Label* miss_label) {
   // Check that the receiver isn't a smi.
-  __ testl(receiver, Immediate(kSmiTagMask));
-  __ j(zero, miss_label);
+  __ JumpIfSmi(receiver, miss_label);

   // Check that the object is a JS array.
   __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
@@ -296,8 +293,7 @@ static void GenerateStringCheck(MacroAssembler* masm,
                                 Label* smi,
                                 Label* non_string_object) {
   // Check that the object isn't a smi.
-  __ testl(receiver, Immediate(kSmiTagMask));
-  __ j(zero, smi);
+  __ JumpIfSmi(receiver, smi);

   // Check that the object is a string.
   __ movq(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
@@ -325,7 +321,7 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
   // rcx is also the receiver.
   __ lea(rcx, Operand(scratch, String::kLongLengthShift));
   __ shr(rax);  // rcx is implicit shift register.
-  __ shl(rax, Immediate(kSmiTagSize));
+  __ Integer32ToSmi(rax, rax);
   __ ret(0);

   // Check if the object is a JSValue wrapper.
@@ -535,8 +531,7 @@ static void CompileLoadInterceptor(Compiler* compiler,
   ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());

   // Check that the receiver isn't a smi.
-  __ testl(receiver, Immediate(kSmiTagMask));
-  __ j(zero, miss);
+  __ JumpIfSmi(receiver, miss);

   // Check that the maps haven't changed.
   Register reg =
@@ -701,8 +696,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,

   // Check that the receiver isn't a smi.
   if (check != NUMBER_CHECK) {
-    __ testl(rdx, Immediate(kSmiTagMask));
-    __ j(zero, &miss);
+    __ JumpIfSmi(rdx, &miss);
   }

   // Make sure that it's okay not to patch the on stack receiver
@@ -738,8 +732,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
     case NUMBER_CHECK: {
      Label fast;
       // Check that the object is a smi or a heap number.
-      __ testl(rdx, Immediate(kSmiTagMask));
-      __ j(zero, &fast);
+      __ JumpIfSmi(rdx, &fast);
       __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
       __ j(not_equal, &miss);
       __ bind(&fast);
@@ -830,8 +823,7 @@ Object* CallStubCompiler::CompileCallField(Object* object,
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));

   // Check that the receiver isn't a smi.
-  __ testl(rdx, Immediate(kSmiTagMask));
-  __ j(zero, &miss);
+  __ JumpIfSmi(rdx, &miss);

   // Do the right check and compute the holder register.
   Register reg =
@@ -841,8 +833,7 @@ Object* CallStubCompiler::CompileCallField(Object* object,
   GenerateFastPropertyLoad(masm(), rdi, reg, holder, index);

   // Check that the function really is a function.
-  __ testl(rdi, Immediate(kSmiTagMask));
-  __ j(zero, &miss);
+  __ JumpIfSmi(rdi, &miss);
   __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rbx);
   __ j(not_equal, &miss);

@@ -899,8 +890,7 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));

   // Check that the function really is a function.
-  __ testl(rax, Immediate(kSmiTagMask));
-  __ j(zero, &miss);
+  __ JumpIfSmi(rax, &miss);
   __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
   __ j(not_equal, &miss);

@@ -952,8 +942,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
   // object which can only happen for contextual calls. In this case,
   // the receiver cannot be a smi.
   if (object != holder) {
-    __ testl(rdx, Immediate(kSmiTagMask));
-    __ j(zero, &miss);
+    __ JumpIfSmi(rdx, &miss);
   }

   // Check that the maps haven't changed.
@@ -1112,8 +1101,7 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
   // object which can only happen for contextual loads. In this case,
   // the receiver cannot be a smi.
   if (object != holder) {
-    __ testl(rax, Immediate(kSmiTagMask));
-    __ j(zero, &miss);
+    __ JumpIfSmi(rax, &miss);
   }

   // Check that the maps haven't changed.
@@ -1335,8 +1323,7 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
   __ movq(rbx, Operand(rsp, 1 * kPointerSize));

   // Check that the object isn't a smi.
-  __ testl(rbx, Immediate(kSmiTagMask));
-  __ j(zero, &miss);
+  __ JumpIfSmi(rbx, &miss);

   // Check that the map of the object hasn't changed.
   __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
@@ -1424,8 +1411,7 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
   __ movq(rbx, Operand(rsp, 1 * kPointerSize));

   // Check that the object isn't a smi.
-  __ testl(rbx, Immediate(kSmiTagMask));
-  __ j(zero, &miss);
+  __ JumpIfSmi(rbx, &miss);

   // Check that the map of the object hasn't changed.
   __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
@@ -1631,8 +1617,7 @@ void StubCompiler::GenerateLoadCallback(JSObject* object,
                                         String* name,
                                         Label* miss) {
   // Check that the receiver isn't a smi.
-  __ testl(receiver, Immediate(kSmiTagMask));
-  __ j(zero, miss);
+  __ JumpIfSmi(receiver, miss);

   // Check that the maps haven't changed.
   Register reg =
@@ -1701,8 +1686,7 @@ void StubCompiler::GenerateLoadField(JSObject* object,
                                      String* name,
                                      Label* miss) {
   // Check that the receiver isn't a smi.
-  __ testl(receiver, Immediate(kSmiTagMask));
-  __ j(zero, miss);
+  __ JumpIfSmi(receiver, miss);

   // Check the prototype chain.
   Register reg =
@@ -1724,8 +1708,7 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
                                         String* name,
                                         Label* miss) {
   // Check that the receiver isn't a smi.
-  __ testl(receiver, Immediate(kSmiTagMask));
-  __ j(zero, miss);
+  __ JumpIfSmi(receiver, miss);

   // Check that the maps haven't changed.
   Register reg =
@@ -1766,8 +1749,7 @@ Object* ConstructStubCompiler::CompileConstructStub(
   // Load the initial map and verify that it is in fact a map.
   __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
   // Will both indicate a NULL and a Smi.
-  __ testq(rbx, Immediate(kSmiTagMask));
-  __ j(zero, &generic_stub_call);
+  __ JumpIfSmi(rbx, &generic_stub_call);
   __ CmpObjectType(rbx, MAP_TYPE, rcx);
   __ j(not_equal, &generic_stub_call);

@@ -65,8 +65,8 @@ void VirtualFrame::Enter() {
 #ifdef DEBUG
   // Verify that rdi contains a JS function. The following code
   // relies on rax being available for use.
-  __ testl(rdi, Immediate(kSmiTagMask));
-  __ Check(not_zero,
+  Condition not_smi = masm()->CheckNotSmi(rdi);
+  __ Check(not_smi,
            "VirtualFrame::Enter - rdi is not a function (smi check).");
   __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
   __ Check(equal,
@@ -47,40 +47,40 @@ assertEquals(one / (minus_one * minus_one), 1, "one / 1");
 assertEquals(one / (zero / minus_one), -Infinity, "one / -0 III");
 assertEquals(one / (zero / one), Infinity, "one / 0 II");

-assertEquals(one / (minus_four % two), -Infinity, "foo");
-assertEquals(one / (minus_four % minus_two), -Infinity, "foo");
-assertEquals(one / (four % two), Infinity, "foo");
-assertEquals(one / (four % minus_two), Infinity, "foo");
+assertEquals(one / (minus_four % two), -Infinity, "foo1");
+assertEquals(one / (minus_four % minus_two), -Infinity, "foo2");
+assertEquals(one / (four % two), Infinity, "foo3");
+assertEquals(one / (four % minus_two), Infinity, "foo4");

 // literal op variable

-assertEquals(one / (0 * minus_one), -Infinity, "bar");
-assertEquals(one / (-1 * zero), -Infinity, "bar");
-assertEquals(one / (0 * zero), Infinity, "bar");
-assertEquals(one / (-1 * minus_one), 1, "bar");
+assertEquals(one / (0 * minus_one), -Infinity, "bar1");
+assertEquals(one / (-1 * zero), -Infinity, "bar2");
+assertEquals(one / (0 * zero), Infinity, "bar3");
+assertEquals(one / (-1 * minus_one), 1, "bar4");

-assertEquals(one / (0 / minus_one), -Infinity, "baz");
-assertEquals(one / (0 / one), Infinity, "baz");
+assertEquals(one / (0 / minus_one), -Infinity, "baz1");
+assertEquals(one / (0 / one), Infinity, "baz2");

-assertEquals(one / (-4 % two), -Infinity, "baz");
-assertEquals(one / (-4 % minus_two), -Infinity, "baz");
-assertEquals(one / (4 % two), Infinity, "baz");
-assertEquals(one / (4 % minus_two), Infinity, "baz");
+assertEquals(one / (-4 % two), -Infinity, "baz3");
+assertEquals(one / (-4 % minus_two), -Infinity, "baz4");
+assertEquals(one / (4 % two), Infinity, "baz5");
+assertEquals(one / (4 % minus_two), Infinity, "baz6");

 // variable op literal

-assertEquals(one / (zero * -1), -Infinity, "fizz");
-assertEquals(one / (minus_one * 0), -Infinity, "fizz");
-assertEquals(one / (zero * 0), Infinity, "fizz");
-assertEquals(one / (minus_one * -1), 1, "fizz");
+assertEquals(one / (zero * -1), -Infinity, "fizz1");
+assertEquals(one / (minus_one * 0), -Infinity, "fizz2");
+assertEquals(one / (zero * 0), Infinity, "fizz3");
+assertEquals(one / (minus_one * -1), 1, "fizz4");

-assertEquals(one / (zero / -1), -Infinity, "buzz");
-assertEquals(one / (zero / 1), Infinity, "buzz");
+assertEquals(one / (zero / -1), -Infinity, "buzz1");
+assertEquals(one / (zero / 1), Infinity, "buzz2");

-assertEquals(one / (minus_four % 2), -Infinity, "buzz");
-assertEquals(one / (minus_four % -2), -Infinity, "buzz");
-assertEquals(one / (four % 2), Infinity, "buzz");
-assertEquals(one / (four % -2), Infinity, "buzz");
+assertEquals(one / (minus_four % 2), -Infinity, "buzz3");
+assertEquals(one / (minus_four % -2), -Infinity, "buzz4");
+assertEquals(one / (four % 2), Infinity, "buzz5");
+assertEquals(one / (four % -2), Infinity, "buzz6");

 // literal op literal

@@ -91,10 +91,10 @@ assertEquals(one / (-1 * 0), -Infinity, "fisk3");
 assertEquals(one / (0 * 0), Infinity, "fisk4");
 assertEquals(one / (-1 * -1), 1, "fisk5");

-assertEquals(one / (0 / -1), -Infinity, "hest");
-assertEquals(one / (0 / 1), Infinity, "hest");
+assertEquals(one / (0 / -1), -Infinity, "hest1");
+assertEquals(one / (0 / 1), Infinity, "hest2");

-assertEquals(one / (-4 % 2), -Infinity, "fiskhest");
-assertEquals(one / (-4 % -2), -Infinity, "fiskhest");
-assertEquals(one / (4 % 2), Infinity, "fiskhest");
-assertEquals(one / (4 % -2), Infinity, "fiskhest");
+assertEquals(one / (-4 % 2), -Infinity, "fiskhest1");
+assertEquals(one / (-4 % -2), -Infinity, "fiskhest2");
+assertEquals(one / (4 % 2), Infinity, "fiskhest3");
+assertEquals(one / (4 % -2), Infinity, "fiskhest4");