Remove the last remnants of the TranscendentalCache.

It was only used for Math.log, and even then only in full code and in %_MathLog. For crankshafted code, the Intel ports already used the FP operations directly, while the ARM/MIPS ports were a bit lazy and simply called the stub. The latter now call the C library directly, without any cache. It would be possible to generate machine code directly if somebody has the time; from what I've seen out in the wild, it should only be about a dozen instructions.
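
For reference, both remaining paths (Runtime_Math_log and the crankshaft call
through math_log_double_function) now boil down to a plain C library call. A
minimal standalone sketch of the new behavior follows; the function name is
illustrative only, not something from the tree:

  #include <cmath>
  #include <cstdio>

  // No cache lookup any more: just forward the untagged double to libm.
  static double MathLogWithoutCache(double x) { return std::log(x); }

  int main() {
    std::printf("%.15g\n", MathLogWithoutCache(2.718281828459045));  // ~1
    return 0;
  }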

LOG=y
R=yangguo@chromium.org

Review URL: https://codereview.chromium.org/113343003

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@18344 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
svenpanne@chromium.org 2013-12-18 10:40:26 +00:00
parent 37d5abc939
commit 84aa5263f3
46 changed files with 45 additions and 1444 deletions


@ -1231,216 +1231,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
}
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Untagged case: double input in d2, double result goes
// into d2.
// Tagged case: tagged input on top of stack and in r0,
// tagged result (heap number) goes into r0.
Label input_not_smi;
Label loaded;
Label calculate;
Label invalid_cache;
const Register scratch0 = r9;
Register scratch1 = no_reg; // will be r4
const Register cache_entry = r0;
const bool tagged = (argument_type_ == TAGGED);
if (tagged) {
// Argument is a number and is on stack and in r0.
// Load argument and check if it is a smi.
__ JumpIfNotSmi(r0, &input_not_smi);
// Input is a smi. Convert to double and load the low and high words
// of the double into r2, r3.
__ SmiToDouble(d7, r0);
__ vmov(r2, r3, d7);
__ b(&loaded);
__ bind(&input_not_smi);
// Check if input is a HeapNumber.
__ CheckMap(r0,
r1,
Heap::kHeapNumberMapRootIndex,
&calculate,
DONT_DO_SMI_CHECK);
// Input is a HeapNumber. Load it to a double register and store the
// low and high words into r2, r3.
__ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ vmov(r2, r3, d0);
} else {
// Input is untagged double in d2. Output goes to d2.
__ vmov(r2, r3, d2);
}
__ bind(&loaded);
// r2 = low 32 bits of double value
// r3 = high 32 bits of double value
// Compute hash (the shifts are arithmetic):
// h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
__ eor(r1, r2, Operand(r3));
__ eor(r1, r1, Operand(r1, ASR, 16));
__ eor(r1, r1, Operand(r1, ASR, 8));
ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
__ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
// r2 = low 32 bits of double value.
// r3 = high 32 bits of double value.
// r1 = TranscendentalCache::hash(double value).
Isolate* isolate = masm->isolate();
ExternalReference cache_array =
ExternalReference::transcendental_cache_array_address(isolate);
__ mov(cache_entry, Operand(cache_array));
// cache_entry points to cache array.
int cache_array_index
= type_ * sizeof(isolate->transcendental_cache()->caches_[0]);
__ ldr(cache_entry, MemOperand(cache_entry, cache_array_index));
// r0 points to the cache for the type type_.
// If NULL, the cache hasn't been initialized yet, so go through runtime.
__ cmp(cache_entry, Operand::Zero());
__ b(eq, &invalid_cache);
#ifdef DEBUG
// Check that the layout of cache elements match expectations.
{ TranscendentalCache::SubCache::Element test_elem[2];
char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
CHECK_EQ(0, elem_in0 - elem_start);
CHECK_EQ(kIntSize, elem_in1 - elem_start);
CHECK_EQ(2 * kIntSize, elem_out - elem_start);
}
#endif
// Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
__ add(r1, r1, Operand(r1, LSL, 1));
__ add(cache_entry, cache_entry, Operand(r1, LSL, 2));
// Check if cache matches: Double value is stored in uint32_t[2] array.
__ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
__ cmp(r2, r4);
__ cmp(r3, r5, eq);
__ b(ne, &calculate);
scratch1 = r4; // Start of scratch1 range.
// Cache hit. Load result, cleanup and return.
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(
counters->transcendental_cache_hit(), 1, scratch0, scratch1);
if (tagged) {
// Pop input value from stack and load result into r0.
__ pop();
__ mov(r0, Operand(r6));
} else {
// Load result into d2.
__ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
}
__ Ret();
__ bind(&calculate);
__ IncrementCounter(
counters->transcendental_cache_miss(), 1, scratch0, scratch1);
if (tagged) {
__ bind(&invalid_cache);
ExternalReference runtime_function =
ExternalReference(RuntimeFunction(), masm->isolate());
__ TailCallExternalReference(runtime_function, 1, 1);
} else {
Label no_update;
Label skip_cache;
// Call C function to calculate the result and update the cache.
// r0: precalculated cache entry address.
// r2 and r3: parts of the double value.
// Store r0, r2 and r3 on stack for later before calling C function.
__ Push(r3, r2, cache_entry);
GenerateCallCFunction(masm, scratch0);
__ GetCFunctionDoubleResult(d2);
// Try to update the cache. If we cannot allocate a
// heap number, we return the result without updating.
__ Pop(r3, r2, cache_entry);
__ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update);
__ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
__ stm(ia, cache_entry, r2.bit() | r3.bit() | r6.bit());
__ Ret();
__ bind(&invalid_cache);
// The cache is invalid. Call runtime which will recreate the
// cache.
__ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
__ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(r0);
__ CallRuntime(RuntimeFunction(), 1);
}
__ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ Ret();
__ bind(&skip_cache);
// Call C function to calculate the result and answer directly
// without updating the cache.
GenerateCallCFunction(masm, scratch0);
__ GetCFunctionDoubleResult(d2);
__ bind(&no_update);
// We return the value in d2 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Allocate an aligned object larger than a HeapNumber.
ASSERT(4 * kPointerSize >= HeapNumber::kSize);
__ mov(scratch0, Operand(4 * kPointerSize));
__ push(scratch0);
__ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
}
__ Ret();
}
}
void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
Register scratch) {
Isolate* isolate = masm->isolate();
__ push(lr);
__ PrepareCallCFunction(0, 1, scratch);
if (masm->use_eabi_hardfloat()) {
__ vmov(d0, d2);
} else {
__ vmov(r0, r1, d2);
}
AllowExternalCallThatCantCauseGC scope(masm);
switch (type_) {
case TranscendentalCache::LOG:
__ CallCFunction(ExternalReference::math_log_double_function(isolate),
0, 1);
break;
default:
UNIMPLEMENTED();
break;
}
__ pop(lr);
}
Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
switch (type_) {
// Add more cases when necessary.
case TranscendentalCache::LOG: return Runtime::kMath_log;
default:
UNIMPLEMENTED();
return Runtime::kAbort;
}
}
void MathPowStub::Generate(MacroAssembler* masm) {
const Register base = r1;
const Register exponent = r2;


@ -37,30 +37,6 @@ namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
class TranscendentalCacheStub: public PlatformCodeStub {
public:
enum ArgumentType {
TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
};
TranscendentalCacheStub(TranscendentalCache::Type type,
ArgumentType argument_type)
: type_(type), argument_type_(argument_type) { }
void Generate(MacroAssembler* masm);
private:
TranscendentalCache::Type type_;
ArgumentType argument_type_;
void GenerateCallCFunction(MacroAssembler* masm, Register scratch);
Major MajorKey() { return TranscendentalCache; }
int MinorKey() { return type_ | argument_type_; }
Runtime::FunctionId RuntimeFunction();
};
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)


@ -37,15 +37,6 @@ namespace v8 {
namespace internal {
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
switch (type) {
case TranscendentalCache::LOG: return &log;
default: UNIMPLEMENTED();
}
return NULL;
}
#define __ masm.


@ -3698,13 +3698,11 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::TAGGED);
// Load the argument on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
__ CallRuntime(Runtime::kMath_log, 1);
context()->Plug(r0);
}


@ -1224,9 +1224,10 @@ LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
LOperand* input = UseFixedDouble(instr->value(), d2);
LMathLog* result = new(zone()) LMathLog(input);
return MarkAsCall(DefineFixedDouble(result, d2), instr);
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
LOperand* input = UseFixedDouble(instr->value(), d0);
return MarkAsCall(DefineFixedDouble(new(zone()) LMathLog(input), d0), instr);
}


@ -488,10 +488,6 @@ class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
TranscendentalCache::Type transcendental_type() {
return hydrogen()->transcendental_type();
}
};


@ -1085,13 +1085,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::TranscendentalCache: {
__ ldr(r0, MemOperand(sp, 0));
TranscendentalCacheStub stub(instr->transcendental_type(),
TranscendentalCacheStub::TAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
default:
UNREACHABLE();
}
@ -2134,7 +2127,7 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
__ PrepareCallCFunction(0, 2, scratch0());
__ SetCallCDoubleArguments(left, right);
__ CallCFunction(
ExternalReference::double_fp_operation(Token::MOD, isolate()),
ExternalReference::mod_two_doubles_operation(isolate()),
0, 2);
// Move the result in the double result register.
__ GetCFunctionDoubleResult(result);
@ -3941,13 +3934,11 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
void LCodeGen::DoMathLog(LMathLog* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
// Set the context register to a GC-safe fake value. Clobbering it is
// OK because this instruction is marked as a call.
__ mov(cp, Operand::Zero());
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
__ PrepareCallCFunction(0, 1, scratch0());
__ SetCallCDoubleArguments(ToDoubleRegister(instr->value()));
__ CallCFunction(ExternalReference::math_log_double_function(isolate()),
0, 1);
__ GetCFunctionDoubleResult(ToDoubleRegister(instr->result()));
}


@ -1093,13 +1093,6 @@ ExternalReference ExternalReference::stress_deopt_count(Isolate* isolate) {
}
ExternalReference ExternalReference::transcendental_cache_array_address(
Isolate* isolate) {
return ExternalReference(
isolate->transcendental_cache()->cache_array_address());
}
ExternalReference ExternalReference::new_deoptimizer_function(
Isolate* isolate) {
return ExternalReference(
@ -1395,40 +1388,10 @@ ExternalReference ExternalReference::address_of_regexp_stack_memory_size(
#endif // V8_INTERPRETED_REGEXP
static double add_two_doubles(double x, double y) {
return x + y;
}
static double sub_two_doubles(double x, double y) {
return x - y;
}
static double mul_two_doubles(double x, double y) {
return x * y;
}
static double div_two_doubles(double x, double y) {
return x / y;
}
static double mod_two_doubles(double x, double y) {
return modulo(x, y);
}
static double math_log_double(double x) {
return log(x);
}
ExternalReference ExternalReference::math_log_double_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate,
FUNCTION_ADDR(math_log_double),
FUNCTION_ADDR(log),
BUILTIN_FP_CALL));
}
@ -1533,12 +1496,6 @@ ExternalReference ExternalReference::power_double_int_function(
}
static int native_compare_doubles(double y, double x) {
if (x == y) return EQUAL;
return x < y ? LESS : GREATER;
}
bool EvalComparison(Token::Value op, double op1, double op2) {
ASSERT(Token::IsCompareOp(op));
switch (op) {
@ -1556,42 +1513,14 @@ bool EvalComparison(Token::Value op, double op1, double op2) {
}
ExternalReference ExternalReference::double_fp_operation(
Token::Value operation, Isolate* isolate) {
typedef double BinaryFPOperation(double x, double y);
BinaryFPOperation* function = NULL;
switch (operation) {
case Token::ADD:
function = &add_two_doubles;
break;
case Token::SUB:
function = &sub_two_doubles;
break;
case Token::MUL:
function = &mul_two_doubles;
break;
case Token::DIV:
function = &div_two_doubles;
break;
case Token::MOD:
function = &mod_two_doubles;
break;
default:
UNREACHABLE();
}
ExternalReference ExternalReference::mod_two_doubles_operation(
Isolate* isolate) {
return ExternalReference(Redirect(isolate,
FUNCTION_ADDR(function),
FUNCTION_ADDR(modulo),
BUILTIN_FP_FP_CALL));
}
ExternalReference ExternalReference::compare_doubles(Isolate* isolate) {
return ExternalReference(Redirect(isolate,
FUNCTION_ADDR(native_compare_doubles),
BUILTIN_COMPARE_CALL));
}
#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference ExternalReference::debug_break(Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(Debug_Break)));


@ -718,7 +718,6 @@ class ExternalReference BASE_EMBEDDED {
Isolate* isolate);
static ExternalReference flush_icache_function(Isolate* isolate);
static ExternalReference perform_gc_function(Isolate* isolate);
static ExternalReference transcendental_cache_array_address(Isolate* isolate);
static ExternalReference delete_handle_scope_extensions(Isolate* isolate);
static ExternalReference get_date_field_function(Isolate* isolate);
@ -784,9 +783,7 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference new_space_high_promotion_mode_active_address(
Isolate* isolate);
static ExternalReference double_fp_operation(Token::Value operation,
Isolate* isolate);
static ExternalReference compare_doubles(Isolate* isolate);
static ExternalReference mod_two_doubles_operation(Isolate* isolate);
static ExternalReference power_double_double_function(Isolate* isolate);
static ExternalReference power_double_int_function(Isolate* isolate);


@ -56,7 +56,6 @@ namespace internal {
V(RecordWrite) \
V(StoreBufferOverflow) \
V(RegExpExec) \
V(TranscendentalCache) \
V(Instanceof) \
V(ConvertToDouble) \
V(WriteInt32ToHeapNumber) \


@ -89,7 +89,6 @@ namespace internal {
// generated code both in runtime and compiled code.
typedef double (*UnaryMathFunction)(double x);
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type);
UnaryMathFunction CreateExpFunction();
UnaryMathFunction CreateSqrtFunction();


@ -747,56 +747,6 @@ void Heap::CompletelyClearInstanceofCache() {
}
MaybeObject* TranscendentalCache::Get(Type type, double input) {
SubCache* cache = caches_[type];
if (cache == NULL) {
caches_[type] = cache = new SubCache(isolate_, type);
}
return cache->Get(input);
}
Address TranscendentalCache::cache_array_address() {
return reinterpret_cast<Address>(caches_);
}
double TranscendentalCache::SubCache::Calculate(double input) {
switch (type_) {
case LOG:
return fast_log(input);
default:
UNREACHABLE();
return 0.0; // Never happens.
}
}
MaybeObject* TranscendentalCache::SubCache::Get(double input) {
Converter c;
c.dbl = input;
int hash = Hash(c);
Element e = elements_[hash];
if (e.in[0] == c.integers[0] &&
e.in[1] == c.integers[1]) {
ASSERT(e.output != NULL);
isolate_->counters()->transcendental_cache_hit()->Increment();
return e.output;
}
double answer = Calculate(input);
isolate_->counters()->transcendental_cache_miss()->Increment();
Object* heap_number;
{ MaybeObject* maybe_heap_number =
isolate_->heap()->AllocateHeapNumber(answer);
if (!maybe_heap_number->ToObject(&heap_number)) return maybe_heap_number;
}
elements_[hash].in[0] = c.integers[0];
elements_[hash].in[1] = c.integers[1];
elements_[hash].output = heap_number;
return heap_number;
}
AlwaysAllocateScope::AlwaysAllocateScope() {
// We shouldn't hit any nested scopes, because that requires
// non-handle code to call handle code. The code still works but


@ -436,7 +436,6 @@ void Heap::ReportStatisticsAfterGC() {
void Heap::GarbageCollectionPrologue() {
{ AllowHeapAllocation for_the_first_part_of_prologue;
isolate_->transcendental_cache()->Clear();
ClearJSFunctionResultCaches();
gc_count_++;
unflattened_strings_length_ = 0;
@ -7755,29 +7754,6 @@ void Heap::GarbageCollectionGreedyCheck() {
#endif
TranscendentalCache::SubCache::SubCache(Isolate* isolate, Type t)
: type_(t),
isolate_(isolate) {
uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
uint32_t in1 = 0xffffffffu; // generated by the FPU.
for (int i = 0; i < kCacheSize; i++) {
elements_[i].in[0] = in0;
elements_[i].in[1] = in1;
elements_[i].output = NULL;
}
}
void TranscendentalCache::Clear() {
for (int i = 0; i < kNumberOfCaches; i++) {
if (caches_[i] != NULL) {
delete caches_[i];
caches_[i] = NULL;
}
}
}
void ExternalStringTable::CleanUp() {
int last = 0;
for (int i = 0; i < new_space_strings_.length(); ++i) {


@ -2894,85 +2894,6 @@ class RegExpResultsCache {
};
class TranscendentalCache {
public:
enum Type { LOG, kNumberOfCaches};
static const int kTranscendentalTypeBits = 3;
STATIC_ASSERT((1 << kTranscendentalTypeBits) >= kNumberOfCaches);
// Returns a heap number with f(input), where f is a math function specified
// by the 'type' argument.
MUST_USE_RESULT inline MaybeObject* Get(Type type, double input);
// The cache contains raw Object pointers. This method disposes of
// them before a garbage collection.
void Clear();
private:
class SubCache {
static const int kCacheSize = 512;
explicit SubCache(Isolate* isolate, Type t);
MUST_USE_RESULT inline MaybeObject* Get(double input);
inline double Calculate(double input);
struct Element {
uint32_t in[2];
Object* output;
};
union Converter {
double dbl;
uint32_t integers[2];
};
inline static int Hash(const Converter& c) {
uint32_t hash = (c.integers[0] ^ c.integers[1]);
hash ^= static_cast<int32_t>(hash) >> 16;
hash ^= static_cast<int32_t>(hash) >> 8;
return (hash & (kCacheSize - 1));
}
Element elements_[kCacheSize];
Type type_;
Isolate* isolate_;
// Allow access to the caches_ array as an ExternalReference.
friend class ExternalReference;
// Inline implementation of the cache.
friend class TranscendentalCacheStub;
// For evaluating value.
friend class TranscendentalCache;
DISALLOW_COPY_AND_ASSIGN(SubCache);
};
explicit TranscendentalCache(Isolate* isolate) : isolate_(isolate) {
for (int i = 0; i < kNumberOfCaches; ++i) caches_[i] = NULL;
}
~TranscendentalCache() {
for (int i = 0; i < kNumberOfCaches; ++i) delete caches_[i];
}
// Used to create an external reference.
inline Address cache_array_address();
// Instantiation
friend class Isolate;
// Inline implementation of the caching.
friend class TranscendentalCacheStub;
// Allow access to the caches_ array as an ExternalReference.
friend class ExternalReference;
Isolate* isolate_;
SubCache* caches_[kNumberOfCaches];
DISALLOW_COPY_AND_ASSIGN(TranscendentalCache);
};
// Abstract base class for checking whether a weak object should be retained.
class WeakObjectRetainer {
public:


@ -3880,7 +3880,7 @@ HInstruction* HUnaryMathOperation::New(
case kMathExp:
return H_CONSTANT_DOUBLE(fast_exp(d));
case kMathLog:
return H_CONSTANT_DOUBLE(fast_log(d));
return H_CONSTANT_DOUBLE(log(d));
case kMathSqrt:
return H_CONSTANT_DOUBLE(fast_sqrt(d));
case kMathPowHalf:


@ -2682,10 +2682,6 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
SetGVNFlag(kChangesNewSpacePromotion);
break;
case kMathLog:
set_representation(Representation::Double());
// These operations use the TranscendentalCache, so they may allocate.
SetGVNFlag(kChangesNewSpacePromotion);
break;
case kMathExp:
case kMathSqrt:
case kMathPowHalf:
@ -5287,13 +5283,6 @@ class HCallStub V8_FINAL : public HUnaryCall {
HValue* context() { return value(); }
void set_transcendental_type(TranscendentalCache::Type transcendental_type) {
transcendental_type_ = transcendental_type;
}
TranscendentalCache::Type transcendental_type() {
return transcendental_type_;
}
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(CallStub)
@ -5301,12 +5290,10 @@ class HCallStub V8_FINAL : public HUnaryCall {
private:
HCallStub(HValue* context, CodeStub::Major major_key, int argument_count)
: HUnaryCall(context, argument_count),
major_key_(major_key),
transcendental_type_(TranscendentalCache::kNumberOfCaches) {
major_key_(major_key) {
}
CodeStub::Major major_key_;
TranscendentalCache::Type transcendental_type_;
};


@ -10146,11 +10146,10 @@ void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateMathLog(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HCallStub* result = New<HCallStub>(CodeStub::TranscendentalCache, 1);
result->set_transcendental_type(TranscendentalCache::LOG);
Drop(1);
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathLog);
return ast_context()->ReturnInstruction(result, call->id());
}


@ -695,228 +695,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
}
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// TAGGED case:
// Input:
// esp[4]: tagged number input argument (should be number).
// esp[0]: return address.
// Output:
// eax: tagged double result.
// UNTAGGED case:
// Input::
// esp[0]: return address.
// xmm1: untagged double input argument
// Output:
// xmm1: untagged double result.
Label runtime_call;
Label runtime_call_clear_stack;
Label skip_cache;
const bool tagged = (argument_type_ == TAGGED);
if (tagged) {
// Test that eax is a number.
Label input_not_smi;
Label loaded;
__ mov(eax, Operand(esp, kPointerSize));
__ JumpIfNotSmi(eax, &input_not_smi, Label::kNear);
// Input is a smi. Untag and load it onto the FPU stack.
// Then load the low and high words of the double into ebx, edx.
STATIC_ASSERT(kSmiTagSize == 1);
__ sar(eax, 1);
__ sub(esp, Immediate(2 * kPointerSize));
__ mov(Operand(esp, 0), eax);
__ fild_s(Operand(esp, 0));
__ fst_d(Operand(esp, 0));
__ pop(edx);
__ pop(ebx);
__ jmp(&loaded, Label::kNear);
__ bind(&input_not_smi);
// Check if input is a HeapNumber.
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
Factory* factory = masm->isolate()->factory();
__ cmp(ebx, Immediate(factory->heap_number_map()));
__ j(not_equal, &runtime_call);
// Input is a HeapNumber. Push it on the FPU stack and load its
// low and high words into ebx, edx.
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
__ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
__ bind(&loaded);
} else { // UNTAGGED.
CpuFeatureScope scope(masm, SSE2);
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope sse4_scope(masm, SSE4_1);
__ pextrd(edx, xmm1, 0x1); // copy xmm1[63..32] to edx.
} else {
__ pshufd(xmm0, xmm1, 0x1);
__ movd(edx, xmm0);
}
__ movd(ebx, xmm1);
}
// ST[0] or xmm1 == double value
// ebx = low 32 bits of double value
// edx = high 32 bits of double value
// Compute hash (the shifts are arithmetic):
// h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
__ mov(ecx, ebx);
__ xor_(ecx, edx);
__ mov(eax, ecx);
__ sar(eax, 16);
__ xor_(ecx, eax);
__ mov(eax, ecx);
__ sar(eax, 8);
__ xor_(ecx, eax);
ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
__ and_(ecx,
Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
// ST[0] or xmm1 == double value.
// ebx = low 32 bits of double value.
// edx = high 32 bits of double value.
// ecx = TranscendentalCache::hash(double value).
ExternalReference cache_array =
ExternalReference::transcendental_cache_array_address(masm->isolate());
__ mov(eax, Immediate(cache_array));
int cache_array_index =
type_ * sizeof(masm->isolate()->transcendental_cache()->caches_[0]);
__ mov(eax, Operand(eax, cache_array_index));
// Eax points to the cache for the type type_.
// If NULL, the cache hasn't been initialized yet, so go through runtime.
__ test(eax, eax);
__ j(zero, &runtime_call_clear_stack);
#ifdef DEBUG
// Check that the layout of cache elements match expectations.
{ TranscendentalCache::SubCache::Element test_elem[2];
char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
CHECK_EQ(0, elem_in0 - elem_start);
CHECK_EQ(kIntSize, elem_in1 - elem_start);
CHECK_EQ(2 * kIntSize, elem_out - elem_start);
}
#endif
// Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12].
__ lea(ecx, Operand(ecx, ecx, times_2, 0));
__ lea(ecx, Operand(eax, ecx, times_4, 0));
// Check if cache matches: Double value is stored in uint32_t[2] array.
Label cache_miss;
__ cmp(ebx, Operand(ecx, 0));
__ j(not_equal, &cache_miss, Label::kNear);
__ cmp(edx, Operand(ecx, kIntSize));
__ j(not_equal, &cache_miss, Label::kNear);
// Cache hit!
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->transcendental_cache_hit(), 1);
__ mov(eax, Operand(ecx, 2 * kIntSize));
if (tagged) {
__ fstp(0);
__ ret(kPointerSize);
} else { // UNTAGGED.
CpuFeatureScope scope(masm, SSE2);
__ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
}
__ bind(&cache_miss);
__ IncrementCounter(counters->transcendental_cache_miss(), 1);
// Update cache with new value.
// We are short on registers, so use no_reg as scratch.
// This gives slightly larger code.
if (tagged) {
__ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
} else { // UNTAGGED.
CpuFeatureScope scope(masm, SSE2);
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
__ sub(esp, Immediate(kDoubleSize));
__ movsd(Operand(esp, 0), xmm1);
__ fld_d(Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
}
GenerateOperation(masm, type_);
__ mov(Operand(ecx, 0), ebx);
__ mov(Operand(ecx, kIntSize), edx);
__ mov(Operand(ecx, 2 * kIntSize), eax);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
if (tagged) {
__ ret(kPointerSize);
} else { // UNTAGGED.
CpuFeatureScope scope(masm, SSE2);
__ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
// Skip cache and return answer directly, only in untagged case.
__ bind(&skip_cache);
__ sub(esp, Immediate(kDoubleSize));
__ movsd(Operand(esp, 0), xmm1);
__ fld_d(Operand(esp, 0));
GenerateOperation(masm, type_);
__ fstp_d(Operand(esp, 0));
__ movsd(xmm1, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
// We return the value in xmm1 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Allocate an unused object bigger than a HeapNumber.
__ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
__ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
}
__ Ret();
}
// Call runtime, doing whatever allocation and cleanup is necessary.
if (tagged) {
__ bind(&runtime_call_clear_stack);
__ fstp(0);
__ bind(&runtime_call);
ExternalReference runtime =
ExternalReference(RuntimeFunction(), masm->isolate());
__ TailCallExternalReference(runtime, 1, 1);
} else { // UNTAGGED.
CpuFeatureScope scope(masm, SSE2);
__ bind(&runtime_call_clear_stack);
__ bind(&runtime_call);
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
__ movsd(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(eax);
__ CallRuntime(RuntimeFunction(), 1);
}
__ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
}
}
Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
switch (type_) {
case TranscendentalCache::LOG: return Runtime::kMath_log;
default:
UNIMPLEMENTED();
return Runtime::kAbort;
}
}
void TranscendentalCacheStub::GenerateOperation(
MacroAssembler* masm, TranscendentalCache::Type type) {
// Only free register is edi.
// Input value is on FP stack, and also in ebx/edx.
// Input value is possibly in xmm1.
// Address of result (a newly allocated HeapNumber) may be in eax.
ASSERT(type == TranscendentalCache::LOG);
__ fldln2();
__ fxch();
__ fyl2x();
}
void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
Register number) {
Label load_smi, done;


@ -40,30 +40,6 @@ void ArrayNativeCode(MacroAssembler* masm,
bool construct_call,
Label* call_generic_code);
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
class TranscendentalCacheStub: public PlatformCodeStub {
public:
enum ArgumentType {
TAGGED = 0,
UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
};
TranscendentalCacheStub(TranscendentalCache::Type type,
ArgumentType argument_type)
: type_(type), argument_type_(argument_type) {}
void Generate(MacroAssembler* masm);
static void GenerateOperation(MacroAssembler* masm,
TranscendentalCache::Type type);
private:
TranscendentalCache::Type type_;
ArgumentType argument_type_;
Major MajorKey() { return TranscendentalCache; }
int MinorKey() { return type_ | argument_type_; }
Runtime::FunctionId RuntimeFunction();
};
class StoreBufferOverflowStub: public PlatformCodeStub {
public:


@ -57,48 +57,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ masm.
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
&actual_size,
true));
if (buffer == NULL) {
// Fallback to library function if function cannot be created.
switch (type) {
case TranscendentalCache::LOG: return &log;
default: UNIMPLEMENTED();
}
}
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
// esp[1 * kPointerSize]: raw double input
// esp[0 * kPointerSize]: return address
// Move double input into registers.
__ push(ebx);
__ push(edx);
__ push(edi);
__ fld_d(Operand(esp, 4 * kPointerSize));
__ mov(ebx, Operand(esp, 4 * kPointerSize));
__ mov(edx, Operand(esp, 5 * kPointerSize));
TranscendentalCacheStub::GenerateOperation(&masm, type);
// The return value is expected to be on ST(0) of the FPU stack.
__ pop(edi);
__ pop(edx);
__ pop(ebx);
__ Ret();
CodeDesc desc;
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
UnaryMathFunction CreateExpFunction() {
if (!CpuFeatures::IsSupported(SSE2)) return &exp;
if (!FLAG_fast_math) return &exp;


@ -3660,13 +3660,11 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::TAGGED);
// Load the argument on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
__ CallRuntime(Runtime::kMath_log, 1);
context()->Plug(eax);
}


@ -1349,12 +1349,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::TranscendentalCache: {
TranscendentalCacheStub stub(instr->transcendental_type(),
TranscendentalCacheStub::TAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
default:
UNREACHABLE();
}
@ -2259,7 +2253,7 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
__ movsd(Operand(esp, 0 * kDoubleSize), left);
__ movsd(Operand(esp, 1 * kDoubleSize), right);
__ CallCFunction(
ExternalReference::double_fp_operation(Token::MOD, isolate()),
ExternalReference::mod_two_doubles_operation(isolate()),
4);
// Return value is in st(0) on ia32.
@ -2303,7 +2297,7 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
ASSERT(left.is(result));
X87PrepareToWrite(result);
__ CallCFunction(
ExternalReference::double_fp_operation(Token::MOD, isolate()),
ExternalReference::mod_two_doubles_operation(isolate()),
4);
// Return value is in st(0) on ia32.


@ -1309,8 +1309,7 @@ LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
LOperand* input = UseRegisterAtStart(instr->value());
LMathLog* result = new(zone()) LMathLog(input);
return DefineSameAsFirst(result);
return MarkAsCall(DefineSameAsFirst(new(zone()) LMathLog(input)), instr);
}


@ -498,10 +498,6 @@ class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
TranscendentalCache::Type transcendental_type() {
return hydrogen()->transcendental_type();
}
};


@ -1532,7 +1532,6 @@ Isolate::Isolate()
capture_stack_trace_for_uncaught_exceptions_(false),
stack_trace_for_uncaught_exceptions_frame_limit_(0),
stack_trace_for_uncaught_exceptions_options_(StackTrace::kOverview),
transcendental_cache_(NULL),
memory_allocator_(NULL),
keyed_lookup_cache_(NULL),
context_slot_cache_(NULL),
@ -1769,8 +1768,6 @@ Isolate::~Isolate() {
delete keyed_lookup_cache_;
keyed_lookup_cache_ = NULL;
delete transcendental_cache_;
transcendental_cache_ = NULL;
delete stub_cache_;
stub_cache_ = NULL;
delete stats_table_;
@ -1935,7 +1932,6 @@ bool Isolate::Init(Deserializer* des) {
string_tracker_ = new StringTracker();
string_tracker_->isolate_ = this;
compilation_cache_ = new CompilationCache(this);
transcendental_cache_ = new TranscendentalCache(this);
keyed_lookup_cache_ = new KeyedLookupCache();
context_slot_cache_ = new ContextSlotCache();
descriptor_lookup_cache_ = new DescriptorLookupCache();


@ -870,10 +870,6 @@ class Isolate {
DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
ThreadLocalTop* thread_local_top() { return &thread_local_top_; }
TranscendentalCache* transcendental_cache() const {
return transcendental_cache_;
}
MemoryAllocator* memory_allocator() {
return memory_allocator_;
}
@ -1270,7 +1266,6 @@ class Isolate {
bool capture_stack_trace_for_uncaught_exceptions_;
int stack_trace_for_uncaught_exceptions_frame_limit_;
StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_;
TranscendentalCache* transcendental_cache_;
MemoryAllocator* memory_allocator_;
KeyedLookupCache* keyed_lookup_cache_;
ContextSlotCache* context_slot_cache_;


@ -1273,224 +1273,6 @@ void BinaryOpICStub::InitializeInterfaceDescriptor(
}
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Untagged case: double input in f4, double result goes
// into f4.
// Tagged case: tagged input on top of stack and in a0,
// tagged result (heap number) goes into v0.
Label input_not_smi;
Label loaded;
Label calculate;
Label invalid_cache;
const Register scratch0 = t5;
const Register scratch1 = t3;
const Register cache_entry = a0;
const bool tagged = (argument_type_ == TAGGED);
if (tagged) {
// Argument is a number and is on stack and in a0.
// Load argument and check if it is a smi.
__ JumpIfNotSmi(a0, &input_not_smi);
// Input is a smi. Convert to double and load the low and high words
// of the double into a2, a3.
__ sra(t0, a0, kSmiTagSize);
__ mtc1(t0, f4);
__ cvt_d_w(f4, f4);
__ Move(a2, a3, f4);
__ Branch(&loaded);
__ bind(&input_not_smi);
// Check if input is a HeapNumber.
__ CheckMap(a0,
a1,
Heap::kHeapNumberMapRootIndex,
&calculate,
DONT_DO_SMI_CHECK);
// Input is a HeapNumber. Store the
// low and high words into a2, a3.
__ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
__ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
} else {
// Input is untagged double in f4. Output goes to f4.
__ Move(a2, a3, f4);
}
__ bind(&loaded);
// a2 = low 32 bits of double value.
// a3 = high 32 bits of double value.
// Compute hash (the shifts are arithmetic):
// h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
__ Xor(a1, a2, a3);
__ sra(t0, a1, 16);
__ Xor(a1, a1, t0);
__ sra(t0, a1, 8);
__ Xor(a1, a1, t0);
ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
__ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
// a2 = low 32 bits of double value.
// a3 = high 32 bits of double value.
// a1 = TranscendentalCache::hash(double value).
__ li(cache_entry, Operand(
ExternalReference::transcendental_cache_array_address(
masm->isolate())));
// a0 points to cache array.
__ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
Isolate::Current()->transcendental_cache()->caches_[0])));
// a0 points to the cache for the type type_.
// If NULL, the cache hasn't been initialized yet, so go through runtime.
__ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
#ifdef DEBUG
// Check that the layout of cache elements match expectations.
{ TranscendentalCache::SubCache::Element test_elem[2];
char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
CHECK_EQ(0, elem_in0 - elem_start);
CHECK_EQ(kIntSize, elem_in1 - elem_start);
CHECK_EQ(2 * kIntSize, elem_out - elem_start);
}
#endif
// Find the address of the a1'st entry in the cache, i.e., &a0[a1*12].
__ sll(t0, a1, 1);
__ Addu(a1, a1, t0);
__ sll(t0, a1, 2);
__ Addu(cache_entry, cache_entry, t0);
// Check if cache matches: Double value is stored in uint32_t[2] array.
__ lw(t0, MemOperand(cache_entry, 0));
__ lw(t1, MemOperand(cache_entry, 4));
__ lw(t2, MemOperand(cache_entry, 8));
__ Branch(&calculate, ne, a2, Operand(t0));
__ Branch(&calculate, ne, a3, Operand(t1));
// Cache hit. Load result, cleanup and return.
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(
counters->transcendental_cache_hit(), 1, scratch0, scratch1);
if (tagged) {
// Pop input value from stack and load result into v0.
__ Drop(1);
__ mov(v0, t2);
} else {
// Load result into f4.
__ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
}
__ Ret();
__ bind(&calculate);
__ IncrementCounter(
counters->transcendental_cache_miss(), 1, scratch0, scratch1);
if (tagged) {
__ bind(&invalid_cache);
__ TailCallExternalReference(ExternalReference(RuntimeFunction(),
masm->isolate()),
1,
1);
} else {
Label no_update;
Label skip_cache;
// Call C function to calculate the result and update the cache.
// a0: precalculated cache entry address.
// a2 and a3: parts of the double value.
// Store a0, a2 and a3 on stack for later before calling C function.
__ Push(a3, a2, cache_entry);
GenerateCallCFunction(masm, scratch0);
__ GetCFunctionDoubleResult(f4);
// Try to update the cache. If we cannot allocate a
// heap number, we return the result without updating.
__ Pop(a3, a2, cache_entry);
__ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
__ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
__ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
__ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
__ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
__ Ret(USE_DELAY_SLOT);
__ mov(v0, cache_entry);
__ bind(&invalid_cache);
// The cache is invalid. Call runtime which will recreate the
// cache.
__ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
__ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(a0);
__ CallRuntime(RuntimeFunction(), 1);
}
__ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
__ Ret();
__ bind(&skip_cache);
// Call C function to calculate the result and answer directly
// without updating the cache.
GenerateCallCFunction(masm, scratch0);
__ GetCFunctionDoubleResult(f4);
__ bind(&no_update);
// We return the value in f4 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Allocate an aligned object larger than a HeapNumber.
ASSERT(4 * kPointerSize >= HeapNumber::kSize);
__ li(scratch0, Operand(4 * kPointerSize));
__ push(scratch0);
__ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
}
__ Ret();
}
}
void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
Register scratch) {
__ push(ra);
__ PrepareCallCFunction(2, scratch);
if (IsMipsSoftFloatABI) {
__ Move(a0, a1, f4);
} else {
__ mov_d(f12, f4);
}
AllowExternalCallThatCantCauseGC scope(masm);
Isolate* isolate = masm->isolate();
switch (type_) {
case TranscendentalCache::LOG:
__ CallCFunction(
ExternalReference::math_log_double_function(isolate),
0, 1);
break;
default:
UNIMPLEMENTED();
break;
}
__ pop(ra);
}
Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
switch (type_) {
// Add more cases when necessary.
case TranscendentalCache::LOG: return Runtime::kMath_log;
default:
UNIMPLEMENTED();
return Runtime::kAbort;
}
}
void MathPowStub::Generate(MacroAssembler* masm) {
const Register base = a1;
const Register exponent = a2;


@ -38,30 +38,6 @@ namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
class TranscendentalCacheStub: public PlatformCodeStub {
public:
enum ArgumentType {
TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
};
TranscendentalCacheStub(TranscendentalCache::Type type,
ArgumentType argument_type)
: type_(type), argument_type_(argument_type) { }
void Generate(MacroAssembler* masm);
private:
TranscendentalCache::Type type_;
ArgumentType argument_type_;
void GenerateCallCFunction(MacroAssembler* masm, Register scratch);
Major MajorKey() { return TranscendentalCache; }
int MinorKey() { return type_ | argument_type_; }
Runtime::FunctionId RuntimeFunction();
};
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)


@ -37,15 +37,6 @@ namespace v8 {
namespace internal {
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
switch (type) {
case TranscendentalCache::LOG: return &log;
default: UNIMPLEMENTED();
}
return NULL;
}
#define __ masm.


@ -3748,14 +3748,11 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::TAGGED);
// Load the argument on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ mov(a0, result_register()); // Stub requires parameter in a0 and on tos.
__ CallStub(&stub);
__ CallRuntime(Runtime::kMath_log, 1);
context()->Plug(v0);
}


@ -1045,13 +1045,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::TranscendentalCache: {
__ lw(a0, MemOperand(sp, 0));
TranscendentalCacheStub stub(instr->transcendental_type(),
TranscendentalCacheStub::TAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
default:
UNREACHABLE();
}
@ -1945,7 +1938,7 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
__ PrepareCallCFunction(0, 2, scratch0());
__ SetCallCDoubleArguments(left, right);
__ CallCFunction(
ExternalReference::double_fp_operation(Token::MOD, isolate()),
ExternalReference::mod_two_doubles_operation(isolate()),
0, 2);
// Move the result in the double result register.
__ GetCFunctionDoubleResult(result);
@ -3864,13 +3857,11 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
void LCodeGen::DoMathLog(LMathLog* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
// Set the context register to a GC-safe fake value. Clobbering it is
// OK because this instruction is marked as a call.
__ mov(cp, zero_reg);
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
__ PrepareCallCFunction(0, 1, scratch0());
__ SetCallCDoubleArguments(ToDoubleRegister(instr->value()));
__ CallCFunction(ExternalReference::math_log_double_function(isolate()),
0, 1);
__ GetCFunctionDoubleResult(ToDoubleRegister(instr->result()));
}


@ -1201,9 +1201,10 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
LOperand* input = UseFixedDouble(instr->value(), f4);
LMathLog* result = new(zone()) LMathLog(input);
return MarkAsCall(DefineFixedDouble(result, f4), instr);
return MarkAsCall(DefineFixedDouble(new(zone()) LMathLog(input), f4), instr);
}


@ -487,10 +487,6 @@ class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
TranscendentalCache::Type transcendental_type() {
return hydrogen()->transcendental_type();
}
};


@ -302,7 +302,6 @@ double fast_##name(double x) { \
return (*fast_##name##_function)(x); \
}
UNARY_MATH_FUNCTION(log, CreateTranscendentalFunction(TranscendentalCache::LOG))
UNARY_MATH_FUNCTION(exp, CreateExpFunction())
UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
@ -527,7 +526,6 @@ void OS::PostSetUp() {
OS::memcopy_uint8_function =
CreateMemCopyUint8Function(&OS::MemCopyUint8Wrapper);
#endif
init_fast_log_function();
// fast_exp is initialized lazily.
init_fast_sqrt_function();
}


@ -196,7 +196,6 @@ double fast_##name(double x) { \
return (*fast_##name##_function)(x); \
}
UNARY_MATH_FUNCTION(log, CreateTranscendentalFunction(TranscendentalCache::LOG))
UNARY_MATH_FUNCTION(exp, CreateExpFunction())
UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())


@ -96,7 +96,6 @@ namespace internal {
double modulo(double x, double y);
// Custom implementation of math functions.
double fast_log(double input);
double fast_exp(double input);
double fast_sqrt(double input);
// The custom exp implementation needs 16KB of lookup data; initialize it


@ -7732,7 +7732,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_log) {
isolate->counters()->math_log()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
return isolate->heap()->AllocateHeapNumber(fast_log(x));
return isolate->heap()->AllocateHeapNumber(log(x));
}


@ -378,30 +378,10 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
17,
"Debug::step_in_fp_addr()");
#endif
Add(ExternalReference::double_fp_operation(Token::ADD, isolate).address(),
UNCLASSIFIED,
18,
"add_two_doubles");
Add(ExternalReference::double_fp_operation(Token::SUB, isolate).address(),
UNCLASSIFIED,
19,
"sub_two_doubles");
Add(ExternalReference::double_fp_operation(Token::MUL, isolate).address(),
UNCLASSIFIED,
20,
"mul_two_doubles");
Add(ExternalReference::double_fp_operation(Token::DIV, isolate).address(),
UNCLASSIFIED,
21,
"div_two_doubles");
Add(ExternalReference::double_fp_operation(Token::MOD, isolate).address(),
Add(ExternalReference::mod_two_doubles_operation(isolate).address(),
UNCLASSIFIED,
22,
"mod_two_doubles");
Add(ExternalReference::compare_doubles(isolate).address(),
UNCLASSIFIED,
23,
"compare_doubles");
#ifndef V8_INTERPRETED_REGEXP
Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
UNCLASSIFIED,
@ -429,10 +409,6 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
UNCLASSIFIED,
29,
"KeyedLookupCache::field_offsets()");
Add(ExternalReference::transcendental_cache_array_address(isolate).address(),
UNCLASSIFIED,
30,
"TranscendentalCache::caches()");
Add(ExternalReference::handle_scope_next_address(isolate).address(),
UNCLASSIFIED,
31,


@ -248,8 +248,6 @@ namespace internal {
SC(math_pow, V8.MathPow) \
SC(math_round, V8.MathRound) \
SC(math_sqrt, V8.MathSqrt) \
SC(transcendental_cache_hit, V8.TranscendentalCacheHit) \
SC(transcendental_cache_miss, V8.TranscendentalCacheMiss) \
SC(stack_interrupts, V8.StackInterrupts) \
SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks) \
SC(bounds_checks_eliminated, V8.BoundsChecksEliminated) \


@ -597,220 +597,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
}
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// TAGGED case:
// Input:
// rsp[8] : argument (should be number).
// rsp[0] : return address.
// Output:
// rax: tagged double result.
// UNTAGGED case:
// Input::
// rsp[0] : return address.
// xmm1 : untagged double input argument
// Output:
// xmm1 : untagged double result.
Label runtime_call;
Label runtime_call_clear_stack;
Label skip_cache;
const bool tagged = (argument_type_ == TAGGED);
if (tagged) {
Label input_not_smi, loaded;
// Test that rax is a number.
StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
__ movq(rax, args.GetArgumentOperand(0));
__ JumpIfNotSmi(rax, &input_not_smi, Label::kNear);
// Input is a smi. Untag and load it onto the FPU stack.
// Then load the bits of the double into rbx.
__ SmiToInteger32(rax, rax);
__ subq(rsp, Immediate(kDoubleSize));
__ Cvtlsi2sd(xmm1, rax);
__ movsd(Operand(rsp, 0), xmm1);
__ movq(rbx, xmm1);
__ movq(rdx, xmm1);
__ fld_d(Operand(rsp, 0));
__ addq(rsp, Immediate(kDoubleSize));
__ jmp(&loaded, Label::kNear);
__ bind(&input_not_smi);
// Check if input is a HeapNumber.
__ LoadRoot(rbx, Heap::kHeapNumberMapRootIndex);
__ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
__ j(not_equal, &runtime_call);
// Input is a HeapNumber. Push it on the FPU stack and load its
// bits into rbx.
__ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
__ MoveDouble(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
__ movq(rdx, rbx);
__ bind(&loaded);
} else { // UNTAGGED.
__ movq(rbx, xmm1);
__ movq(rdx, xmm1);
}
// ST[0] == double value, if TAGGED.
// rbx = bits of double value.
// rdx = also bits of double value.
// Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
// h = h0 = bits ^ (bits >> 32);
// h ^= h >> 16;
// h ^= h >> 8;
// h = h & (cacheSize - 1);
// or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
__ sar(rdx, Immediate(32));
__ xorl(rdx, rbx);
__ movl(rcx, rdx);
__ movl(rax, rdx);
__ movl(rdi, rdx);
__ sarl(rdx, Immediate(8));
__ sarl(rcx, Immediate(16));
__ sarl(rax, Immediate(24));
__ xorl(rcx, rdx);
__ xorl(rax, rdi);
__ xorl(rcx, rax);
ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
__ andl(rcx, Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
// ST[0] == double value.
// rbx = bits of double value.
// rcx = TranscendentalCache::hash(double value).
ExternalReference cache_array =
ExternalReference::transcendental_cache_array_address(masm->isolate());
__ Move(rax, cache_array);
int cache_array_index =
type_ * sizeof(masm->isolate()->transcendental_cache()->caches_[0]);
__ movq(rax, Operand(rax, cache_array_index));
// rax points to the cache for the type type_.
// If NULL, the cache hasn't been initialized yet, so go through runtime.
__ testq(rax, rax);
__ j(zero, &runtime_call_clear_stack); // Only clears stack if TAGGED.
#ifdef DEBUG
// Check that the layout of cache elements match expectations.
{ // NOLINT - doesn't like a single brace on a line.
TranscendentalCache::SubCache::Element test_elem[2];
char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
// Two uint_32's and a pointer per element.
CHECK_EQ(2 * kIntSize + 1 * kPointerSize,
static_cast<int>(elem2_start - elem_start));
CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
}
#endif
// Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
__ addl(rcx, rcx);
__ lea(rcx, Operand(rax, rcx, times_8, 0));
// Check if cache matches: Double value is stored in uint32_t[2] array.
Label cache_miss;
__ cmpq(rbx, Operand(rcx, 0));
__ j(not_equal, &cache_miss, Label::kNear);
// Cache hit!
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->transcendental_cache_hit(), 1);
__ movq(rax, Operand(rcx, 2 * kIntSize));
if (tagged) {
__ fstp(0); // Clear FPU stack.
__ ret(kPointerSize);
} else { // UNTAGGED.
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ Ret();
}
__ bind(&cache_miss);
__ IncrementCounter(counters->transcendental_cache_miss(), 1);
// Update cache with new value.
if (tagged) {
__ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
} else { // UNTAGGED.
__ AllocateHeapNumber(rax, rdi, &skip_cache);
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
__ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
}
GenerateOperation(masm, type_);
__ movq(Operand(rcx, 0), rbx);
__ movq(Operand(rcx, 2 * kIntSize), rax);
__ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
if (tagged) {
__ ret(kPointerSize);
} else { // UNTAGGED.
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ Ret();
// Skip cache and return answer directly, only in untagged case.
__ bind(&skip_cache);
__ subq(rsp, Immediate(kDoubleSize));
__ movsd(Operand(rsp, 0), xmm1);
__ fld_d(Operand(rsp, 0));
GenerateOperation(masm, type_);
__ fstp_d(Operand(rsp, 0));
__ movsd(xmm1, Operand(rsp, 0));
__ addq(rsp, Immediate(kDoubleSize));
// We return the value in xmm1 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Allocate an unused object bigger than a HeapNumber.
__ Push(Smi::FromInt(2 * kDoubleSize));
__ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
}
__ Ret();
}
// Call runtime, doing whatever allocation and cleanup is necessary.
if (tagged) {
__ bind(&runtime_call_clear_stack);
__ fstp(0);
__ bind(&runtime_call);
__ TailCallExternalReference(
ExternalReference(RuntimeFunction(), masm->isolate()), 1, 1);
} else { // UNTAGGED.
__ bind(&runtime_call_clear_stack);
__ bind(&runtime_call);
__ AllocateHeapNumber(rax, rdi, &skip_cache);
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(rax);
__ CallRuntime(RuntimeFunction(), 1);
}
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ Ret();
}
}
Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
switch (type_) {
// Add more cases when necessary.
case TranscendentalCache::LOG: return Runtime::kMath_log;
default:
UNIMPLEMENTED();
return Runtime::kAbort;
}
}
void TranscendentalCacheStub::GenerateOperation(
MacroAssembler* masm, TranscendentalCache::Type type) {
// Registers:
// rax: Newly allocated HeapNumber, which must be preserved.
// rbx: Bits of input double. Must be preserved.
// rcx: Pointer to cache entry. Must be preserved.
// st(0): Input double
ASSERT(type == TranscendentalCache::LOG);
__ fldln2();
__ fxch();
__ fyl2x();
}
void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
Label* not_numbers) {
Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;


@ -37,31 +37,6 @@ namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
class TranscendentalCacheStub: public PlatformCodeStub {
public:
enum ArgumentType {
TAGGED = 0,
UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
};
explicit TranscendentalCacheStub(TranscendentalCache::Type type,
ArgumentType argument_type)
: type_(type), argument_type_(argument_type) {}
void Generate(MacroAssembler* masm);
static void GenerateOperation(MacroAssembler* masm,
TranscendentalCache::Type type);
private:
TranscendentalCache::Type type_;
ArgumentType argument_type_;
Major MajorKey() { return TranscendentalCache; }
int MinorKey() { return type_ | argument_type_; }
Runtime::FunctionId RuntimeFunction();
};
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)


@ -55,47 +55,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ masm.
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
&actual_size,
true));
if (buffer == NULL) {
// Fallback to library function if function cannot be created.
switch (type) {
case TranscendentalCache::LOG: return &log;
default: UNIMPLEMENTED();
}
}
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
// xmm0: raw double input.
// Move double input into registers.
__ push(rbx);
__ push(rdi);
__ movq(rbx, xmm0);
__ push(rbx);
__ fld_d(Operand(rsp, 0));
TranscendentalCacheStub::GenerateOperation(&masm, type);
// The return value is expected to be in xmm0.
__ fstp_d(Operand(rsp, 0));
__ pop(rbx);
__ movq(xmm0, rbx);
__ pop(rdi);
__ pop(rbx);
__ Ret();
CodeDesc desc;
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
UnaryMathFunction CreateExpFunction() {
if (!FLAG_fast_math) return &exp;
size_t actual_size;


@ -3628,13 +3628,11 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::TAGGED);
// Load the argument on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
__ CallRuntime(Runtime::kMath_log, 1);
context()->Plug(rax);
}


@ -968,12 +968,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::TranscendentalCache: {
TranscendentalCacheStub stub(instr->transcendental_type(),
TranscendentalCacheStub::TAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
default:
UNREACHABLE();
}
@ -1904,7 +1898,7 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
__ movaps(xmm_scratch, left);
ASSERT(right.is(xmm1));
__ CallCFunction(
ExternalReference::double_fp_operation(Token::MOD, isolate()), 2);
ExternalReference::mod_two_doubles_operation(isolate()), 2);
__ movaps(result, xmm_scratch);
break;
}


@ -1223,8 +1223,7 @@ LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
LOperand* input = UseRegisterAtStart(instr->value());
LMathLog* result = new(zone()) LMathLog(input);
return DefineSameAsFirst(result);
return MarkAsCall(DefineSameAsFirst(new(zone()) LMathLog(input)), instr);
}


@ -489,10 +489,6 @@ class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
TranscendentalCache::Type transcendental_type() {
return hydrogen()->transcendental_type();
}
};