Update V8 to version 3.0.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@5920 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
kasperl@chromium.org 2010-12-07 09:11:56 +00:00
parent 209eb1c219
commit e5860bd6a8
331 changed files with 52186 additions and 2189 deletions

View File

@ -523,7 +523,8 @@ SAMPLE_FLAGS = {
'CCFLAGS': ['-O2']
},
'mode:debug': {
'CCFLAGS': ['-g', '-O0']
'CCFLAGS': ['-g', '-O0'],
'CPPDEFINES': ['DEBUG']
},
'prof:oprofile': {
'LIBPATH': ['/usr/lib32', '/usr/lib32/oprofile'],
@ -578,13 +579,14 @@ SAMPLE_FLAGS = {
'LINKFLAGS': ['/MACHINE:X64', '/STACK:2091752']
},
'mode:debug': {
'CCFLAGS': ['/Od'],
'LINKFLAGS': ['/DEBUG'],
'CCFLAGS': ['/Od'],
'LINKFLAGS': ['/DEBUG'],
'CPPDEFINES': ['DEBUG'],
'msvcrt:static': {
'CCFLAGS': ['/MTd']
'CCFLAGS': ['/MTd']
},
'msvcrt:shared': {
'CCFLAGS': ['/MDd']
'CCFLAGS': ['/MDd']
}
}
}

0
include/v8-debug.h Executable file → Normal file

99
include/v8-testing.h Normal file

@ -0,0 +1,99 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_V8_TEST_H_
#define V8_V8_TEST_H_
#include "v8.h"
#ifdef _WIN32
// Setup for Windows DLL export/import. See v8.h in this directory for
// information on how to build/use V8 as a DLL.
#if defined(BUILDING_V8_SHARED) && defined(USING_V8_SHARED)
#error both BUILDING_V8_SHARED and USING_V8_SHARED are set - please check the\
build configuration to ensure that at most one of these is set
#endif
#ifdef BUILDING_V8_SHARED
#define V8EXPORT __declspec(dllexport)
#elif USING_V8_SHARED
#define V8EXPORT __declspec(dllimport)
#else
#define V8EXPORT
#endif
#else // _WIN32
// Setup for Linux shared library export. See v8.h in this directory for
// information on how to build/use V8 as shared library.
#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
#define V8EXPORT __attribute__ ((visibility("default")))
#else // defined(__GNUC__) && (__GNUC__ >= 4)
#define V8EXPORT
#endif // defined(__GNUC__) && (__GNUC__ >= 4)
#endif // _WIN32
/**
* Testing support for the V8 JavaScript engine.
*/
namespace v8 {
class V8EXPORT Testing {
public:
enum StressType {
kStressTypeOpt,
kStressTypeDeopt
};
/**
* Set the type of stressing to do. The default if not set is kStressTypeOpt.
*/
static void SetStressRunType(StressType type);
/**
* Get the number of runs of a given test that is required to get the full
* stress coverage.
*/
static int GetStressRuns();
/**
* Indicate the number of the run which is about to start. The value of run
* should be between 0 and one less than the result from GetStressRuns()
*/
static void PrepareStressRun(int run);
};
} // namespace v8
#undef V8EXPORT
#endif // V8_V8_TEST_H_
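For orientation only, here is a minimal sketch (not part of this header) of how an embedder is expected to drive these three calls; it mirrors the stress loop added to the shell sample later in this commit, and RunTests() is a hypothetical stand-in for the embedder's own test entry point.

// Illustrative sketch only; RunTests() is a hypothetical embedder function.
#include "v8-testing.h"

int RunTests();  // Runs the embedder's test suite once, returns 0 on success.

int RunStressed() {
  // Stress the optimizer; use kStressTypeDeopt to stress the deoptimizer.
  v8::Testing::SetStressRunType(v8::Testing::kStressTypeOpt);
  int result = 0;
  const int stress_runs = v8::Testing::GetStressRuns();
  for (int i = 0; i < stress_runs && result == 0; i++) {
    v8::Testing::PrepareStressRun(i);  // i is in [0, GetStressRuns() - 1].
    result = RunTests();
  }
  return result;
}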

View File

@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <v8.h>
#include <v8-testing.h>
#include <fcntl.h>
#include <string.h>
#include <stdio.h>
@ -44,10 +45,10 @@ v8::Handle<v8::Value> Quit(const v8::Arguments& args);
v8::Handle<v8::Value> Version(const v8::Arguments& args);
v8::Handle<v8::String> ReadFile(const char* name);
void ReportException(v8::TryCatch* handler);
void SetFlagsFromString(const char* flags);
int RunMain(int argc, char* argv[]) {
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
v8::HandleScope handle_scope;
// Create a template for the global object.
v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
@ -63,11 +64,11 @@ int RunMain(int argc, char* argv[]) {
global->Set(v8::String::New("version"), v8::FunctionTemplate::New(Version));
// Create a new execution environment containing the built-in
// functions
v8::Handle<v8::Context> context = v8::Context::New(NULL, global);
// Enter the newly created execution environment.
v8::Context::Scope context_scope(context);
v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
bool run_shell = (argc == 1);
for (int i = 1; i < argc; i++) {
// Enter the execution environment before evaluating any code.
v8::Context::Scope context_scope(context);
const char* str = argv[i];
if (strcmp(str, "--shell") == 0) {
run_shell = true;
@ -99,12 +100,48 @@ int RunMain(int argc, char* argv[]) {
}
}
if (run_shell) RunShell(context);
context.Dispose();
return 0;
}
int main(int argc, char* argv[]) {
int result = RunMain(argc, argv);
// Figure out if we're requested to stress the optimization
// infrastructure by running tests multiple times and forcing
// optimization in the last run.
bool FLAG_stress_opt = false;
bool FLAG_stress_deopt = false;
for (int i = 0; i < argc; i++) {
if (strcmp(argv[i], "--stress-opt") == 0) {
FLAG_stress_opt = true;
argv[i] = NULL;
} else if (strcmp(argv[i], "--stress-deopt") == 0) {
FLAG_stress_deopt = true;
argv[i] = NULL;
} else if (strcmp(argv[i], "--noalways-opt") == 0) {
// No support for stressing if we can't use --always-opt.
FLAG_stress_opt = false;
FLAG_stress_deopt = false;
break;
}
}
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
int result = 0;
if (FLAG_stress_opt || FLAG_stress_deopt) {
v8::Testing::SetStressRunType(FLAG_stress_opt
? v8::Testing::kStressTypeOpt
: v8::Testing::kStressTypeDeopt);
int stress_runs = v8::Testing::GetStressRuns();
for (int i = 0; i < stress_runs && result == 0; i++) {
printf("============ Stress %d/%d ============\n",
i + 1, stress_runs);
v8::Testing::PrepareStressRun(i);
result = RunMain(argc, argv);
}
} else {
result = RunMain(argc, argv);
}
v8::V8::Dispose();
return result;
}
@ -221,6 +258,8 @@ v8::Handle<v8::String> ReadFile(const char* name) {
void RunShell(v8::Handle<v8::Context> context) {
printf("V8 version %s\n", v8::V8::GetVersion());
static const int kBufferSize = 256;
// Enter the execution environment before evaluating any code.
v8::Context::Scope context_scope(context);
while (true) {
char buffer[kBufferSize];
printf("> ");
@ -306,3 +345,8 @@ void ReportException(v8::TryCatch* try_catch) {
}
}
}
void SetFlagsFromString(const char* flags) {
v8::V8::SetFlagsFromString(flags, strlen(flags));
}

View File

@ -40,6 +40,7 @@ SOURCES = {
api.cc
assembler.cc
ast.cc
atomicops_internals_x86_gcc.cc
bignum.cc
bignum-dtoa.cc
bootstrapper.cc
@ -59,6 +60,7 @@ SOURCES = {
dateparser.cc
debug-agent.cc
debug.cc
deoptimizer.cc
disassembler.cc
diy-fp.cc
dtoa.cc
@ -76,10 +78,13 @@ SOURCES = {
hashmap.cc
heap-profiler.cc
heap.cc
hydrogen.cc
hydrogen-instructions.cc
ic.cc
interpreter-irregexp.cc
jsregexp.cc
jump-target.cc
lithium-allocator.cc
liveedit.cc
log-utils.cc
log.cc
@ -99,6 +104,8 @@ SOURCES = {
register-allocator.cc
rewriter.cc
runtime.cc
runtime-profiler.cc
safepoint-table.cc
scanner-base.cc
scanner.cc
scopeinfo.cc
@ -134,11 +141,14 @@ SOURCES = {
arm/constants-arm.cc
arm/cpu-arm.cc
arm/debug-arm.cc
arm/deoptimizer-arm.cc
arm/disasm-arm.cc
arm/frames-arm.cc
arm/full-codegen-arm.cc
arm/ic-arm.cc
arm/jump-target-arm.cc
arm/lithium-arm.cc
arm/lithium-codegen-arm.cc
arm/macro-assembler-arm.cc
arm/regexp-macro-assembler-arm.cc
arm/register-allocator-arm.cc
@ -172,11 +182,14 @@ SOURCES = {
ia32/codegen-ia32.cc
ia32/cpu-ia32.cc
ia32/debug-ia32.cc
ia32/deoptimizer-ia32.cc
ia32/disasm-ia32.cc
ia32/frames-ia32.cc
ia32/full-codegen-ia32.cc
ia32/ic-ia32.cc
ia32/jump-target-ia32.cc
ia32/lithium-codegen-ia32.cc
ia32/lithium-ia32.cc
ia32/macro-assembler-ia32.cc
ia32/regexp-macro-assembler-ia32.cc
ia32/register-allocator-ia32.cc
@ -192,6 +205,7 @@ SOURCES = {
x64/codegen-x64.cc
x64/cpu-x64.cc
x64/debug-x64.cc
x64/deoptimizer-x64.cc
x64/disasm-x64.cc
x64/frames-x64.cc
x64/full-codegen-x64.cc

View File

@ -28,8 +28,11 @@
#include "v8.h"
#include "accessors.h"
#include "ast.h"
#include "deoptimizer.h"
#include "execution.h"
#include "factory.h"
#include "safepoint-table.h"
#include "scopeinfo.h"
#include "top.h"
@ -503,11 +506,9 @@ MaybeObject* Accessors::FunctionGetLength(Object* object, void*) {
// If the function isn't compiled yet, the length is not computed
// correctly yet. Compile it now and return the right length.
HandleScope scope;
Handle<SharedFunctionInfo> shared(function->shared());
if (!CompileLazyShared(shared, KEEP_EXCEPTION)) {
return Failure::Exception();
}
return Smi::FromInt(shared->length());
Handle<JSFunction> handle(function);
if (!CompileLazy(handle, KEEP_EXCEPTION)) return Failure::Exception();
return Smi::FromInt(handle->shared()->length());
} else {
return Smi::FromInt(function->shared()->length());
}
@ -545,6 +546,208 @@ const AccessorDescriptor Accessors::FunctionName = {
// Accessors::FunctionArguments
//
static Address SlotAddress(JavaScriptFrame* frame, int slot_index) {
if (slot_index >= 0) {
const int offset = JavaScriptFrameConstants::kLocal0Offset;
return frame->fp() + offset - (slot_index * kPointerSize);
} else {
const int offset = JavaScriptFrameConstants::kReceiverOffset;
return frame->caller_sp() + offset + (slot_index * kPointerSize);
}
}
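// Illustrative worked example, not part of the original source: assuming the
// usual 4-byte kPointerSize, slot_index 0 maps to fp + kLocal0Offset and
// slot_index 2 to fp + kLocal0Offset - 8 (locals live below the frame
// pointer), while slot_index -1 maps to caller_sp + kReceiverOffset - 4,
// i.e. into the caller-pushed argument area above the frame.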
// We can't intermix stack decoding and allocations because the
// deoptimization infrastructure is not GC safe.
// Thus we build a temporary structure in malloced space.
class SlotRef BASE_EMBEDDED {
public:
enum SlotRepresentation {
UNKNOWN,
TAGGED,
INT32,
DOUBLE,
LITERAL
};
SlotRef()
: addr_(NULL), representation_(UNKNOWN) { }
SlotRef(Address addr, SlotRepresentation representation)
: addr_(addr), representation_(representation) { }
explicit SlotRef(Object* literal)
: literal_(literal), representation_(LITERAL) { }
Handle<Object> GetValue() {
switch (representation_) {
case TAGGED:
return Handle<Object>(Memory::Object_at(addr_));
case INT32: {
int value = Memory::int32_at(addr_);
if (Smi::IsValid(value)) {
return Handle<Object>(Smi::FromInt(value));
} else {
return Factory::NewNumberFromInt(value);
}
}
case DOUBLE: {
double value = Memory::double_at(addr_);
return Factory::NewNumber(value);
}
case LITERAL:
return literal_;
default:
UNREACHABLE();
return Handle<Object>::null();
}
}
private:
Address addr_;
Handle<Object> literal_;
SlotRepresentation representation_;
};
static SlotRef ComputeSlotForNextArgument(TranslationIterator* iterator,
DeoptimizationInputData* data,
JavaScriptFrame* frame) {
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator->Next());
switch (opcode) {
case Translation::BEGIN:
case Translation::FRAME:
// Peeled off before getting here.
break;
case Translation::ARGUMENTS_OBJECT:
// This can only be emitted for local slots, not for argument slots.
break;
case Translation::REGISTER:
case Translation::INT32_REGISTER:
case Translation::DOUBLE_REGISTER:
case Translation::DUPLICATE:
// We are at a safepoint which corresponds to a call. All registers
// are saved by the caller, so there are no live registers at this
// point. Thus these translation commands should not be used.
break;
case Translation::STACK_SLOT: {
int slot_index = iterator->Next();
Address slot_addr = SlotAddress(frame, slot_index);
return SlotRef(slot_addr, SlotRef::TAGGED);
}
case Translation::INT32_STACK_SLOT: {
int slot_index = iterator->Next();
Address slot_addr = SlotAddress(frame, slot_index);
return SlotRef(slot_addr, SlotRef::INT32);
}
case Translation::DOUBLE_STACK_SLOT: {
int slot_index = iterator->Next();
Address slot_addr = SlotAddress(frame, slot_index);
return SlotRef(slot_addr, SlotRef::DOUBLE);
}
case Translation::LITERAL: {
int literal_index = iterator->Next();
return SlotRef(data->LiteralArray()->get(literal_index));
}
}
UNREACHABLE();
return SlotRef();
}
static void ComputeSlotMappingForArguments(JavaScriptFrame* frame,
int inlined_frame_index,
Vector<SlotRef>* args_slots) {
AssertNoAllocation no_gc;
int deopt_index = AstNode::kNoNumber;
DeoptimizationInputData* data =
static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
TranslationIterator it(data->TranslationByteArray(),
data->TranslationIndex(deopt_index)->value());
Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
ASSERT(opcode == Translation::BEGIN);
int frame_count = it.Next();
USE(frame_count);
ASSERT(frame_count > inlined_frame_index);
int frames_to_skip = inlined_frame_index;
while (true) {
opcode = static_cast<Translation::Opcode>(it.Next());
// Skip over operands to advance to the next opcode.
it.Skip(Translation::NumberOfOperandsFor(opcode));
if (opcode == Translation::FRAME) {
if (frames_to_skip == 0) {
// We reached the frame corresponding to the inlined function in question.
// Process the translation commands for its arguments.
// Skip the translation command for the receiver.
it.Skip(Translation::NumberOfOperandsFor(
static_cast<Translation::Opcode>(it.Next())));
// Compute slots for arguments.
for (int i = 0; i < args_slots->length(); ++i) {
(*args_slots)[i] = ComputeSlotForNextArgument(&it, data, frame);
}
return;
}
frames_to_skip--;
}
}
UNREACHABLE();
}
static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
JavaScriptFrame* frame,
Handle<JSFunction> inlined_function,
int inlined_frame_index) {
int args_count = inlined_function->shared()->formal_parameter_count();
ScopedVector<SlotRef> args_slots(args_count);
ComputeSlotMappingForArguments(frame, inlined_frame_index, &args_slots);
Handle<JSObject> arguments =
Factory::NewArgumentsObject(inlined_function, args_count);
Handle<FixedArray> array = Factory::NewFixedArray(args_count);
for (int i = 0; i < args_count; ++i) {
Handle<Object> value = args_slots[i].GetValue();
array->set(i, *value);
}
arguments->set_elements(*array);
// Return the freshly allocated arguments object.
return *arguments;
}
MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
HandleScope scope;
@ -554,38 +757,50 @@ MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
Handle<JSFunction> function(holder);
// Find the top invocation of the function by traversing frames.
List<JSFunction*> functions(2);
for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
// Skip all frames that aren't invocations of the given function.
JavaScriptFrame* frame = it.frame();
if (frame->function() != *function) continue;
frame->GetFunctions(&functions);
for (int i = functions.length() - 1; i >= 0; i--) {
// Skip all frames that aren't invocations of the given function.
if (functions[i] != *function) continue;
// If there is an arguments variable in the stack, we return that.
int index = function->shared()->scope_info()->
StackSlotIndex(Heap::arguments_symbol());
if (index >= 0) {
Handle<Object> arguments = Handle<Object>(frame->GetExpression(index));
if (!arguments->IsTheHole()) return *arguments;
if (i > 0) {
// Function in question was inlined.
return ConstructArgumentsObjectForInlinedFunction(frame, function, i);
} else {
// If there is an arguments variable in the stack, we return that.
int index = function->shared()->scope_info()->
StackSlotIndex(Heap::arguments_symbol());
if (index >= 0) {
Handle<Object> arguments =
Handle<Object>(frame->GetExpression(index));
if (!arguments->IsTheHole()) return *arguments;
}
// If there isn't an arguments variable in the stack, we need to
// find the frame that holds the actual arguments passed to the
// function on the stack.
it.AdvanceToArgumentsFrame();
frame = it.frame();
// Get the number of arguments and construct an arguments object
// mirror for the right frame.
const int length = frame->GetProvidedParametersCount();
Handle<JSObject> arguments = Factory::NewArgumentsObject(function,
length);
Handle<FixedArray> array = Factory::NewFixedArray(length);
// Copy the parameters to the arguments object.
ASSERT(array->length() == length);
for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i));
arguments->set_elements(*array);
// Return the freshly allocated arguments object.
return *arguments;
}
}
// If there isn't an arguments variable in the stack, we need to
// find the frame that holds the actual arguments passed to the
// function on the stack.
it.AdvanceToArgumentsFrame();
frame = it.frame();
// Get the number of arguments and construct an arguments object
// mirror for the right frame.
const int length = frame->GetProvidedParametersCount();
Handle<JSObject> arguments = Factory::NewArgumentsObject(function, length);
Handle<FixedArray> array = Factory::NewFixedArray(length);
// Copy the parameters to the arguments object.
ASSERT(array->length() == length);
for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i));
arguments->set_elements(*array);
// Return the freshly allocated arguments object.
return *arguments;
functions.Rewind(0);
}
// No frame corresponding to the given function found. Return null.
@ -613,19 +828,34 @@ MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
if (!found_it) return Heap::undefined_value();
Handle<JSFunction> function(holder);
// Find the top invocation of the function by traversing frames.
List<JSFunction*> functions(2);
for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
// Skip all frames that aren't invocations of the given function.
if (it.frame()->function() != *function) continue;
// Once we have found the frame, we need to go to the caller
// frame. This may require skipping through a number of top-level
// frames, e.g. frames for scripts not functions.
while (true) {
it.Advance();
if (it.done()) return Heap::null_value();
JSFunction* caller = JSFunction::cast(it.frame()->function());
if (!caller->shared()->is_toplevel()) return caller;
JavaScriptFrame* frame = it.frame();
frame->GetFunctions(&functions);
for (int i = functions.length() - 1; i >= 0; i--) {
if (functions[i] == *function) {
// Once we have found the frame, we need to go to the caller
// frame. This may require skipping through a number of top-level
// frames, e.g. frames for scripts not functions.
if (i > 0) {
ASSERT(!functions[i - 1]->shared()->is_toplevel());
return functions[i - 1];
} else {
for (it.Advance(); !it.done(); it.Advance()) {
frame = it.frame();
functions.Rewind(0);
frame->GetFunctions(&functions);
if (!functions.last()->shared()->is_toplevel()) {
return functions.last();
}
ASSERT(functions.length() == 1);
}
if (it.done()) return Heap::null_value();
break;
}
}
}
functions.Rewind(0);
}
// No frame corresponding to the given function found. Return null.

View File

@ -78,13 +78,14 @@ class Accessors : public AllStatic {
MUST_USE_RESULT static MaybeObject* FunctionGetPrototype(Object* object,
void*);
MUST_USE_RESULT static MaybeObject* FunctionSetPrototype(JSObject* object,
Object* value,
void*);
Object* value,
void*);
static MaybeObject* FunctionGetArguments(Object* object, void*);
private:
// Accessor functions only used through the descriptor.
static MaybeObject* FunctionGetLength(Object* object, void*);
static MaybeObject* FunctionGetName(Object* object, void*);
static MaybeObject* FunctionGetArguments(Object* object, void*);
static MaybeObject* FunctionGetCaller(Object* object, void*);
MUST_USE_RESULT static MaybeObject* ArraySetLength(JSObject* object,
Object* value, void*);

View File

@ -33,6 +33,7 @@
#include "bootstrapper.h"
#include "compiler.h"
#include "debug.h"
#include "deoptimizer.h"
#include "execution.h"
#include "global-handles.h"
#include "heap-profiler.h"
@ -40,18 +41,21 @@
#include "parser.h"
#include "platform.h"
#include "profile-generator-inl.h"
#include "runtime-profiler.h"
#include "serialize.h"
#include "snapshot.h"
#include "top.h"
#include "v8threads.h"
#include "version.h"
#include "vm-state-inl.h"
#include "../include/v8-profiler.h"
#include "../include/v8-testing.h"
#define LOG_API(expr) LOG(ApiEntryCall(expr))
#ifdef ENABLE_VMSTATE_TRACKING
#define ENTER_V8 i::VMState __state__(i::OTHER)
#define ENTER_V8 ASSERT(i::V8::IsRunning()); i::VMState __state__(i::OTHER)
#define LEAVE_V8 i::VMState __state__(i::EXTERNAL)
#else
#define ENTER_V8 ((void) 0)
@ -97,6 +101,7 @@ namespace v8 {
} \
} while (false)
// --- D a t a t h a t i s s p e c i f i c t o a t h r e a d ---
@ -2312,6 +2317,11 @@ bool v8::Object::ForceDelete(v8::Handle<Value> key) {
HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
// When turning on access checks for a global object, deoptimize all
// functions, as optimized code does not always handle access checks.
i::Deoptimizer::DeoptimizeGlobalObject(*self);
EXCEPTION_PREAMBLE();
i::Handle<i::Object> obj = i::ForceDeleteProperty(self, key_obj);
has_pending_exception = obj.is_null();
@ -2598,6 +2608,10 @@ void v8::Object::TurnOnAccessCheck() {
HandleScope scope;
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
// When turning on access checks for a global object, deoptimize all
// functions, as optimized code does not always handle access checks.
i::Deoptimizer::DeoptimizeGlobalObject(*obj);
i::Handle<i::Map> new_map =
i::Factory::CopyMapDropTransitions(i::Handle<i::Map>(obj->map()));
new_map->set_is_access_check_needed(true);
@ -3262,7 +3276,6 @@ void v8::Object::SetPointerInInternalField(int index, void* value) {
bool v8::V8::Initialize() {
if (i::V8::IsRunning()) return true;
ENTER_V8;
HandleScope scope;
if (i::Snapshot::Initialize()) return true;
return i::V8::Initialize(NULL);
@ -3386,6 +3399,7 @@ Persistent<Context> v8::Context::New(
global_constructor->set_needs_access_check(
proxy_constructor->needs_access_check());
}
i::RuntimeProfiler::Reset();
}
// Leave V8.
@ -4945,6 +4959,66 @@ const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
#endif // ENABLE_LOGGING_AND_PROFILING
v8::Testing::StressType internal::Testing::stress_type_ =
v8::Testing::kStressTypeOpt;
void Testing::SetStressRunType(Testing::StressType type) {
internal::Testing::set_stress_type(type);
}
int Testing::GetStressRuns() {
#ifdef DEBUG
// In debug mode the code runs much slower, so stressing will only make
// two runs.
return 2;
#else
return 5;
#endif
}
static void SetFlagsFromString(const char* flags) {
V8::SetFlagsFromString(flags, i::StrLength(flags));
}
void Testing::PrepareStressRun(int run) {
static const char* kLazyOptimizations =
"--prepare-always-opt --nolimit-inlining "
"--noalways-opt --noopt-eagerly";
static const char* kEagerOptimizations = "--opt-eagerly";
static const char* kForcedOptimizations = "--always-opt";
// If deoptimization is stressed, turn on frequent deoptimization. If no
// value is specified through --deopt-every-n-times, use a default value.
static const char* kDeoptEvery13Times = "--deopt-every-n-times=13";
if (internal::Testing::stress_type() == Testing::kStressTypeDeopt &&
internal::FLAG_deopt_every_n_times == 0) {
SetFlagsFromString(kDeoptEvery13Times);
}
#ifdef DEBUG
// As stressing in debug mode only makes two runs, skip the deopt stressing
// here.
if (run == GetStressRuns() - 1) {
SetFlagsFromString(kForcedOptimizations);
} else {
SetFlagsFromString(kEagerOptimizations);
SetFlagsFromString(kLazyOptimizations);
}
#else
if (run == GetStressRuns() - 1) {
SetFlagsFromString(kForcedOptimizations);
} else if (run == GetStressRuns() - 2) {
SetFlagsFromString(kEagerOptimizations);
} else {
SetFlagsFromString(kLazyOptimizations);
}
#endif
}
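// Illustrative summary, not part of the original source: with the release
// default of five stress runs, the first three runs use the lazy flag set,
// the fourth uses --opt-eagerly, and the final run uses --always-opt. In
// debug builds only two runs are made: the first applies the eager and then
// the lazy flag sets, and the last applies --always-opt. When deoptimization
// stressing is requested and --deopt-every-n-times was not given explicitly,
// --deopt-every-n-times=13 is set while preparing the first run and stays in
// effect afterwards.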
namespace internal {

View File

@ -31,6 +31,8 @@
#include "apiutils.h"
#include "factory.h"
#include "../include/v8-testing.h"
namespace v8 {
// Constants used in the implementation of the API. The most natural thing
@ -489,6 +491,18 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
(!blocks_.is_empty() && prev_limit != NULL));
}
class Testing {
public:
static v8::Testing::StressType stress_type() { return stress_type_; }
static void set_stress_type(v8::Testing::StressType stress_type) {
stress_type_ = stress_type;
}
private:
static v8::Testing::StressType stress_type_;
};
} } // namespace v8::internal
#endif // V8_API_H_

View File

@ -110,6 +110,30 @@ Address* RelocInfo::target_reference_address() {
}
Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = Memory::Address_at(pc_);
return Handle<JSGlobalPropertyCell>(
reinterpret_cast<JSGlobalPropertyCell**>(address));
}
JSGlobalPropertyCell* RelocInfo::target_cell() {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = Memory::Address_at(pc_);
Object* object = HeapObject::FromAddress(
address - JSGlobalPropertyCell::kValueOffset);
return reinterpret_cast<JSGlobalPropertyCell*>(object);
}
void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
}
Address RelocInfo::call_address() {
// The 2-instruction offset assumes a patched debug break slot or return
// sequence.

View File

@ -70,7 +70,7 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
#endif // def __arm__
void CpuFeatures::Probe() {
void CpuFeatures::Probe(bool portable) {
#ifndef __arm__
// For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
if (FLAG_enable_vfp3) {
@ -81,7 +81,7 @@ void CpuFeatures::Probe() {
supported_ |= 1u << ARMv7;
}
#else // def __arm__
if (Serializer::enabled()) {
if (portable && Serializer::enabled()) {
supported_ |= OS::CpuFeaturesImpliedByPlatform();
supported_ |= CpuFeaturesImpliedByCompiler();
return; // No features if we might serialize.
@ -98,6 +98,8 @@ void CpuFeatures::Probe() {
supported_ |= 1u << ARMv7;
found_by_runtime_probing_ |= 1u << ARMv7;
}
if (!portable) found_by_runtime_probing_ = 0;
#endif
}
@ -318,7 +320,10 @@ static const int kMinimalBufferSize = 4*KB;
static byte* spare_buffer_ = NULL;
Assembler::Assembler(void* buffer, int buffer_size)
: positions_recorder_(this) {
: positions_recorder_(this),
allow_peephole_optimization_(false) {
// BUG(3245989): disable peephole optimization if crankshaft is enabled.
allow_peephole_optimization_ = FLAG_peephole_optimization;
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
@ -987,6 +992,7 @@ void Assembler::b(int branch_offset, Condition cond) {
void Assembler::bl(int branch_offset, Condition cond) {
positions_recorder()->WriteRecordedPositions();
ASSERT((branch_offset & 3) == 0);
int imm24 = branch_offset >> 2;
ASSERT(is_int24(imm24));
@ -1650,9 +1656,10 @@ void Assembler::stop(const char* msg, Condition cond, int32_t code) {
emit(reinterpret_cast<Instr>(msg));
#else // def __arm__
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
ASSERT(cond == al);
bkpt(0);
#else // ndef CAN_USE_ARMV5_INSTRUCTIONS
svc(0x9f0001);
svc(0x9f0001, cond);
#endif // ndef CAN_USE_ARMV5_INSTRUCTIONS
#endif // def __arm__
}
@ -1826,13 +1833,18 @@ void Assembler::vldr(const DwVfpRegister dst,
const Condition cond) {
// Ddst = MEM(Rbase + offset).
// Instruction details available in ARM DDI 0406A, A8-628.
// cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
// cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1011(11-8) | offset
ASSERT(CpuFeatures::IsEnabled(VFP3));
int u = 1;
if (offset < 0) {
offset = -offset;
u = 0;
}
ASSERT(offset % 4 == 0);
ASSERT((offset / 4) < 256);
ASSERT(offset >= 0);
emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 |
0xB*B8 | ((offset / 4) & 255));
}
@ -1843,15 +1855,20 @@ void Assembler::vldr(const SwVfpRegister dst,
const Condition cond) {
// Sdst = MEM(Rbase + offset).
// Instruction details available in ARM DDI 0406A, A8-628.
// cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
// cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1010(11-8) | offset
ASSERT(CpuFeatures::IsEnabled(VFP3));
int u = 1;
if (offset < 0) {
offset = -offset;
u = 0;
}
ASSERT(offset % 4 == 0);
ASSERT((offset / 4) < 256);
ASSERT(offset >= 0);
int sd, d;
dst.split_code(&sd, &d);
emit(cond | d*B22 | 0xD9*B20 | base.code()*B16 | sd*B12 |
emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
0xA*B8 | ((offset / 4) & 255));
}
@ -1862,13 +1879,18 @@ void Assembler::vstr(const DwVfpRegister src,
const Condition cond) {
// MEM(Rbase + offset) = Dsrc.
// Instruction details available in ARM DDI 0406A, A8-786.
// cond(31-28) | 1101(27-24)| 1000(23-20) | | Rbase(19-16) |
// cond(31-28) | 1101(27-24)| U000(23-20) | | Rbase(19-16) |
// Vsrc(15-12) | 1011(11-8) | (offset/4)
ASSERT(CpuFeatures::IsEnabled(VFP3));
int u = 1;
if (offset < 0) {
offset = -offset;
u = 0;
}
ASSERT(offset % 4 == 0);
ASSERT((offset / 4) < 256);
ASSERT(offset >= 0);
emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 |
0xB*B8 | ((offset / 4) & 255));
}
@ -1879,15 +1901,20 @@ void Assembler::vstr(const SwVfpRegister src,
const Condition cond) {
// MEM(Rbase + offset) = SSrc.
// Instruction details available in ARM DDI 0406A, A8-786.
// cond(31-28) | 1101(27-24)| 1000(23-20) | Rbase(19-16) |
// cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1010(11-8) | (offset/4)
ASSERT(CpuFeatures::IsEnabled(VFP3));
int u = 1;
if (offset < 0) {
offset = -offset;
u = 0;
}
ASSERT(offset % 4 == 0);
ASSERT((offset / 4) < 256);
ASSERT(offset >= 0);
int sd, d;
src.split_code(&sd, &d);
emit(cond | d*B22 | 0xD8*B20 | base.code()*B16 | sd*B12 |
emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
0xA*B8 | ((offset / 4) & 255));
}
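// Illustrative note, not part of the original source: the U bit introduced
// above encodes the sign of the immediate offset. For example, a load with
// offset -8 is emitted with the offset negated, U cleared, and imm8 = 2
// (offset / 4), whereas offset +8 keeps U set with the same imm8. Offsets
// must be multiples of 4 and smaller than 1024, as the asserts require.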
@ -2411,7 +2438,7 @@ void Assembler::RecordDebugBreakSlot() {
void Assembler::RecordComment(const char* msg) {
if (FLAG_debug_code) {
if (FLAG_code_comments) {
CheckBuffer();
RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
}
@ -2469,6 +2496,20 @@ void Assembler::GrowBuffer() {
}
void Assembler::db(uint8_t data) {
CheckBuffer();
*reinterpret_cast<uint8_t*>(pc_) = data;
pc_ += sizeof(uint8_t);
}
void Assembler::dd(uint32_t data) {
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) = data;
pc_ += sizeof(uint32_t);
}
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants
if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {

View File

@ -69,7 +69,39 @@ namespace internal {
//
// Core register
struct Register {
bool is_valid() const { return 0 <= code_ && code_ < 16; }
static const int kNumRegisters = 16;
static const int kNumAllocatableRegisters = 8;
static int ToAllocationIndex(Register reg) {
return reg.code();
}
static Register FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
return from_code(index);
}
static const char* AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
const char* const names[] = {
"r0",
"r1",
"r2",
"r3",
"r4",
"r5",
"r6",
"r7",
};
return names[index];
}
static Register from_code(int code) {
Register r = { code };
return r;
}
bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
bool is(Register reg) const { return code_ == reg.code_; }
int code() const {
ASSERT(is_valid());
@ -132,6 +164,48 @@ struct SwVfpRegister {
// Double word VFP register.
struct DwVfpRegister {
// d0 has been excluded from allocation, following ia32 where xmm0 is
// excluded. This should be revisited.
static const int kNumRegisters = 16;
static const int kNumAllocatableRegisters = 15;
static int ToAllocationIndex(DwVfpRegister reg) {
ASSERT(reg.code() != 0);
return reg.code() - 1;
}
static DwVfpRegister FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
return from_code(index + 1);
}
static const char* AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
const char* const names[] = {
"d1",
"d2",
"d3",
"d4",
"d5",
"d6",
"d7",
"d8",
"d9",
"d10",
"d11",
"d12",
"d13",
"d14",
"d15"
};
return names[index];
}
static DwVfpRegister from_code(int code) {
DwVfpRegister r = { code };
return r;
}
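// Illustrative examples, not part of the original source: since d0 is
// excluded from allocation, ToAllocationIndex(d1) is 0, FromAllocationIndex(0)
// returns d1, and AllocationIndexToString(0) yields "d1". The core Register
// mapping above is simply the identity for r0 through r7.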
// Supporting d0 to d15, can be later extended to d31.
bool is_valid() const { return 0 <= code_ && code_ < 16; }
bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
@ -167,6 +241,9 @@ struct DwVfpRegister {
};
typedef DwVfpRegister DoubleRegister;
// Support for the VFP registers s0 to s31 (d0 to d15).
// Note that "s(N):s(N+1)" is the same as "d(N/2)".
const SwVfpRegister s0 = { 0 };
@ -286,6 +363,9 @@ enum Coprocessor {
// Condition field in instructions.
enum Condition {
// any value < 0 is considered no_condition
no_condition = -1,
eq = 0 << 28, // Z set equal.
ne = 1 << 28, // Z clear not equal.
nz = 1 << 28, // Z clear not zero.
@ -527,7 +607,7 @@ class CpuFeatures : public AllStatic {
public:
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
static void Probe();
static void Probe(bool portable);
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
@ -1148,15 +1228,20 @@ class Assembler : public Malloced {
void RecordDebugBreakSlot();
// Record a comment relocation entry that can be used by a disassembler.
// Use --debug_code to enable.
// Use --code-comments to enable.
void RecordComment(const char* msg);
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
void db(uint8_t data);
void dd(uint32_t data);
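// Illustrative use, not part of the original source: a code generator could
// emit an inline jump table by calling dd() once per entry, e.g.
//   for (int i = 0; i < kTableSize; i++) assembler->dd(entries[i]);
// where kTableSize and entries are hypothetical names.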
int pc_offset() const { return pc_ - buffer_; }
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
bool can_peephole_optimize(int instructions) {
if (!FLAG_peephole_optimization) return false;
if (!allow_peephole_optimization_) return false;
if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false;
return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize;
}
@ -1185,6 +1270,8 @@ class Assembler : public Malloced {
static bool IsLdrPcImmediateOffset(Instr instr);
static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
// Check if is time to emit a constant pool for pending reloc info entries
void CheckConstPool(bool force_emit, bool require_jump);
protected:
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
@ -1201,9 +1288,6 @@ class Assembler : public Malloced {
// Patch branch instruction at pos to branch to given branch target pos
void target_at_put(int pos, int target_pos);
// Check if is time to emit a constant pool for pending reloc info entries
void CheckConstPool(bool force_emit, bool require_jump);
// Block the emission of the constant pool before pc_offset
void BlockConstPoolBefore(int pc_offset) {
if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
@ -1317,6 +1401,7 @@ class Assembler : public Malloced {
friend class BlockConstPoolScope;
PositionsRecorder positions_recorder_;
bool allow_peephole_optimization_;
friend class PositionsRecorder;
friend class EnsureSpace;
};

View File

@ -1,4 +1,4 @@
// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -31,6 +31,8 @@
#include "codegen-inl.h"
#include "debug.h"
#include "deoptimizer.h"
#include "full-codegen.h"
#include "runtime.h"
namespace v8 {
@ -1089,6 +1091,80 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
}
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
// Enter an internal frame.
__ EnterInternalFrame();
// Preserve the function.
__ push(r1);
// Push the function on the stack as the argument to the runtime function.
__ push(r1);
__ CallRuntime(Runtime::kLazyRecompile, 1);
// Calculate the entry point.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore saved function.
__ pop(r1);
// Tear down temporary frame.
__ LeaveInternalFrame();
// Do a tail-call of the compiled function.
__ Jump(r2);
}
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
__ EnterInternalFrame();
// Pass the function and deoptimization type to the runtime system.
__ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
__ push(r0);
__ CallRuntime(Runtime::kNotifyDeoptimized, 1);
__ LeaveInternalFrame();
// Get the full codegen state from the stack and untag it -> r6.
__ ldr(r6, MemOperand(sp, 0 * kPointerSize));
__ SmiUntag(r6);
// Switch on the state.
Label with_tos_register, unknown_state;
__ cmp(r6, Operand(FullCodeGenerator::NO_REGISTERS));
__ b(ne, &with_tos_register);
__ add(sp, sp, Operand(1 * kPointerSize)); // Remove state.
__ Ret();
__ bind(&with_tos_register);
__ ldr(r0, MemOperand(sp, 1 * kPointerSize));
__ cmp(r6, Operand(FullCodeGenerator::TOS_REG));
__ b(ne, &unknown_state);
__ add(sp, sp, Operand(2 * kPointerSize)); // Remove state.
__ Ret();
__ bind(&unknown_state);
__ stop("no cases left");
}
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
__ stop("builtins-arm.cc: NotifyOSR");
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ stop("builtins-arm.cc: OnStackReplacement");
}
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// r0: actual number of arguments

View File

@ -82,12 +82,15 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
// write barrier because the allocated object is in new space.
__ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
__ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
__ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
__ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
__ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
__ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
__ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
__ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
__ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
// Initialize the code pointer in the function to be the one
// found in the shared function info object.
@ -1088,6 +1091,10 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
Label not_heap_number;
Register scratch = r7;
__ LoadRoot(ip, Heap::kNullValueRootIndex);
__ cmp(tos_, ip);
__ b(eq, &false_result);
// HeapNumber => false iff +0, -0, or NaN.
__ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
@ -2200,6 +2207,14 @@ Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
}
Handle<Code> GetTypeRecordingBinaryOpStub(int key,
TRBinaryOpIC::TypeInfo type_info,
TRBinaryOpIC::TypeInfo result_type_info) {
UNIMPLEMENTED();
return Handle<Code>::null();
}
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Argument is a number and is on stack and in r0.
Label runtime_call;
@ -2641,7 +2656,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// r0:r1: result
// sp: stack pointer
// fp: frame pointer
__ LeaveExitFrame();
__ LeaveExitFrame(save_doubles_);
// check if we should retry or throw exception
Label retry;
@ -2690,7 +2705,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// builtin once.
// Enter the exit frame that transitions from JavaScript to C++.
__ EnterExitFrame();
__ EnterExitFrame(save_doubles_);
// r4: number of arguments (C callee-saved)
// r5: pointer to builtin function (C callee-saved)
@ -2778,6 +2793,15 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Setup frame pointer for the frame to be pushed.
__ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
#ifdef ENABLE_LOGGING_AND_PROFILING
// If this is the outermost JS call, set js_entry_sp value.
ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
__ mov(r5, Operand(ExternalReference(js_entry_sp)));
__ ldr(r6, MemOperand(r5));
__ cmp(r6, Operand(0, RelocInfo::NONE));
__ str(fp, MemOperand(r5), eq);
#endif
// Call a faked try-block that does the invoke.
__ bl(&invoke);
@ -2840,6 +2864,15 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// No need to restore registers
__ add(sp, sp, Operand(StackHandlerConstants::kSize));
#ifdef ENABLE_LOGGING_AND_PROFILING
// If current FP value is the same as js_entry_sp value, it means that
// the current function is the outermost.
__ mov(r5, Operand(ExternalReference(js_entry_sp)));
__ ldr(r6, MemOperand(r5));
__ cmp(fp, Operand(r6));
__ mov(r6, Operand(0, RelocInfo::NONE), LeaveCC, eq);
__ str(r6, MemOperand(r5), eq);
#endif
__ bind(&exit); // r0 holds result
// Restore the top frame descriptors from the stack.
@ -3430,6 +3463,95 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
const int kMaxInlineLength = 100;
Label slowcase;
Label done;
__ ldr(r1, MemOperand(sp, kPointerSize * 2));
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
__ tst(r1, Operand(kSmiTagMask));
__ b(ne, &slowcase);
__ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
__ b(hi, &slowcase);
// Smi-tagging is equivalent to multiplying by 2.
// Allocate RegExpResult followed by FixedArray, with the size in words
// computed into r2 below.
// JSArray: [Map][empty properties][Elements][Length-smi][index][input]
// Elements: [Map][Length][..elements..]
// Size of JSArray with two in-object properties and the header of a
// FixedArray.
int objects_size =
(JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
__ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
__ add(r2, r5, Operand(objects_size));
__ AllocateInNewSpace(
r2, // In: Size, in words.
r0, // Out: Start of allocation (tagged).
r3, // Scratch register.
r4, // Scratch register.
&slowcase,
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// r0: Start of allocated area, object-tagged.
// r1: Number of elements in array, as smi.
// r5: Number of elements, untagged.
// Set JSArray map to global.regexp_result_map().
// Set empty properties FixedArray.
// Set elements to point to FixedArray allocated right after the JSArray.
// Interleave operations for better latency.
__ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
__ add(r3, r0, Operand(JSRegExpResult::kSize));
__ mov(r4, Operand(Factory::empty_fixed_array()));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
__ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
__ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
__ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
__ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
// Set input, index and length fields from arguments.
__ ldr(r1, MemOperand(sp, kPointerSize * 0));
__ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
__ ldr(r1, MemOperand(sp, kPointerSize * 1));
__ str(r1, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
__ ldr(r1, MemOperand(sp, kPointerSize * 2));
__ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
// Fill out the elements FixedArray.
// r0: JSArray, tagged.
// r3: FixedArray, tagged.
// r5: Number of elements in array, untagged.
// Set map.
__ mov(r2, Operand(Factory::fixed_array_map()));
__ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
// Set FixedArray length.
__ mov(r6, Operand(r5, LSL, kSmiTagSize));
__ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
// Fill contents of fixed-array with the-hole.
__ mov(r2, Operand(Factory::the_hole_value()));
__ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// Fill fixed array elements with hole.
// r0: JSArray, tagged.
// r2: the hole.
// r3: Start of elements in FixedArray.
// r5: Number of elements to fill.
Label loop;
__ tst(r5, Operand(r5));
__ bind(&loop);
__ b(le, &done); // Jump if r5 is negative or zero.
__ sub(r5, r5, Operand(1), SetCC);
__ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
__ jmp(&loop);
__ bind(&done);
__ add(sp, sp, Operand(3 * kPointerSize));
__ Ret();
__ bind(&slowcase);
__ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
}
void CallFunctionStub::Generate(MacroAssembler* masm) {
Label slow;
@ -4722,6 +4844,123 @@ void StringAddStub::Generate(MacroAssembler* masm) {
}
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::SMIS);
Label miss;
__ orr(r2, r1, r0);
__ tst(r2, Operand(kSmiTagMask));
__ b(ne, &miss);
if (GetCondition() == eq) {
// For equality we do not care about the sign of the result.
__ sub(r0, r0, r1, SetCC);
} else {
__ sub(r1, r1, r0, SetCC);
// Correct sign of result in case of overflow.
__ rsb(r1, r1, Operand(0), SetCC, vs);
__ mov(r0, r1);
}
__ Ret();
__ bind(&miss);
GenerateMiss(masm);
}
void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::HEAP_NUMBERS);
Label generic_stub;
Label unordered;
Label miss;
__ and_(r2, r1, Operand(r0));
__ tst(r2, Operand(kSmiTagMask));
__ b(eq, &generic_stub);
__ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
__ b(ne, &miss);
__ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
__ b(ne, &miss);
// Inlining the double comparison and falling back to the general compare
// stub if NaN is involved or VFP3 is unsupported.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Load left and right operand
__ sub(r2, r1, Operand(kHeapObjectTag));
__ vldr(d0, r2, HeapNumber::kValueOffset);
__ sub(r2, r0, Operand(kHeapObjectTag));
__ vldr(d1, r2, HeapNumber::kValueOffset);
// Compare operands
__ vcmp(d0, d1);
__ vmrs(pc); // Move vector status bits to normal status bits.
// Don't base result on status bits when a NaN is involved.
__ b(vs, &unordered);
// Return a result of -1, 0, or 1, based on status bits.
__ mov(r0, Operand(EQUAL), LeaveCC, eq);
__ mov(r0, Operand(LESS), LeaveCC, lt);
__ mov(r0, Operand(GREATER), LeaveCC, gt);
__ Ret();
__ bind(&unordered);
}
CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
__ bind(&generic_stub);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&miss);
GenerateMiss(masm);
}
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::OBJECTS);
Label miss;
__ and_(r2, r1, Operand(r0));
__ tst(r2, Operand(kSmiTagMask));
__ b(eq, &miss);
__ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
__ b(ne, &miss);
__ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
__ b(ne, &miss);
ASSERT(GetCondition() == eq);
__ sub(r0, r0, Operand(r1));
__ Ret();
__ bind(&miss);
GenerateMiss(masm);
}
void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
__ Push(r1, r0);
__ push(lr);
// Call the runtime system in a fresh internal frame.
ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss));
__ EnterInternalFrame();
__ Push(r1, r0);
__ mov(ip, Operand(Smi::FromInt(op_)));
__ push(ip);
__ CallExternalReference(miss, 3);
__ LeaveInternalFrame();
// Compute the entry point of the rewritten stub.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
__ pop(lr);
__ pop(r0);
__ pop(r1);
__ Jump(r2);
}
#undef __
} } // namespace v8::internal

View File

@ -106,9 +106,9 @@ class GenericBinaryOpStub : public CodeStub {
// Minor key encoding in 17 bits.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 6> {};
class TypeInfoBits: public BitField<int, 8, 2> {};
class RegisterBits: public BitField<bool, 10, 1> {};
class KnownIntBits: public BitField<int, 11, kKnownRhsKeyBits> {};
class TypeInfoBits: public BitField<int, 8, 3> {};
class RegisterBits: public BitField<bool, 11, 1> {};
class KnownIntBits: public BitField<int, 12, kKnownRhsKeyBits> {};
Major MajorKey() { return GenericBinaryOp; }
int MinorKey() {
@ -196,6 +196,10 @@ class GenericBinaryOpStub : public CodeStub {
const char* GetName();
virtual void FinishCode(Code* code) {
code->set_binary_op_type(runtime_operands_type_);
}
#ifdef DEBUG
void Print() {
if (!specialized_on_rhs_) {

View File

@ -36,7 +36,7 @@
#include "debug.h"
#include "ic-inl.h"
#include "jsregexp.h"
#include "jump-target-light-inl.h"
#include "jump-target-inl.h"
#include "parser.h"
#include "regexp-macro-assembler.h"
#include "regexp-stack.h"
@ -79,12 +79,12 @@ void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
}
void ICRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterInternalFrame();
}
void ICRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
masm->LeaveInternalFrame();
}
@ -165,6 +165,9 @@ void CodeGenerator::Generate(CompilationInfo* info) {
int slots = scope()->num_parameters() + scope()->num_stack_slots();
ScopedVector<TypeInfo> type_info_array(slots);
for (int i = 0; i < slots; i++) {
type_info_array[i] = TypeInfo::Unknown();
}
type_info_ = &type_info_array;
ASSERT(allocator_ == NULL);
@ -5416,97 +5419,14 @@ void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
// No stub. This code only occurs a few times in regexp.js.
const int kMaxInlineLength = 100;
ASSERT_EQ(3, args->length());
Load(args->at(0)); // Size of array, smi.
Load(args->at(1)); // "index" property value.
Load(args->at(2)); // "input" property value.
{
VirtualFrame::SpilledScope spilled_scope(frame_);
Label slowcase;
Label done;
__ ldr(r1, MemOperand(sp, kPointerSize * 2));
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
__ tst(r1, Operand(kSmiTagMask));
__ b(ne, &slowcase);
__ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
__ b(hi, &slowcase);
// Smi-tagging is equivalent to multiplying by 2.
// Allocate RegExpResult followed by FixedArray with size in ebx.
// JSArray: [Map][empty properties][Elements][Length-smi][index][input]
// Elements: [Map][Length][..elements..]
// Size of JSArray with two in-object properties and the header of a
// FixedArray.
int objects_size =
(JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
__ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
__ add(r2, r5, Operand(objects_size));
__ AllocateInNewSpace(
r2, // In: Size, in words.
r0, // Out: Start of allocation (tagged).
r3, // Scratch register.
r4, // Scratch register.
&slowcase,
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// r0: Start of allocated area, object-tagged.
// r1: Number of elements in array, as smi.
// r5: Number of elements, untagged.
// Set JSArray map to global.regexp_result_map().
// Set empty properties FixedArray.
// Set elements to point to FixedArray allocated right after the JSArray.
// Interleave operations for better latency.
__ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
__ add(r3, r0, Operand(JSRegExpResult::kSize));
__ mov(r4, Operand(Factory::empty_fixed_array()));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
__ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
__ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
__ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
__ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
// Set input, index and length fields from arguments.
__ ldm(ia_w, sp, static_cast<RegList>(r2.bit() | r4.bit()));
__ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
__ add(sp, sp, Operand(kPointerSize));
__ str(r4, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
__ str(r2, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
// Fill out the elements FixedArray.
// r0: JSArray, tagged.
// r3: FixedArray, tagged.
// r5: Number of elements in array, untagged.
// Set map.
__ mov(r2, Operand(Factory::fixed_array_map()));
__ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
// Set FixedArray length.
__ mov(r6, Operand(r5, LSL, kSmiTagSize));
__ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
// Fill contents of fixed-array with the-hole.
__ mov(r2, Operand(Factory::the_hole_value()));
__ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// Fill fixed array elements with hole.
// r0: JSArray, tagged.
// r2: the hole.
// r3: Start of elements in FixedArray.
// r5: Number of elements to fill.
Label loop;
__ tst(r5, Operand(r5));
__ bind(&loop);
__ b(le, &done); // Jump if r1 is negative or zero.
__ sub(r5, r5, Operand(1), SetCC);
__ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
__ jmp(&loop);
__ bind(&slowcase);
__ CallRuntime(Runtime::kRegExpConstructResult, 3);
__ bind(&done);
}
frame_->Forget(3);
RegExpConstructResultStub stub;
frame_->SpillAll();
frame_->CallStub(&stub, 3);
frame_->EmitPush(r0);
}

View File

@ -209,6 +209,9 @@ class CodeGenerator: public AstVisitor {
Code::Flags flags,
CompilationInfo* info);
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
#ifdef ENABLE_LOGGING_AND_PROFILING
static bool ShouldGenerateLog(Expression* type);
#endif
@ -305,8 +308,9 @@ class CodeGenerator: public AstVisitor {
// Node visitors.
void VisitStatements(ZoneList<Statement*>* statements);
virtual void VisitSlot(Slot* node);
#define DEF_VISIT(type) \
void Visit##type(type* node);
virtual void Visit##type(type* node);
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
@ -579,6 +583,7 @@ class CodeGenerator: public AstVisitor {
friend class FastCodeGenerator;
friend class FullCodeGenerator;
friend class FullCodeGenSyntaxChecker;
friend class LCodeGen;
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};

View File

@ -42,7 +42,10 @@ namespace v8 {
namespace internal {
void CPU::Setup() {
CpuFeatures::Probe();
CpuFeatures::Probe(true);
if (!CpuFeatures::IsSupported(VFP3) || Serializer::enabled()) {
V8::DisableCrankshaft();
}
}

503
src/arm/deoptimizer-arm.cc Normal file

@ -0,0 +1,503 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen.h"
#include "deoptimizer.h"
#include "full-codegen.h"
#include "safepoint-table.h"
namespace v8 {
namespace internal {
int Deoptimizer::table_entry_size_ = 16;
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
AssertNoAllocation no_allocation;
if (!function->IsOptimized()) return;
// Get the optimized code.
Code* code = function->code();
// Invalidate the relocation information, as it will become invalid by the
// code patching below, and is not needed any more.
code->InvalidateRelocation();
// For each return after a safepoint insert an absolute call to the
// corresponding deoptimization entry.
unsigned last_pc_offset = 0;
SafepointTable table(function->code());
for (unsigned i = 0; i < table.length(); i++) {
unsigned pc_offset = table.GetPcOffset(i);
int deoptimization_index = table.GetDeoptimizationIndex(i);
int gap_code_size = table.GetGapCodeSize(i);
// Check that we did not shoot past next safepoint.
// TODO(srdjan): How do we guarantee that safepoint code does not
// overlap other safepoint patching code?
CHECK(pc_offset >= last_pc_offset);
#ifdef DEBUG
// Destroy the code which is not supposed to be run again.
int instructions = (pc_offset - last_pc_offset) / Assembler::kInstrSize;
CodePatcher destroyer(code->instruction_start() + last_pc_offset,
instructions);
for (int x = 0; x < instructions; x++) {
destroyer.masm()->bkpt(0);
}
#endif
last_pc_offset = pc_offset;
if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
const int kCallInstructionSizeInWords = 3;
CodePatcher patcher(code->instruction_start() + pc_offset + gap_code_size,
kCallInstructionSizeInWords);
Address deoptimization_entry = Deoptimizer::GetDeoptimizationEntry(
deoptimization_index, Deoptimizer::LAZY);
patcher.masm()->Call(deoptimization_entry, RelocInfo::NONE);
last_pc_offset +=
gap_code_size + kCallInstructionSizeInWords * Assembler::kInstrSize;
}
}
#ifdef DEBUG
// Destroy the code which is not supposed to be run again.
int instructions =
(code->safepoint_table_start() - last_pc_offset) / Assembler::kInstrSize;
CodePatcher destroyer(code->instruction_start() + last_pc_offset,
instructions);
for (int x = 0; x < instructions; x++) {
destroyer.masm()->bkpt(0);
}
#endif
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
node->set_next(deoptimizing_code_list_);
deoptimizing_code_list_ = node;
// Set the code for the function to non-optimized version.
function->ReplaceCode(function->shared()->code());
if (FLAG_trace_deopt) {
PrintF("[forced deoptimization: ");
function->PrintName();
PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
}
}
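DeoptimizeFunction walks the safepoint table in increasing pc order and patches a call to the lazy deoptimization entry into the gap code after each safepoint that carries a deoptimization index. A simplified standalone model of that planning step, with illustrative types and no actual code patching:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

struct SafepointEntry {
  uint32_t pc_offset;   // where the safepoint's return address points
  int deopt_index;      // negative when no deoptimization entry is attached
  int gap_code_size;    // bytes reserved after the call for patching
};

const int kInstrSize = 4;                      // ARM instruction size
const int kCallSequenceSize = 3 * kInstrSize;  // kCallInstructionSizeInWords

// Returns the byte ranges that would be overwritten with deopt calls.
std::vector<std::pair<uint32_t, uint32_t> > PlanPatches(
    const std::vector<SafepointEntry>& table) {
  std::vector<std::pair<uint32_t, uint32_t> > patches;
  uint32_t last_pc = 0;
  for (std::size_t i = 0; i < table.size(); ++i) {
    const SafepointEntry& entry = table[i];
    assert(entry.pc_offset >= last_pc);  // entries must not overlap
    last_pc = entry.pc_offset;
    if (entry.deopt_index >= 0) {
      uint32_t start = entry.pc_offset + entry.gap_code_size;
      patches.push_back(std::make_pair(start, start + kCallSequenceSize));
      last_pc = start + kCallSequenceSize;
    }
  }
  return patches;
}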
void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
Code* replacement_code) {
UNIMPLEMENTED();
}
void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
UNIMPLEMENTED();
}
void Deoptimizer::DoComputeOsrOutputFrame() {
UNIMPLEMENTED();
}
// This code is very similar to ia32 code, but relies on register names (fp, sp)
// and how the frame is laid out.
void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
int frame_index) {
// Read the ast node id, function, and frame height for this output frame.
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator->Next());
USE(opcode);
ASSERT(Translation::FRAME == opcode);
int node_id = iterator->Next();
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
if (FLAG_trace_deopt) {
PrintF(" translating ");
function->PrintName();
PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
}
// The 'fixed' part of the frame consists of the incoming parameters and
// the part described by JavaScriptFrameConstants.
unsigned fixed_frame_size = ComputeFixedSize(function);
unsigned input_frame_size = input_->GetFrameSize();
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);
ASSERT(frame_index >= 0 && frame_index < output_count_);
ASSERT(output_[frame_index] == NULL);
output_[frame_index] = output_frame;
// The top address for the bottommost output frame can be computed from
// the input frame pointer and the output frame's height. For all
// subsequent output frames, it can be computed from the previous one's
// top address and the current frame's size.
uint32_t top_address;
if (is_bottommost) {
// 2 = context and function in the frame.
top_address =
input_->GetRegister(fp.code()) - (2 * kPointerSize) - height_in_bytes;
} else {
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
}
output_frame->SetTop(top_address);
// Compute the incoming parameter translation.
int parameter_count = function->shared()->formal_parameter_count() + 1;
unsigned output_offset = output_frame_size;
unsigned input_offset = input_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
}
input_offset -= (parameter_count * kPointerSize);
// There are no translation commands for the caller's pc and fp, the
// context, and the function. Synthesize their values and set them up
// explicitly.
//
// The caller's pc for the bottommost output frame is the same as in the
// input frame. For all subsequent output frames, it can be read from the
// previous one. This frame's pc can be computed from the non-optimized
// function code and AST id of the bailout.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
uint32_t value;
if (is_bottommost) {
value = input_->GetFrameSlot(input_offset);
} else {
value = output_[frame_index - 1]->GetPc();
}
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
top_address + output_offset, output_offset, value);
}
// The caller's frame pointer for the bottommost output frame is the same
// as in the input frame. For all subsequent output frames, it can be
// read from the previous one. Also compute and set this frame's frame
// pointer.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
if (is_bottommost) {
value = input_->GetFrameSlot(input_offset);
} else {
value = output_[frame_index - 1]->GetFp();
}
output_frame->SetFrameSlot(output_offset, value);
unsigned fp_value = top_address + output_offset;
ASSERT(!is_bottommost || input_->GetRegister(fp.code()) == fp_value);
output_frame->SetFp(fp_value);
if (is_topmost) {
output_frame->SetRegister(fp.code(), fp_value);
}
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
fp_value, output_offset, value);
}
// The context can be gotten from the function so long as we don't
// optimize functions that need local contexts.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
value = reinterpret_cast<uint32_t>(function->context());
// The context for the bottommost output frame should also agree with the
// input frame.
ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
output_frame->SetFrameSlot(output_offset, value);
if (is_topmost) {
output_frame->SetRegister(cp.code(), value);
}
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
top_address + output_offset, output_offset, value);
}
// The function was mentioned explicitly in the BEGIN_FRAME.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
value = reinterpret_cast<uint32_t>(function);
// The function for the bottommost output frame should also agree with the
// input frame.
ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
top_address + output_offset, output_offset, value);
}
// Translate the rest of the frame.
for (unsigned i = 0; i < height; ++i) {
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
}
ASSERT(0 == output_offset);
// Compute this frame's PC, state, and continuation.
Code* non_optimized_code = function->shared()->code();
FixedArray* raw_data = non_optimized_code->deoptimization_data();
DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
Address start = non_optimized_code->instruction_start();
unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
output_frame->SetPc(pc_value);
if (is_topmost) {
output_frame->SetRegister(pc.code(), pc_value);
}
FullCodeGenerator::State state =
FullCodeGenerator::StateField::decode(pc_and_state);
output_frame->SetState(Smi::FromInt(state));
// Set the continuation for the topmost frame.
if (is_topmost) {
Code* continuation = (bailout_type_ == EAGER)
? Builtins::builtin(Builtins::NotifyDeoptimized)
: Builtins::builtin(Builtins::NotifyLazyDeoptimized);
output_frame->SetContinuation(
reinterpret_cast<uint32_t>(continuation->entry()));
}
if (output_count_ - 1 == frame_index) iterator->Done();
}
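The frame sizing in DoComputeFrame is plain pointer arithmetic: the output frame is the translated expression stack plus a fixed part, which the comments above describe as the incoming parameters (including the receiver) and the caller pc, caller fp, context and function slots. A worked example with small illustrative numbers, assuming exactly that composition and 32-bit pointers:

#include <cstdio>

int main() {
  const unsigned kPointerSize = 4;   // 32-bit ARM
  unsigned height = 3;               // expression stack slots
  unsigned height_in_bytes = height * kPointerSize;
  unsigned parameter_count = 2 + 1;  // formal parameters + receiver
  // Fixed part: parameters plus caller pc, caller fp, context and function.
  unsigned fixed_frame_size = (parameter_count + 4) * kPointerSize;
  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
  std::printf("output frame size = %u bytes\n", output_frame_size);  // 40
  return 0;
}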
#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
void Deoptimizer::EntryGenerator::Generate() {
GeneratePrologue();
// TOS: bailout-id; TOS+1: return address if not EAGER.
CpuFeatures::Scope scope(VFP3);
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
// Everything but sp, lr and pc, which will be saved but not restored.
RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
const int kDoubleRegsSize =
kDoubleSize * DwVfpRegister::kNumAllocatableRegisters;
// Save all VFP double registers before messing with them.
__ sub(sp, sp, Operand(kDoubleRegsSize));
for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
DwVfpRegister vfp_reg = DwVfpRegister::FromAllocationIndex(i);
int offset = i * kDoubleSize;
__ vstr(vfp_reg, sp, offset);
}
// Push all 16 registers (needed to populate FrameDescription::registers_).
__ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
// Get the bailout id from the stack.
__ ldr(r2, MemOperand(sp, kSavedRegistersAreaSize));
// If possible, get the address of the location in the code object in r3 (the
// return address for lazy deoptimization), and compute the fp-to-sp delta in
// register r4.
if (type() == EAGER) {
__ mov(r3, Operand(0));
// Correct one word for bailout id.
__ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
} else {
__ mov(r3, lr);
// Correct two words for bailout id and return address.
__ add(r4, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
}
__ sub(r4, fp, r4);
// Allocate a new deoptimizer object.
// Pass four arguments in r0 to r3 and fifth argument on stack.
__ PrepareCallCFunction(5, r5);
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(r1, Operand(type())); // bailout type,
// r2: bailout id already loaded.
// r3: code address or 0 already loaded.
__ str(r4, MemOperand(sp, 0 * kPointerSize)); // Fp-to-sp delta.
// Call Deoptimizer::New().
__ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
// Preserve "deoptimizer" object in register r0 and get the input
// frame descriptor pointer to r1 (deoptimizer->input_);
__ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
ASSERT(Register::kNumRegisters == kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kIntSize) + FrameDescription::registers_offset();
__ ldr(r2, MemOperand(sp, i * kPointerSize));
__ str(r2, MemOperand(r1, offset));
}
// Copy VFP registers to
// double_registers_[DoubleRegister::kNumAllocatableRegisters]
int double_regs_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ vldr(d0, sp, src_offset);
__ vstr(d0, r1, dst_offset);
}
// Remove the bailout id, the return address (if present), and the saved
// registers from the stack.
if (type() == EAGER) {
__ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
} else {
__ add(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
}
// Compute a pointer to the unwinding limit in register r2; that is
// the first stack slot not part of the input frame.
__ ldr(r2, MemOperand(r1, FrameDescription::frame_size_offset()));
__ add(r2, r2, sp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
Label pop_loop;
__ bind(&pop_loop);
__ pop(r4);
__ str(r4, MemOperand(r3, 0));
__ add(r3, r3, Operand(sizeof(uint32_t)));
__ cmp(r2, sp);
__ b(ne, &pop_loop);
// Compute the output frame in the deoptimizer.
__ push(r0); // Preserve deoptimizer object across call.
// r0: deoptimizer object; r1: scratch.
__ PrepareCallCFunction(1, r1);
// Call Deoptimizer::ComputeOutputFrames().
__ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
__ pop(r0); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop;
// Outer loop state: r0 = current "FrameDescription** output_",
// r1 = one past the last FrameDescription**.
__ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
__ ldr(r0, MemOperand(r0, Deoptimizer::output_offset())); // r0 is output_.
__ add(r1, r0, Operand(r1, LSL, 2));
__ bind(&outer_push_loop);
// Inner loop state: r2 = current FrameDescription*, r3 = loop index.
__ ldr(r2, MemOperand(r0, 0)); // output_[ix]
__ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
__ bind(&inner_push_loop);
__ sub(r3, r3, Operand(sizeof(uint32_t)));
// __ add(r6, r2, Operand(r3, LSL, 1));
__ add(r6, r2, Operand(r3));
__ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
__ push(r7);
__ cmp(r3, Operand(0));
__ b(ne, &inner_push_loop); // test for gt?
__ add(r0, r0, Operand(kPointerSize));
__ cmp(r0, r1);
__ b(lt, &outer_push_loop);
// In case of OSR, we have to restore the VFP double registers.
if (type() == OSR) {
UNIMPLEMENTED();
}
// Push state, pc, and continuation from the last output frame.
if (type() != OSR) {
__ ldr(r6, MemOperand(r2, FrameDescription::state_offset()));
__ push(r6);
}
__ ldr(r6, MemOperand(r2, FrameDescription::pc_offset()));
__ push(r6);
__ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset()));
__ push(r6);
// Push the registers from the last output frame.
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset = (i * kIntSize) + FrameDescription::registers_offset();
__ ldr(r6, MemOperand(r2, offset));
__ push(r6);
}
// Restore the registers from the stack.
__ ldm(ia_w, sp, restored_regs); // all but pc registers.
__ pop(ip); // remove sp
__ pop(ip); // remove lr
// Set up the roots register.
ExternalReference roots_address = ExternalReference::roots_address();
__ mov(r10, Operand(roots_address));
__ pop(ip); // remove pc
__ pop(r7); // get continuation, leave pc on stack
__ pop(lr);
__ Jump(r7);
__ stop("Unreachable.");
}
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
// Create a sequence of deoptimization entries. Note that any
// registers may be still live.
Label done;
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
if (type() == EAGER) {
__ nop();
} else {
// Emulate an ia32-style call by pushing the return address onto the stack.
__ push(lr);
}
__ mov(ip, Operand(i));
__ push(ip);
__ b(&done);
ASSERT(masm()->pc_offset() - start == table_entry_size_);
}
__ bind(&done);
}
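Each generated deoptimization table entry must come out at exactly table_entry_size_ bytes, which the ASSERT enforces: a nop (or push {lr} for lazy entries), a mov of the entry index into ip, a push of ip, and a branch to the shared tail, i.e. four 4-byte ARM instructions, assuming the mov of the index fits in one instruction. The arithmetic:

#include <cassert>

int main() {
  const int kInstrSize = 4;             // ARM instruction size
  const int kInstructionsPerEntry = 4;  // nop/push{lr}, mov ip, push{ip}, b
  const int kTableEntrySize = 16;       // table_entry_size_ above
  assert(kInstructionsPerEntry * kInstrSize == kTableEntrySize);
  return 0;
}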
#undef __
} } // namespace v8::internal

View File

@ -38,7 +38,12 @@ namespace internal {
Address ExitFrame::ComputeStackPointer(Address fp) {
return fp + ExitFrameConstants::kSPOffset;
Address marker = Memory::Address_at(fp + ExitFrameConstants::kMarkerOffset);
Address sp = fp + ExitFrameConstants::kSPOffset;
if (marker == NULL) {
sp -= DwVfpRegister::kNumRegisters * kDoubleSize + 2 * kPointerSize;
}
return sp;
}
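When the marker word is 0, the exit frame also holds all saved VFP double registers plus two pointer-sized words (the marker and the alignment slot pushed by EnterExitFrame), so the recovered sp has to be moved down by that amount. Worked numbers, assuming 16 double registers and 32-bit pointers:

#include <cstdio>

int main() {
  const unsigned kNumDoubleRegisters = 16;  // DwVfpRegister::kNumRegisters
  const unsigned kDoubleSize = 8;
  const unsigned kPointerSize = 4;
  unsigned adjustment =
      kNumDoubleRegisters * kDoubleSize + 2 * kPointerSize;  // 136 bytes
  std::printf("sp moves down by %u bytes when doubles are saved\n",
              adjustment);
  return 0;
}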

View File

@ -74,6 +74,18 @@ static const RegList kCalleeSaved =
static const int kNumCalleeSaved = 7 + kR9Available;
// Number of registers for which space is reserved in safepoints. Must be a
// multiple of 8.
// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
static const int kNumSafepointRegisters = 16;
// Define the list of registers actually saved at safepoints.
// Note that the number of saved registers may be smaller than the reserved
// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
static const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
static const int kNumSafepointSavedRegisters =
kNumJSCallerSaved + kNumCalleeSaved;
// ----------------------------------------------------
@ -99,7 +111,9 @@ class ExitFrameConstants : public AllStatic {
static const int kCodeOffset = -1 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
static const int kSavedRegistersOffset = 0 * kPointerSize;
// TODO(regis): Use a patched sp value on the stack instead.
// A marker of 0 indicates that double registers are saved.
static const int kMarkerOffset = -2 * kPointerSize;
// The caller fields are below the frame pointer on the stack.
static const int kCallerFPOffset = +0 * kPointerSize;
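kNumSafepointRegisters above reserves 16 slots, but only the registers in kSafepointSavedRegisters are actually stored; the remainder is padding that keeps the reserved block a multiple of 8. Worked numbers, assuming four JS caller-saved registers (r0-r3) and r9 being available; both counts are assumptions for the example:

#include <cstdio>

int main() {
  const int kR9Available = 1;                    // assumption
  const int kNumJSCallerSaved = 4;               // r0-r3 (assumption)
  const int kNumCalleeSaved = 7 + kR9Available;  // from the constant above
  const int kNumSafepointRegisters = 16;         // reserved, multiple of 8
  int saved = kNumJSCallerSaved + kNumCalleeSaved;  // saved register slots
  int padding = kNumSafepointRegisters - saved;     // unsaved padding slots
  std::printf("saved=%d, padding=%d slots\n", saved, padding);  // 12 and 4
  return 0;
}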

View File

@ -1,4 +1,4 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -171,23 +171,22 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
}
}
// Check the stack for overflow or break request.
{ Comment cmnt(masm_, "[ Stack check");
__ LoadRoot(r2, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(r2));
StackCheckStub stub;
__ mov(ip,
Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
RelocInfo::CODE_TARGET),
LeaveCC,
lo);
__ Call(ip, lo);
}
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
// Check the stack for overflow or break request.
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailout(info->function(), NO_REGISTERS);
Label ok;
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
StackCheckStub stub;
__ CallStub(&stub);
__ bind(&ok);
}
{ Comment cmnt(masm_, "[ Body");
ASSERT(loop_depth() == 0);
VisitStatements(function()->body());
@ -200,6 +199,25 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
}
EmitReturnSequence();
// Force emit the constant pool, so it doesn't get emitted in the middle
// of the stack check table.
masm()->CheckConstPool(true, false);
}
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
Comment cmnt(masm_, "[ Stack check");
Label ok;
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
StackCheckStub stub;
__ CallStub(&stub);
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
RecordStackCheck(stmt->OsrEntryId());
}
@ -275,6 +293,7 @@ void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
// For simplicity we always test the accumulator register.
codegen()->Move(result_register(), slot);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
codegen()->DoTest(true_label_, false_label_, fall_through_);
}
@ -297,12 +316,16 @@ void FullCodeGenerator::StackValueContext::Plug(
void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
true,
true_label_,
false_label_);
if (index == Heap::kUndefinedValueRootIndex ||
index == Heap::kNullValueRootIndex ||
index == Heap::kFalseValueRootIndex) {
__ b(false_label_);
if (false_label_ != fall_through_) __ b(false_label_);
} else if (index == Heap::kTrueValueRootIndex) {
__ b(true_label_);
if (true_label_ != fall_through_) __ b(true_label_);
} else {
__ LoadRoot(result_register(), index);
codegen()->DoTest(true_label_, false_label_, fall_through_);
@ -321,29 +344,34 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
// Immediates can be pushed directly.
// Immediates cannot be pushed directly.
__ mov(result_register(), Operand(lit));
__ push(result_register());
}
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
true,
true_label_,
false_label_);
ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
__ b(false_label_);
if (false_label_ != fall_through_) __ b(false_label_);
} else if (lit->IsTrue() || lit->IsJSObject()) {
__ b(true_label_);
if (true_label_ != fall_through_) __ b(true_label_);
} else if (lit->IsString()) {
if (String::cast(*lit)->length() == 0) {
if (false_label_ != fall_through_) __ b(false_label_);
__ b(false_label_);
} else {
__ b(true_label_);
if (true_label_ != fall_through_) __ b(true_label_);
}
} else if (lit->IsSmi()) {
if (Smi::cast(*lit)->value() == 0) {
__ b(false_label_);
if (false_label_ != fall_through_) __ b(false_label_);
} else {
__ b(true_label_);
if (true_label_ != fall_through_) __ b(true_label_);
}
} else {
// For simplicity we always test the accumulator register.
@ -383,13 +411,14 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count,
// For simplicity we always test the accumulator register.
__ Drop(count);
__ Move(result_register(), reg);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
codegen()->DoTest(true_label_, false_label_, fall_through_);
}
void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
Label* materialize_false) const {
ASSERT_EQ(materialize_true, materialize_false);
ASSERT(materialize_true == materialize_false);
__ bind(materialize_true);
}
@ -424,8 +453,8 @@ void FullCodeGenerator::StackValueContext::Plug(
void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
Label* materialize_false) const {
ASSERT(materialize_false == false_label_);
ASSERT(materialize_true == true_label_);
ASSERT(materialize_false == false_label_);
}
@ -449,6 +478,10 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
void FullCodeGenerator::TestContext::Plug(bool flag) const {
codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
true,
true_label_,
false_label_);
if (flag) {
if (true_label_ != fall_through_) __ b(true_label_);
} else {
@ -529,6 +562,33 @@ void FullCodeGenerator::Move(Slot* dst,
}
void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
bool should_normalize,
Label* if_true,
Label* if_false) {
// Only prepare for bailouts before splits if we're in a test
// context. Otherwise, we let the Visit function deal with the
// preparation to avoid preparing with the same AST id twice.
if (!context()->IsTest() || !info_->IsOptimizable()) return;
Label skip;
if (should_normalize) __ b(&skip);
ForwardBailoutStack* current = forward_bailout_stack_;
while (current != NULL) {
PrepareForBailout(current->expr(), state);
current = current->parent();
}
if (should_normalize) {
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(r0, ip);
Split(eq, if_true, if_false, NULL);
__ bind(&skip);
}
}
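The forward bailout stack walked inside PrepareForBailoutBeforeSplit is a parent-linked list of expressions whose bailout points still need to be recorded before the test value is split into true and false branches. A simplified standalone model of that walk, with placeholder types:

#include <cstddef>
#include <cstdio>

struct ForwardBailoutEntry {
  int expression_id;            // expression with a pending bailout point
  ForwardBailoutEntry* parent;  // enclosing entry, NULL at the bottom
};

// Record a bailout for every pending entry, innermost first, mirroring the
// while-loop over forward_bailout_stack_ above.
void RecordPendingBailouts(ForwardBailoutEntry* top) {
  for (ForwardBailoutEntry* current = top; current != NULL;
       current = current->parent) {
    std::printf("PrepareForBailout(expr #%d, state)\n",
                current->expression_id);
  }
}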
void FullCodeGenerator::EmitDeclaration(Variable* variable,
Variable::Mode mode,
FunctionLiteral* function) {
@ -651,6 +711,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
ZoneList<CaseClause*>* clauses = stmt->cases();
CaseClause* default_clause = NULL; // Can occur anywhere in the list.
@ -716,6 +778,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
}
__ bind(nested_statement.break_target());
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
}
@ -830,25 +893,17 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
EmitAssignment(stmt->each());
// Generate code for the body of the loop.
Label stack_limit_hit, stack_check_done;
Visit(stmt->body());
__ StackLimitCheck(&stack_limit_hit);
__ bind(&stack_check_done);
// Generate code for going to the next element by incrementing
// the index (smi) stored on top of the stack.
__ bind(loop_statement.continue_target());
__ pop(r0);
__ add(r0, r0, Operand(Smi::FromInt(1)));
__ push(r0);
__ b(&loop);
// Slow case for the stack limit check.
StackCheckStub stack_check_stub;
__ bind(&stack_limit_hit);
__ CallStub(&stack_check_stub);
__ b(&stack_check_done);
EmitStackCheck(stmt);
__ b(&loop);
// Remove the pointers stored on the stack.
__ bind(loop_statement.break_target());
@ -1195,12 +1250,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// Fall through.
case ObjectLiteral::Property::COMPUTED:
if (key->handle()->IsSymbol()) {
VisitForAccumulatorValue(value);
__ mov(r2, Operand(key->handle()));
__ ldr(r1, MemOperand(sp));
if (property->emit_store()) {
VisitForAccumulatorValue(value);
__ mov(r2, Operand(key->handle()));
__ ldr(r1, MemOperand(sp));
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
}
break;
}
@ -1295,6 +1353,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Update the write barrier for the array store with r0 as the scratch
// register.
__ RecordWrite(r1, Operand(offset), r2, result_register());
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
if (result_saved) {
@ -1341,13 +1401,27 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
break;
case KEYED_PROPERTY:
if (expr->is_compound()) {
VisitForStackValue(property->obj());
VisitForAccumulatorValue(property->key());
if (property->is_arguments_access()) {
VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
__ ldr(r0, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0));
__ push(r0);
__ mov(r0, Operand(property->key()->AsLiteral()->handle()));
} else {
VisitForStackValue(property->obj());
VisitForAccumulatorValue(property->key());
}
__ ldr(r1, MemOperand(sp, 0));
__ push(r0);
} else {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
if (property->is_arguments_access()) {
VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
__ ldr(r1, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0));
__ mov(r0, Operand(property->key()->AsLiteral()->handle()));
__ Push(r1, r0);
} else {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
}
}
break;
}
@ -1367,6 +1441,12 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
}
// For property compound assignments we need another deoptimization
// point after the property load.
if (property != NULL) {
PrepareForBailoutForId(expr->compound_bailout_id(), TOS_REG);
}
Token::Value op = expr->binary_op();
ConstantOperand constant = ShouldInlineSmiCase(op)
? GetConstantOperand(op, expr->target(), expr->value())
@ -1392,6 +1472,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
} else {
EmitBinaryOp(op, mode);
}
// Deoptimization point in case the binary operation may have side effects.
PrepareForBailout(expr->binary_operation(), TOS_REG);
} else {
VisitForAccumulatorValue(expr->value());
}
@ -1676,13 +1759,14 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
if (key->IsPropertyName()) {
VisitForAccumulatorValue(expr->obj());
EmitNamedPropertyLoad(expr);
context()->Plug(r0);
} else {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
__ pop(r1);
EmitKeyedPropertyLoad(expr);
context()->Plug(r0);
}
context()->Plug(r0);
}
void FullCodeGenerator::EmitCallWithIC(Call* expr,
@ -1703,6 +1787,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, in_loop);
EmitCallIC(ic, mode);
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->Plug(r0);
@ -1736,6 +1821,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arg_count, in_loop);
__ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
EmitCallIC(ic, mode);
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, r0); // Drop the key still on the stack.
@ -1756,6 +1842,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, r0);
@ -1763,6 +1850,12 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
void FullCodeGenerator::VisitCall(Call* expr) {
#ifdef DEBUG
// We want to verify that RecordJSReturnSite gets called on all paths
// through this function. Avoid early returns.
expr->return_is_recorded_ = false;
#endif
Comment cmnt(masm_, "[ Call");
Expression* fun = expr->expression();
Variable* var = fun->AsVariableProxy()->AsVariable();
@ -1814,6 +1907,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, r0);
@ -1918,6 +2012,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Emit function call.
EmitCallWithStub(expr);
}
#ifdef DEBUG
// RecordJSReturnSite should have been called.
ASSERT(expr->return_is_recorded_);
#endif
}
@ -1965,8 +2064,9 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
__ BranchOnSmi(r0, if_true);
__ b(if_false);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ tst(r0, Operand(kSmiTagMask));
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
@ -1984,6 +2084,7 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ tst(r0, Operand(kSmiTagMask | 0x80000000));
Split(eq, if_true, if_false, fall_through);
@ -2016,6 +2117,7 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
__ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
__ b(lt, if_false);
__ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(le, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@ -2036,6 +2138,7 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
__ BranchOnSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(ge, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@ -2058,6 +2161,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
__ tst(r1, Operand(1 << Map::kIsUndetectable));
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(ne, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@ -2081,6 +2185,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
// used in a few functions in runtime.js which should not normally be hit by
// this compiler.
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ jmp(if_false);
context()->Plug(if_true, if_false);
}
@ -2100,6 +2205,7 @@ void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
__ BranchOnSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@ -2120,6 +2226,7 @@ void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
__ BranchOnSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@ -2140,6 +2247,7 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
__ BranchOnSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@ -2171,6 +2279,7 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
__ bind(&check_frame_marker);
__ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
__ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@ -2193,6 +2302,7 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
__ pop(r1);
__ cmp(r0, r1);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@ -2646,11 +2756,12 @@ void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
RegExpConstructResultStub stub;
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kRegExpConstructResult, 3);
__ CallStub(&stub);
context()->Plug(r0);
}
@ -2769,9 +2880,8 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
__ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
__ tst(r0, Operand(String::kContainsCachedArrayIndexMask));
__ b(eq, if_true);
__ b(if_false);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
@ -2894,6 +3004,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// Notice that the labels are swapped.
context()->PrepareTest(&materialize_true, &materialize_false,
&if_false, &if_true, &fall_through);
if (context()->IsTest()) ForwardBailoutToChild(expr);
VisitForControl(expr->expression(), if_true, if_false, fall_through);
context()->Plug(if_false, if_true); // Labels swapped.
break;
@ -3013,14 +3124,25 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ push(r0);
EmitNamedPropertyLoad(prop);
} else {
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
if (prop->is_arguments_access()) {
VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
__ ldr(r0, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0));
__ push(r0);
__ mov(r0, Operand(prop->key()->AsLiteral()->handle()));
} else {
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
}
__ ldr(r1, MemOperand(sp, 0));
__ push(r0);
EmitKeyedPropertyLoad(prop);
}
}
// We need a second deoptimization point after loading the value
// in case evaluating the property load may have a side effect.
PrepareForBailout(expr->increment(), TOS_REG);
// Call ToNumber only if operand is not a smi.
Label no_conversion;
__ BranchOnSmi(r0, &no_conversion);
@ -3063,6 +3185,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ sub(r0, r0, Operand(Smi::FromInt(count_value)));
}
__ mov(r1, Operand(Smi::FromInt(count_value)));
// Record position before stub call.
SetSourcePosition(expr->position());
GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE, r1, r0);
__ CallStub(&stub);
__ bind(&done);
@ -3129,6 +3255,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
// Use a regular load, not a contextual load, to avoid a reference
// error.
EmitCallIC(ic, RelocInfo::CODE_TARGET);
PrepareForBailout(expr, TOS_REG);
context()->Plug(r0);
} else if (proxy != NULL &&
proxy->var()->AsSlot() != NULL &&
@ -3144,12 +3271,13 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
__ mov(r0, Operand(proxy->name()));
__ Push(cp, r0);
__ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
PrepareForBailout(expr, TOS_REG);
__ bind(&done);
context()->Plug(r0);
} else {
// This expression cannot throw a reference error at the top level.
Visit(expr);
context()->HandleExpression(expr);
}
}
@ -3174,6 +3302,8 @@ bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
{ AccumulatorValueContext context(this);
VisitForTypeofValue(left_unary->expression());
}
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
if (check->Equals(Heap::number_symbol())) {
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, if_true);
@ -3277,6 +3407,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::IN:
VisitForStackValue(expr->right());
__ InvokeBuiltin(Builtins::IN, CALL_JS);
PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(r0, ip);
Split(eq, if_true, if_false, fall_through);
@ -3286,6 +3417,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForStackValue(expr->right());
InstanceofStub stub;
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
// The stub returns 0 for true.
__ tst(r0, r0);
Split(eq, if_true, if_false, fall_through);
@ -3344,6 +3476,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
: NO_COMPARE_FLAGS;
CompareStub stub(cc, strict, flags, r1, r0);
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ cmp(r0, Operand(0, RelocInfo::NONE));
Split(cc, if_true, if_false, fall_through);
}
@ -3365,6 +3498,7 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
&if_true, &if_false, &fall_through);
VisitForAccumulatorValue(expr->expression());
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ LoadRoot(r1, Heap::kNullValueRootIndex);
__ cmp(r0, r1);
if (expr->is_strict()) {

View File

@ -907,6 +907,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
// Returns the code marker, or 0 if the code is not marked.
static inline int InlinedICSiteMarker(Address address,
Address* inline_end_address) {
if (V8::UseCrankshaft()) return false;
// If the instruction after the call site is not the pseudo instruction nop1
// then this is not related to an inlined in-object property load. The nop1
// instruction is located just after the call to the IC in the deferred code
@ -940,6 +942,8 @@ static inline int InlinedICSiteMarker(Address address,
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
if (V8::UseCrankshaft()) return false;
// Find the end of the inlined code for handling the load if this is an
// inlined IC call site.
Address inline_end_address;
@ -1019,6 +1023,8 @@ bool LoadIC::PatchInlinedContextualLoad(Address address,
bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
if (V8::UseCrankshaft()) return false;
// Find the end of the inlined code for the store if there is an
// inlined version of the store.
Address inline_end_address;
@ -1069,6 +1075,8 @@ bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
if (V8::UseCrankshaft()) return false;
Address inline_end_address;
if (InlinedICSiteMarker(address, &inline_end_address)
!= Assembler::PROPERTY_ACCESS_INLINED) {
@ -1087,6 +1095,8 @@ bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
if (V8::UseCrankshaft()) return false;
// Find the end of the inlined code for handling the store if this is an
// inlined IC call site.
Address inline_end_address;
@ -1315,7 +1325,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
char_at_generator.GenerateFast(masm);
__ Ret();
ICRuntimeCallHelper call_helper;
StubRuntimeCallHelper call_helper;
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&miss);
@ -2307,9 +2317,72 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
}
void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
// -- r2 : name
// -- lr : return address
// -----------------------------------
__ Push(r1, r2, r0);
// Do tail-call to runtime routine.
__ TailCallRuntime(Runtime::kSetProperty, 3, 1);
}
#undef __
Condition CompareIC::ComputeCondition(Token::Value op) {
switch (op) {
case Token::EQ_STRICT:
case Token::EQ:
return eq;
case Token::LT:
return lt;
case Token::GT:
// Reverse left and right operands to obtain ECMA-262 conversion order.
return lt;
case Token::LTE:
// Reverse left and right operands to obtain ECMA-262 conversion order.
return ge;
case Token::GTE:
return ge;
default:
UNREACHABLE();
return no_condition;
}
}
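The lt/ge results for GT and LTE look inverted only because the IC compares with the operands swapped to preserve the ECMA-262 conversion order; with swapped operands the conditions line up again. A quick sanity check of the mapping:

#include <cassert>

int main() {
  int a = 3, b = 5;
  assert((a > b) == (b < a));    // Token::GT  -> lt once operands are swapped
  assert((a <= b) == (b >= a));  // Token::LTE -> ge once operands are swapped
  return 0;
}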
void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
HandleScope scope;
Handle<Code> rewritten;
#ifdef DEBUG
State previous_state = GetState();
#endif
State state = TargetState(x, y);
if (state == GENERIC) {
CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
rewritten = stub.GetCode();
} else {
ICCompareStub stub(op_, state);
rewritten = stub.GetCode();
}
set_target(*rewritten);
#ifdef DEBUG
if (FLAG_trace_ic) {
PrintF("[CompareIC (%s->%s)#%s]\n",
GetStateName(previous_state),
GetStateName(state),
Token::Name(op_));
}
#endif
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

2081 src/arm/lithium-arm.cc Normal file

File diff suppressed because it is too large

2068 src/arm/lithium-arm.h Normal file

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,265 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ARM_LITHIUM_CODEGEN_ARM_H_
#define V8_ARM_LITHIUM_CODEGEN_ARM_H_
#include "arm/lithium-arm.h"
#include "deoptimizer.h"
#include "safepoint-table.h"
#include "scopes.h"
namespace v8 {
namespace internal {
// Forward declarations.
class LDeferredCode;
class SafepointGenerator;
class LCodeGen BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: chunk_(chunk),
masm_(assembler),
info_(info),
current_block_(-1),
current_instruction_(-1),
instructions_(chunk->instructions()),
deoptimizations_(4),
deoptimization_literals_(8),
inlined_function_count_(0),
scope_(chunk->graph()->info()->scope()),
status_(UNUSED),
deferred_(8),
osr_pc_offset_(-1) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
bool GenerateCode();
// Finish the code by setting stack height, safepoint, and bailout
// information on it.
void FinishCode(Handle<Code> code);
// Deferred code support.
void DoDeferredNumberTagD(LNumberTagD* instr);
void DoDeferredNumberTagI(LNumberTagI* instr);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LGoto* instr);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
private:
enum Status {
UNUSED,
GENERATING,
DONE,
ABORTED
};
bool is_unused() const { return status_ == UNUSED; }
bool is_generating() const { return status_ == GENERATING; }
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
MacroAssembler* masm() const { return masm_; }
int GetNextEmittedBlock(int block);
LInstruction* GetNextInstruction();
void EmitClassOfTest(Label* if_true,
Label* if_false,
Handle<String> class_name,
Register input,
Register temporary,
Register temporary2);
int StackSlotCount() const { return chunk()->spill_slot_count(); }
int ParameterCount() const { return scope()->num_parameters(); }
void Abort(const char* format, ...);
void Comment(const char* format, ...);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
// Code generation passes. Returns true if code generation should
// continue.
bool GeneratePrologue();
bool GenerateBody();
bool GenerateDeferredCode();
bool GenerateSafepointTable();
void CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr);
void CallRuntime(Runtime::Function* function,
int num_arguments,
LInstruction* instr);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
LInstruction* instr) {
Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, num_arguments, instr);
}
// Generate a direct call to a known function. Expects the function
// to be in r1.
void CallKnownFunction(Handle<JSFunction> function,
int arity,
LInstruction* instr);
void LoadPrototype(Register result, Handle<JSObject> prototype);
void RegisterLazyDeoptimization(LInstruction* instr);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
void AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
void PopulateDeoptimizationLiteralsWithInlinedFunctions();
Register ToRegister(int index) const;
DoubleRegister ToDoubleRegister(int index) const;
// LOperand must be a register.
Register ToRegister(LOperand* op) const;
// LOperand is loaded into scratch, unless already a register.
Register EmitLoadRegister(LOperand* op, Register scratch);
// LOperand must be a double register.
DoubleRegister ToDoubleRegister(LOperand* op) const;
// LOperand is loaded into dbl_scratch, unless already a double register.
DoubleRegister EmitLoadDoubleRegister(LOperand* op,
SwVfpRegister flt_scratch,
DoubleRegister dbl_scratch);
int ToInteger32(LConstantOperand* op) const;
Operand ToOperand(LOperand* op);
MemOperand ToMemOperand(LOperand* op) const;
// Specific math operations - used from DoUnaryMathOperation.
void DoMathAbs(LUnaryMathOperation* instr);
void DoMathFloor(LUnaryMathOperation* instr);
void DoMathSqrt(LUnaryMathOperation* instr);
// Support for recording safepoint and position information.
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index);
void RecordPosition(int position);
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
void EmitBranch(int left_block, int right_block, Condition cc);
void EmitCmpI(LOperand* left, LOperand* right);
void EmitNumberUntagD(Register input,
DoubleRegister result,
LEnvironment* env);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
Condition EmitTypeofIs(Label* true_label, Label* false_label,
Register input, Handle<String> type_name);
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
int current_block_;
int current_instruction_;
const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
friend class LDeferredCode;
friend class LEnvironment;
friend class SafepointGenerator;
DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};
class LDeferredCode: public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
: codegen_(codegen), external_exit_(NULL) {
codegen->AddDeferredCode(this);
}
virtual ~LDeferredCode() { }
virtual void Generate() = 0;
void SetExit(Label *exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
protected:
LCodeGen* codegen() const { return codegen_; }
MacroAssembler* masm() const { return codegen_->masm(); }
private:
LCodeGen* codegen_;
Label entry_;
Label exit_;
Label* external_exit_;
};
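A deferred-code object like this is registered with the code generator, the fast path typically branches to entry() on the slow case, and all deferred bodies are emitted after the main body, each jumping back through exit(). A standalone model of that pattern under those assumptions; the types here are illustrative, not the V8 classes:

#include <cstddef>
#include <cstdio>
#include <vector>

class DeferredCode {
 public:
  virtual ~DeferredCode() { }
  virtual void Generate() = 0;
};

class DeferredSlowPath : public DeferredCode {
 public:
  virtual void Generate() {
    std::printf("emit slow path, then branch back through exit()\n");
  }
};

int main() {
  std::vector<DeferredCode*> deferred;           // LCodeGen::deferred_
  deferred.push_back(new DeferredSlowPath());    // AddDeferredCode(...)
  std::printf("emit main body; branch to entry() on the slow case\n");
  for (std::size_t i = 0; i < deferred.size(); ++i) {  // GenerateDeferredCode
    deferred[i]->Generate();
    delete deferred[i];
  }
  return 0;
}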
} } // namespace v8::internal
#endif // V8_ARM_LITHIUM_CODEGEN_ARM_H_

View File

@ -1,4 +1,4 @@
// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -171,13 +171,6 @@ void MacroAssembler::Ret(Condition cond) {
}
void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
LoadRoot(ip, Heap::kStackLimitRootIndex);
cmp(sp, Operand(ip));
b(lo, on_stack_overflow);
}
void MacroAssembler::Drop(int count, Condition cond) {
if (count > 0) {
add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
@ -447,6 +440,34 @@ void MacroAssembler::RecordWrite(Register object,
}
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of contiguous register values starting with r0:
ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
// Safepoints expect a block of kNumSafepointRegisters values on the
// stack, so adjust the stack for unsaved registers.
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
ASSERT(num_unsaved >= 0);
sub(sp, sp, Operand(num_unsaved * kPointerSize));
stm(db_w, sp, kSafepointSavedRegisters);
}
void MacroAssembler::PopSafepointRegisters() {
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
ldm(ia_w, sp, kSafepointSavedRegisters);
add(sp, sp, Operand(num_unsaved * kPointerSize));
}
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// The registers are pushed starting with the highest encoding,
// which means that lowest encodings are closest to the stack pointer.
ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
return reg_code;
}
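Because stm db_w stores the lowest-numbered register at the lowest address, and PushSafepointRegisters asserts that the saved set is a contiguous block starting at r0, a register's safepoint slot offset from sp is simply its code. An illustration of that mapping, assuming r9 is available so r0 through r11 are saved:

#include <cstdio>

int main() {
  const int kNumSafepointSavedRegisters = 12;  // r0..r11 (assumption)
  for (int reg_code = 0; reg_code < kNumSafepointSavedRegisters; ++reg_code) {
    // SafepointRegisterStackIndex(reg_code) == reg_code for a contiguous
    // block starting at r0: the lowest-numbered register sits closest to sp.
    std::printf("r%d -> slot %d (sp + %d * kPointerSize)\n",
                reg_code, reg_code, reg_code);
  }
  return 0;
}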
void MacroAssembler::Ldrd(Register dst1, Register dst2,
const MemOperand& src, Condition cond) {
ASSERT(src.rm().is(no_reg));
@ -515,12 +536,8 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
}
void MacroAssembler::EnterExitFrame() {
// Compute the argv pointer and keep it in a callee-saved register.
void MacroAssembler::EnterExitFrame(bool save_doubles) {
// r0 is argc.
add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
sub(r6, r6, Operand(kPointerSize));
// Compute callee's stack pointer before making changes and save it as
// ip register so that it is restored as sp register on exit, thereby
// popping the args.
@ -528,6 +545,9 @@ void MacroAssembler::EnterExitFrame() {
// ip = sp + kPointerSize * #args;
add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
// Compute the argv pointer and keep it in a callee-saved register.
sub(r6, ip, Operand(kPointerSize));
// Prepare the stack to be aligned when calling into C. After this point there
// are 5 pushes before the call into C, so the stack needs to be aligned after
// 5 pushes.
@ -558,6 +578,28 @@ void MacroAssembler::EnterExitFrame() {
// Setup argc and the builtin function in callee-saved registers.
mov(r4, Operand(r0));
mov(r5, Operand(r1));
// Optionally save all double registers.
if (save_doubles) {
// TODO(regis): Use vstrm instruction.
// The stack alignment code above made sp unaligned, so add space for one
// more double register and use aligned addresses.
ASSERT(kDoubleSize == frame_alignment);
// Mark the frame as containing doubles by pushing a non-valid return
// address, i.e. 0.
ASSERT(ExitFrameConstants::kMarkerOffset == -2 * kPointerSize);
mov(ip, Operand(0)); // Marker and alignment word.
push(ip);
int space = DwVfpRegister::kNumRegisters * kDoubleSize + kPointerSize;
sub(sp, sp, Operand(space));
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
vstr(reg, sp, i * kDoubleSize + kPointerSize);
}
// Note that d0 will be accessible at fp - 2*kPointerSize -
// DwVfpRegister::kNumRegisters * kDoubleSize, since the code slot and the
// alignment word were pushed after the fp.
}
}
@ -592,7 +634,18 @@ int MacroAssembler::ActivationFrameAlignment() {
}
void MacroAssembler::LeaveExitFrame() {
void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// Optionally restore all double registers.
if (save_doubles) {
// TODO(regis): Use vldrm instruction.
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
// Register d15 is just below the marker.
const int offset = ExitFrameConstants::kMarkerOffset;
vldr(reg, fp, (i - DwVfpRegister::kNumRegisters) * kDoubleSize + offset);
}
}
// Clear top frame.
mov(r3, Operand(0, RelocInfo::NONE));
mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
@ -756,7 +809,15 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
// Invoke the cached code.
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
if (V8::UseCrankshaft()) {
// TODO(kasperl): For now, we always call indirectly through the
// code field in the function to allow recompilation to take effect
// without changing any of the call sites.
ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
InvokeCode(r3, expected, actual, flag);
} else {
InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
}
}
@ -1514,6 +1575,16 @@ void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
}
void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
Runtime::Function* function = Runtime::FunctionForId(id);
mov(r0, Operand(function->nargs));
mov(r1, Operand(ExternalReference(function)));
CEntryStub stub(1);
stub.SaveDoubles();
CallStub(&stub);
}
void MacroAssembler::CallExternalReference(const ExternalReference& ext,
int num_arguments) {
mov(r0, Operand(num_arguments));

View File

@ -1,4 +1,4 @@
// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -224,6 +224,12 @@ class MacroAssembler: public Assembler {
}
}
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
void PushSafepointRegisters();
void PopSafepointRegisters();
static int SafepointRegisterStackIndex(int reg_code);
// Load two consecutive registers with two consecutive memory locations.
void Ldrd(Register dst1,
Register dst2,
@ -236,11 +242,6 @@ class MacroAssembler: public Assembler {
const MemOperand& dst,
Condition cond = al);
// ---------------------------------------------------------------------------
// Stack limit support
void StackLimitCheck(Label* on_stack_limit_hit);
// ---------------------------------------------------------------------------
// Activation frames
@ -254,10 +255,10 @@ class MacroAssembler: public Assembler {
// Expects the number of arguments in register r0 and
// the builtin function to call in register r1. Exits with argc in
// r4, argv in r6, and the builtin function to call in r5.
void EnterExitFrame();
void EnterExitFrame(bool save_doubles);
// Leave the current exit frame. Expects the return value in r0.
void LeaveExitFrame();
void LeaveExitFrame(bool save_doubles);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
@ -575,6 +576,7 @@ class MacroAssembler: public Assembler {
// Call a runtime routine.
void CallRuntime(Runtime::Function* f, int num_arguments);
void CallRuntimeSaveDoubles(Runtime::FunctionId id);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments);
@ -665,6 +667,14 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Smi utilities
void SmiTag(Register reg, SBit s = LeaveCC) {
add(reg, reg, Operand(reg), s);
}
void SmiUntag(Register reg) {
mov(reg, Operand(reg, ASR, kSmiTagSize));
}
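A minimal sketch of the tagging these two helpers implement, assuming the 32-bit layout where kSmiTag == 0 and kSmiTagSize == 1 (which is why adding a register to itself performs the tag shift):

// Illustrative helpers, not part of MacroAssembler.
static inline int32_t TagSmi(int32_t value) {
  return value + value;   // same as value << kSmiTagSize; the tag bit ends up 0
}
static inline int32_t UntagSmi(int32_t tagged) {
  return tagged >> 1;     // arithmetic shift right by kSmiTagSize
}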
// Jump if either of the registers contain a non-smi.
void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
// Jump if either of the registers contain a smi.
@ -766,6 +776,17 @@ class CodePatcher {
#endif // ENABLE_DEBUGGER_SUPPORT
// Helper class for generating code or data associated with the code
// right after a call instruction. As an example this can be used to
// generate safepoint data after calls for crankshaft.
class PostCallGenerator {
public:
PostCallGenerator() { }
virtual ~PostCallGenerator() { }
virtual void Generate() = 0;
};
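As a usage sketch (the subclass name and the recorded data are illustrative assumptions, not part of this patch), a code generator would hand an instance of a subclass like this to a call-emitting helper, which invokes Generate() right after emitting the call:

// Hypothetical subclass: emit per-call-site data after the call.
class ExampleSafepointRecorder : public PostCallGenerator {
 public:
  virtual void Generate() {
    // Record data tied to the return address of the call that was just
    // emitted, e.g. a safepoint table entry for crankshaft.
  }
};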
// -----------------------------------------------------------------------------
// Static helper functions.

View File

@ -74,6 +74,7 @@ class Debugger {
Simulator* sim_;
int32_t GetRegisterValue(int regnum);
double GetVFPDoubleRegisterValue(int regnum);
bool GetValue(const char* desc, int32_t* value);
bool GetVFPSingleValue(const char* desc, float* value);
bool GetVFPDoubleValue(const char* desc, double* value);
@ -168,6 +169,11 @@ int32_t Debugger::GetRegisterValue(int regnum) {
}
double Debugger::GetVFPDoubleRegisterValue(int regnum) {
return sim_->get_double_from_d_register(regnum);
}
bool Debugger::GetValue(const char* desc, int32_t* value) {
int regnum = Registers::Number(desc);
if (regnum != kNoRegister) {
@ -309,6 +315,11 @@ void Debugger::Debug() {
value = GetRegisterValue(i);
PrintF("%3s: 0x%08x %10d\n", Registers::Name(i), value, value);
}
for (int i = 0; i < kNumVFPDoubleRegisters; i++) {
dvalue = GetVFPDoubleRegisterValue(i);
PrintF("%3s: %f\n",
VFPRegisters::Name(i, true), dvalue);
}
} else {
if (GetValue(arg1, &value)) {
PrintF("%s: 0x%08x %d \n", arg1, value, value);
@ -837,6 +848,11 @@ void Simulator::set_pc(int32_t value) {
}
bool Simulator::has_bad_pc() const {
return ((registers_[pc] == bad_lr) || (registers_[pc] == end_sim_pc));
}
// Raw access to the PC register without the special adjustment when reading.
int32_t Simulator::get_pc() const {
return registers_[pc];
@ -1510,7 +1526,8 @@ void Simulator::HandleRList(Instr* instr, bool load) {
typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
int32_t arg1,
int32_t arg2,
int32_t arg3);
int32_t arg3,
int32_t arg4);
typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
int32_t arg1,
int32_t arg2,
@ -1533,6 +1550,8 @@ void Simulator::SoftwareInterrupt(Instr* instr) {
int32_t arg1 = get_register(r1);
int32_t arg2 = get_register(r2);
int32_t arg3 = get_register(r3);
int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
int32_t arg4 = *stack_pointer;
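// Under the ARM AAPCS (an assumption about the target ABI, not something
// this patch defines), only the first four integer arguments are passed in
// r0-r3; a fifth argument occupies the first stack slot, which is why arg4
// is read through the simulated sp above.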
// This is dodgy but it works because the C entry stubs are never moved.
// See comment in codegen-arm.cc and bug 1242173.
int32_t saved_lr = get_register(lr);
@ -1561,19 +1580,20 @@ void Simulator::SoftwareInterrupt(Instr* instr) {
reinterpret_cast<SimulatorRuntimeCall>(external);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF(
"Call to host function at %p with args %08x, %08x, %08x, %08x",
"Call to host function at %p args %08x, %08x, %08x, %08x, %0xc",
FUNCTION_ADDR(target),
arg0,
arg1,
arg2,
arg3);
arg3,
arg4);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
PrintF("\n");
}
CHECK(stack_aligned);
int64_t result = target(arg0, arg1, arg2, arg3);
int64_t result = target(arg0, arg1, arg2, arg3, arg4);
int32_t lo_res = static_cast<int32_t>(result);
int32_t hi_res = static_cast<int32_t>(result >> 32);
if (::v8::internal::FLAG_trace_sim) {
@ -1908,9 +1928,12 @@ void Simulator::DecodeType01(Instr* instr) {
set_register(lr, old_pc + Instr::kInstrSize);
break;
}
case BKPT:
v8::internal::OS::DebugBreak();
case BKPT: {
Debugger dbg(this);
PrintF("Simulator hit BKPT.\n");
dbg.Debug();
break;
}
default:
UNIMPLEMENTED();
}

View File

@ -186,6 +186,10 @@ class Simulator {
// ICache checking.
static void FlushICache(void* start, size_t size);
// Returns true if pc register contains one of the 'special_values' defined
// below (bad_lr, end_sim_pc).
bool has_bad_pc() const;
private:
enum special_values {
// Known bad pc value to ensure that the simulator does not execute

View File

@ -874,6 +874,34 @@ MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
return cell;
}
// Calls GenerateCheckPropertyCell for each global object in the prototype chain
// from object to (but not including) holder.
MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells(
MacroAssembler* masm,
JSObject* object,
JSObject* holder,
String* name,
Register scratch,
Label* miss) {
JSObject* current = object;
while (current != holder) {
if (current->IsGlobalObject()) {
// Returns a cell or a failure.
MaybeObject* result = GenerateCheckPropertyCell(
masm,
GlobalObject::cast(current),
name,
scratch,
miss);
if (result->IsFailure()) return result;
}
ASSERT(current->IsJSObject());
current = JSObject::cast(current->GetPrototype());
}
return NULL;
}
#undef __
#define __ ACCESS_MASM(masm())
@ -911,18 +939,19 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
// checks are allowed in stubs.
ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
ASSERT(current->GetPrototype()->IsJSObject());
JSObject* prototype = JSObject::cast(current->GetPrototype());
if (!current->HasFastProperties() &&
!current->IsJSGlobalObject() &&
!current->IsJSGlobalProxy()) {
if (!name->IsSymbol()) {
MaybeObject* lookup_result = Heap::LookupSymbol(name);
if (lookup_result->IsFailure()) {
set_failure(Failure::cast(lookup_result));
MaybeObject* maybe_lookup_result = Heap::LookupSymbol(name);
Object* lookup_result = NULL; // Initialization to please compiler.
if (!maybe_lookup_result->ToObject(&lookup_result)) {
set_failure(Failure::cast(maybe_lookup_result));
return reg;
} else {
name = String::cast(lookup_result->ToObjectUnchecked());
}
name = String::cast(lookup_result);
}
ASSERT(current->property_dictionary()->FindEntry(name) ==
StringDictionary::kNotFound);
@ -936,7 +965,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
__ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
reg = holder_reg; // from now the object is in holder_reg
__ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
} else {
} else if (Heap::InNewSpace(prototype)) {
// Get the map of the current object.
__ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
__ cmp(scratch1, Operand(Handle<Map>(current->map())));
@ -956,14 +985,24 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
}
reg = holder_reg; // from now the object is in holder_reg
if (Heap::InNewSpace(prototype)) {
// The prototype is in new space; we cannot store a reference
// to it in the code. Load it from the map.
__ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
} else {
// The prototype is in old space; load it directly.
__ mov(reg, Operand(Handle<JSObject>(prototype)));
// The prototype is in new space; we cannot store a reference
// to it in the code. Load it from the map.
__ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
} else {
// Check the map of the current object.
__ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
__ cmp(scratch1, Operand(Handle<Map>(current->map())));
// Branch on the result of the map check.
__ b(ne, miss);
// Check access rights to the global object. This has to happen
// after the map check so that we know that the object is
// actually a global object.
if (current->IsJSGlobalProxy()) {
__ CheckAccessGlobalProxy(reg, scratch1, miss);
}
// The prototype is in old space; load it directly.
reg = holder_reg; // from now the object is in holder_reg
__ mov(reg, Operand(Handle<JSObject>(prototype)));
}
if (save_at_depth == depth) {
@ -982,32 +1021,22 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
// Log the check depth.
LOG(IntEvent("check-maps-depth", depth + 1));
// Perform security check for access to the global object and return
// the holder register.
ASSERT(current == holder);
ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
if (current->IsJSGlobalProxy()) {
// Perform security check for access to the global object.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
if (holder->IsJSGlobalProxy()) {
__ CheckAccessGlobalProxy(reg, scratch1, miss);
}
};
// If we've skipped any global objects, it's not enough to verify
// that their maps haven't changed. We also need to check that the
// property cell for the property is still empty.
current = object;
while (current != holder) {
if (current->IsGlobalObject()) {
MaybeObject* cell = GenerateCheckPropertyCell(masm(),
GlobalObject::cast(current),
name,
scratch1,
miss);
if (cell->IsFailure()) {
set_failure(Failure::cast(cell));
return reg;
}
}
current = JSObject::cast(current->GetPrototype());
}
MaybeObject* result = GenerateCheckPropertyCells(masm(),
object,
holder,
name,
scratch1,
miss);
if (result->IsFailure()) set_failure(Failure::cast(result));
// Return the register containing the holder.
return reg;
@ -1652,7 +1681,7 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
__ Drop(argc + 1);
__ Ret();
ICRuntimeCallHelper call_helper;
StubRuntimeCallHelper call_helper;
char_code_at_generator.GenerateSlow(masm(), call_helper);
__ bind(&index_out_of_range);
@ -1729,7 +1758,7 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
__ Drop(argc + 1);
__ Ret();
ICRuntimeCallHelper call_helper;
StubRuntimeCallHelper call_helper;
char_at_generator.GenerateSlow(masm(), call_helper);
__ bind(&index_out_of_range);
@ -1804,7 +1833,7 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
__ Drop(argc + 1);
__ Ret();
ICRuntimeCallHelper call_helper;
StubRuntimeCallHelper call_helper;
char_from_code_generator.GenerateSlow(masm(), call_helper);
// Tail call the full function. We do not have to patch the receiver
@ -2330,8 +2359,16 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
ASSERT(function->is_compiled());
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
__ InvokeCode(code, expected, arguments(),
RelocInfo::CODE_TARGET, JUMP_FUNCTION);
if (V8::UseCrankshaft()) {
// TODO(kasperl): For now, we always call indirectly through the
// code field in the function to allow recompilation to take effect
// without changing any of the call sites.
__ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION);
} else {
__ InvokeCode(code, expected, arguments(),
RelocInfo::CODE_TARGET, JUMP_FUNCTION);
}
// Handle call cache miss.
__ bind(&miss);
@ -2864,13 +2901,62 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
}
MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
Label miss;
// Check that the receiver isn't a smi.
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &miss);
// Check that the map matches.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
__ cmp(r2, Operand(Handle<Map>(receiver->map())));
__ b(ne, &miss);
// Check that the key is a smi.
__ tst(r0, Operand(kSmiTagMask));
__ b(ne, &miss);
// Get the elements array.
__ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
__ AssertFastElements(r2);
// Check that the key is within bounds.
__ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
__ cmp(r0, Operand(r3));
__ b(hs, &miss);
// Load the result and make sure it's not the hole.
__ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
__ ldr(r4,
MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r4, ip);
__ b(eq, &miss);
__ mov(r0, r4);
__ Ret();
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
return GetCode(NORMAL, NULL);
}
MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
int index,
Map* transition,
String* name) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
// -- r1 : name
// -- r2 : receiver
// -- lr : return address
// -----------------------------------
@ -2902,6 +2988,76 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
}
MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
JSObject* receiver) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -- r3 : scratch
// -- r4 : scratch (elements)
// -----------------------------------
Label miss;
Register value_reg = r0;
Register key_reg = r1;
Register receiver_reg = r2;
Register scratch = r3;
Register elements_reg = r4;
// Check that the receiver isn't a smi.
__ tst(receiver_reg, Operand(kSmiTagMask));
__ b(eq, &miss);
// Check that the map matches.
__ ldr(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
__ cmp(scratch, Operand(Handle<Map>(receiver->map())));
__ b(ne, &miss);
// Check that the key is a smi.
__ tst(key_reg, Operand(kSmiTagMask));
__ b(ne, &miss);
// Get the elements array and make sure it is a fast element array, not a
// copy-on-write ('cow') array.
__ ldr(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
__ ldr(scratch, FieldMemOperand(elements_reg, HeapObject::kMapOffset));
__ cmp(scratch, Operand(Handle<Map>(Factory::fixed_array_map())));
__ b(ne, &miss);
// Check that the key is within bounds.
if (receiver->IsJSArray()) {
__ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
} else {
__ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
}
// Compare smis.
__ cmp(key_reg, scratch);
__ b(hs, &miss);
__ add(scratch,
elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
__ str(value_reg,
MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
__ RecordWrite(scratch,
Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize),
receiver_reg, elements_reg);
// value_reg (r0) is preserved.
// Done.
__ Ret();
__ bind(&miss);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(NORMAL, NULL);
}
MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// ----------- S t a t e -------------
// -- r0 : argc

View File

@ -35,10 +35,12 @@
#include "v8.h"
#include "arguments.h"
#include "deoptimizer.h"
#include "execution.h"
#include "ic-inl.h"
#include "factory.h"
#include "runtime.h"
#include "runtime-profiler.h"
#include "serialize.h"
#include "stub-cache.h"
#include "regexp-stack.h"
@ -62,6 +64,10 @@ namespace v8 {
namespace internal {
const double DoubleConstant::min_int = kMinInt;
const double DoubleConstant::one_half = 0.5;
// -----------------------------------------------------------------------------
// Implementation of Label
@ -210,7 +216,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
#endif
Counters::reloc_info_count.Increment();
ASSERT(rinfo->pc() - last_pc_ >= 0);
ASSERT(RelocInfo::NUMBER_OF_MODES < kMaxRelocModes);
ASSERT(RelocInfo::NUMBER_OF_MODES <= kMaxRelocModes);
// Use unsigned delta-encoding for pc.
uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
RelocInfo::Mode rmode = rinfo->rmode();
@ -386,7 +392,7 @@ void RelocIterator::next() {
RelocIterator::RelocIterator(Code* code, int mode_mask) {
rinfo_.pc_ = code->instruction_start();
rinfo_.data_ = 0;
// relocation info is read backwards
// Relocation info is read backwards.
pos_ = code->relocation_start() + code->relocation_size();
end_ = code->relocation_start();
done_ = false;
@ -399,7 +405,7 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) {
RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
rinfo_.pc_ = desc.buffer;
rinfo_.data_ = 0;
// relocation info is read backwards
// Relocation info is read backwards.
pos_ = desc.buffer + desc.buffer_size;
end_ = pos_ - desc.reloc_size;
done_ = false;
@ -431,6 +437,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "debug break";
case RelocInfo::CODE_TARGET:
return "code target";
case RelocInfo::GLOBAL_PROPERTY_CELL:
return "global property cell";
case RelocInfo::RUNTIME_ENTRY:
return "runtime entry";
case RelocInfo::JS_RETURN:
@ -476,6 +484,13 @@ void RelocInfo::Print() {
PrintF(" (%s) (%p)", Code::Kind2String(code->kind()), target_address());
} else if (IsPosition(rmode_)) {
PrintF(" (%" V8_PTR_PREFIX "d)", data());
} else if (rmode_ == RelocInfo::RUNTIME_ENTRY) {
// Deoptimization bailouts are stored as runtime entries.
int id = Deoptimizer::GetDeoptimizationId(
target_address(), Deoptimizer::EAGER);
if (id != Deoptimizer::kNotDeoptimizationEntry) {
PrintF(" (deoptimization bailout %d)", id);
}
}
PrintF("\n");
@ -489,6 +504,9 @@ void RelocInfo::Verify() {
case EMBEDDED_OBJECT:
Object::VerifyPointer(target_object());
break;
case GLOBAL_PROPERTY_CELL:
Object::VerifyPointer(target_cell());
break;
case DEBUG_BREAK:
#ifndef ENABLE_DEBUGGER_SUPPORT
UNREACHABLE();
@ -595,6 +613,23 @@ ExternalReference ExternalReference::transcendental_cache_array_address() {
}
ExternalReference ExternalReference::new_deoptimizer_function() {
return ExternalReference(
Redirect(FUNCTION_ADDR(Deoptimizer::New)));
}
ExternalReference ExternalReference::compute_output_frames_function() {
return ExternalReference(
Redirect(FUNCTION_ADDR(Deoptimizer::ComputeOutputFrames)));
}
ExternalReference ExternalReference::global_contexts_list() {
return ExternalReference(Heap::global_contexts_list_address());
}
ExternalReference ExternalReference::keyed_lookup_cache_keys() {
return ExternalReference(KeyedLookupCache::keys_address());
}
@ -675,6 +710,18 @@ ExternalReference ExternalReference::scheduled_exception_address() {
}
ExternalReference ExternalReference::address_of_min_int() {
return ExternalReference(reinterpret_cast<void*>(
const_cast<double*>(&DoubleConstant::min_int)));
}
ExternalReference ExternalReference::address_of_one_half() {
return ExternalReference(reinterpret_cast<void*>(
const_cast<double*>(&DoubleConstant::one_half)));
}
#ifndef V8_INTERPRETED_REGEXP
ExternalReference ExternalReference::re_check_stack_guard_state() {

30
src/assembler.cc.rej Normal file
View File

@ -0,0 +1,30 @@
--- src/assembler.cc (revision 757)
+++ src/assembler.cc (working copy)
@@ -392,12 +392,16 @@
RelocIterator::RelocIterator(Code* code, int mode_mask) {
rinfo_.pc_ = code->instruction_start();
rinfo_.data_ = 0;
- // relocation info is read backwards
+ // Relocation info is read backwards.
pos_ = code->relocation_start() + code->relocation_size();
end_ = code->relocation_start();
done_ = false;
mode_mask_ = mode_mask;
- if (mode_mask_ == 0) pos_ = end_;
+ // Skip all relocation information if the mask is zero or if the
+ // code has been deoptimized and thereby destructively patched.
+ if (mode_mask_ == 0 || code->kind() == Code::DEOPTIMIZED_FUNCTION) {
+ pos_ = end_;
+ }
next();
}
@@ -405,7 +409,7 @@
RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
rinfo_.pc_ = desc.buffer;
rinfo_.data_ = 0;
- // relocation info is read backwards
+ // Relocation info is read backwards.
pos_ = desc.buffer + desc.buffer_size;
end_ = pos_ - desc.reloc_size;
done_ = false;

View File

@ -38,12 +38,21 @@
#include "runtime.h"
#include "top.h"
#include "token.h"
#include "objects.h"
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
// Common double constants.
class DoubleConstant: public AllStatic {
public:
static const double min_int;
static const double one_half;
};
// -----------------------------------------------------------------------------
// Labels represent pc locations; they are typically jump or call targets.
// After declaration, a label can be freely used to denote known or (yet)
@ -174,6 +183,8 @@ class RelocInfo BASE_EMBEDDED {
CODE_TARGET, // Code target which is not any of the above.
EMBEDDED_OBJECT,
GLOBAL_PROPERTY_CELL,
// Everything after runtime_entry (inclusive) is not GC'ed.
RUNTIME_ENTRY,
JS_RETURN, // Marks start of the ExitJSFrame code.
@ -254,6 +265,10 @@ class RelocInfo BASE_EMBEDDED {
INLINE(Handle<Object> target_object_handle(Assembler* origin));
INLINE(Object** target_object_address());
INLINE(void set_target_object(Object* target));
INLINE(JSGlobalPropertyCell* target_cell());
INLINE(Handle<JSGlobalPropertyCell> target_cell_handle());
INLINE(void set_target_cell(JSGlobalPropertyCell* cell));
// Read the address of the word containing the target_address in an
// instruction stream. What this means exactly is architecture-independent.
@ -484,6 +499,11 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference transcendental_cache_array_address();
static ExternalReference delete_handle_scope_extensions();
// Deoptimization support.
static ExternalReference new_deoptimizer_function();
static ExternalReference compute_output_frames_function();
static ExternalReference global_contexts_list();
// Static data in the keyed lookup cache.
static ExternalReference keyed_lookup_cache_keys();
static ExternalReference keyed_lookup_cache_field_offsets();
@ -526,6 +546,10 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference scheduled_exception_address();
// Static variables containing common double constants.
static ExternalReference address_of_min_int();
static ExternalReference address_of_one_half();
Address address() const {return reinterpret_cast<Address>(address_);}
#ifdef ENABLE_DEBUGGER_SUPPORT

View File

@ -25,18 +25,17 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_AST_INL_H_
#define V8_AST_INL_H_
#include "v8.h"
#include "ast.h"
#include "jump-target-inl.h"
namespace v8 {
namespace internal {
BreakableStatement::BreakableStatement(ZoneStringList* labels, Type type)
: labels_(labels), type_(type) {
ASSERT(labels == NULL || labels->length() > 0);
}
SwitchStatement::SwitchStatement(ZoneStringList* labels)
: BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
@ -44,13 +43,6 @@ SwitchStatement::SwitchStatement(ZoneStringList* labels)
}
IterationStatement::IterationStatement(ZoneStringList* labels)
: BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
body_(NULL),
continue_target_(JumpTarget::BIDIRECTIONAL) {
}
Block::Block(ZoneStringList* labels, int capacity, bool is_initializer_block)
: BreakableStatement(labels, TARGET_FOR_NAMED_ONLY),
statements_(capacity),
@ -58,13 +50,46 @@ Block::Block(ZoneStringList* labels, int capacity, bool is_initializer_block)
}
BreakableStatement::BreakableStatement(ZoneStringList* labels, Type type)
: labels_(labels),
type_(type),
entry_id_(GetNextId()),
exit_id_(GetNextId()) {
ASSERT(labels == NULL || labels->length() > 0);
}
IterationStatement::IterationStatement(ZoneStringList* labels)
: BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
body_(NULL),
continue_target_(JumpTarget::BIDIRECTIONAL),
osr_entry_id_(GetNextId()) {
}
DoWhileStatement::DoWhileStatement(ZoneStringList* labels)
: IterationStatement(labels),
cond_(NULL),
condition_position_(-1),
next_id_(GetNextId()) {
}
WhileStatement::WhileStatement(ZoneStringList* labels)
: IterationStatement(labels),
cond_(NULL),
may_have_function_literal_(true) {
}
ForStatement::ForStatement(ZoneStringList* labels)
: IterationStatement(labels),
init_(NULL),
cond_(NULL),
next_(NULL),
may_have_function_literal_(true),
loop_variable_(NULL) {
loop_variable_(NULL),
next_id_(GetNextId()) {
}
@ -73,8 +98,6 @@ ForInStatement::ForInStatement(ZoneStringList* labels)
}
DoWhileStatement::DoWhileStatement(ZoneStringList* labels)
: IterationStatement(labels), cond_(NULL), condition_position_(-1) {
}
} } // namespace v8::internal
#endif // V8_AST_INL_H_

View File

@ -28,16 +28,16 @@
#include "v8.h"
#include "ast.h"
#include "jump-target-inl.h"
#include "parser.h"
#include "scopes.h"
#include "string-stream.h"
#include "ast-inl.h"
#include "jump-target-inl.h"
namespace v8 {
namespace internal {
unsigned AstNode::current_id_ = 0;
unsigned AstNode::count_ = 0;
VariableProxySentinel VariableProxySentinel::this_proxy_(true);
VariableProxySentinel VariableProxySentinel::identifier_proxy_(false);
ValidLeftHandSideSentinel ValidLeftHandSideSentinel::instance_;
@ -48,6 +48,8 @@ Call Call::sentinel_(NULL, NULL, 0);
// ----------------------------------------------------------------------------
// All the Accept member functions for each syntax tree node type.
void Slot::Accept(AstVisitor* v) { v->VisitSlot(this); }
#define DECL_ACCEPT(type) \
void type::Accept(AstVisitor* v) { v->Visit##type(this); }
AST_NODE_LIST(DECL_ACCEPT)
@ -115,6 +117,29 @@ void VariableProxy::BindTo(Variable* var) {
}
Assignment::Assignment(Token::Value op,
Expression* target,
Expression* value,
int pos)
: op_(op),
target_(target),
value_(value),
pos_(pos),
compound_bailout_id_(kNoNumber),
block_start_(false),
block_end_(false),
is_monomorphic_(false),
receiver_types_(NULL) {
ASSERT(Token::IsAssignmentOp(op));
binary_operation_ = is_compound()
? new BinaryOperation(binary_op(), target, value, pos + 1)
: NULL;
if (is_compound()) {
compound_bailout_id_ = GetNextId();
}
}
Token::Value Assignment::binary_op() const {
switch (op_) {
case Token::ASSIGN_BIT_OR: return Token::BIT_OR;
@ -139,6 +164,12 @@ bool FunctionLiteral::AllowsLazyCompilation() {
}
bool FunctionLiteral::AllowOptimize() {
// We can't deal with heap-allocated locals.
return scope()->num_heap_slots() == 0;
}
ObjectLiteral::Property::Property(Literal* key, Expression* value) {
emit_store_ = true;
key_ = key;
@ -372,6 +403,265 @@ BinaryOperation::BinaryOperation(Assignment* assignment) {
}
// ----------------------------------------------------------------------------
// Inlining support
bool Block::IsInlineable() const {
const int count = statements_.length();
for (int i = 0; i < count; ++i) {
if (!statements_[i]->IsInlineable()) return false;
}
return true;
}
bool ExpressionStatement::IsInlineable() const {
return expression()->IsInlineable();
}
bool IfStatement::IsInlineable() const {
return condition()->IsInlineable() && then_statement()->IsInlineable() &&
else_statement()->IsInlineable();
}
bool ReturnStatement::IsInlineable() const {
return expression()->IsInlineable();
}
bool Conditional::IsInlineable() const {
return condition()->IsInlineable() && then_expression()->IsInlineable() &&
else_expression()->IsInlineable();
}
bool VariableProxy::IsInlineable() const {
return var()->is_global() || var()->IsStackAllocated();
}
bool Assignment::IsInlineable() const {
return target()->IsInlineable() && value()->IsInlineable();
}
bool Property::IsInlineable() const {
return obj()->IsInlineable() && key()->IsInlineable();
}
bool Call::IsInlineable() const {
if (!expression()->IsInlineable()) return false;
const int count = arguments()->length();
for (int i = 0; i < count; ++i) {
if (!arguments()->at(i)->IsInlineable()) return false;
}
return true;
}
bool CallNew::IsInlineable() const {
if (!expression()->IsInlineable()) return false;
const int count = arguments()->length();
for (int i = 0; i < count; ++i) {
if (!arguments()->at(i)->IsInlineable()) return false;
}
return true;
}
bool CallRuntime::IsInlineable() const {
const int count = arguments()->length();
for (int i = 0; i < count; ++i) {
if (!arguments()->at(i)->IsInlineable()) return false;
}
return true;
}
bool UnaryOperation::IsInlineable() const {
return expression()->IsInlineable();
}
bool BinaryOperation::IsInlineable() const {
return left()->IsInlineable() && right()->IsInlineable();
}
bool CompareOperation::IsInlineable() const {
return left()->IsInlineable() && right()->IsInlineable();
}
bool CompareToNull::IsInlineable() const {
return expression()->IsInlineable();
}
bool CountOperation::IsInlineable() const {
return expression()->IsInlineable();
}
// ----------------------------------------------------------------------------
// Recording of type feedback
void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
// Record type feedback from the oracle in the AST.
is_monomorphic_ = oracle->LoadIsMonomorphic(this);
if (key()->IsPropertyName()) {
if (oracle->LoadIsBuiltin(this, Builtins::LoadIC_ArrayLength)) {
is_array_length_ = true;
} else {
Literal* lit_key = key()->AsLiteral();
ASSERT(lit_key != NULL && lit_key->handle()->IsString());
Handle<String> name = Handle<String>::cast(lit_key->handle());
ZoneMapList* types = oracle->LoadReceiverTypes(this, name);
receiver_types_ = types;
}
} else if (is_monomorphic_) {
monomorphic_receiver_type_ = oracle->LoadMonomorphicReceiverType(this);
}
}
void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
Property* prop = target()->AsProperty();
ASSERT(prop != NULL);
is_monomorphic_ = oracle->StoreIsMonomorphic(this);
if (prop->key()->IsPropertyName()) {
Literal* lit_key = prop->key()->AsLiteral();
ASSERT(lit_key != NULL && lit_key->handle()->IsString());
Handle<String> name = Handle<String>::cast(lit_key->handle());
ZoneMapList* types = oracle->StoreReceiverTypes(this, name);
receiver_types_ = types;
} else if (is_monomorphic_) {
// Record receiver type for monomorphic keyed loads.
monomorphic_receiver_type_ = oracle->StoreMonomorphicReceiverType(this);
}
}
void CaseClause::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
TypeInfo info = oracle->SwitchType(this);
if (info.IsSmi()) {
compare_type_ = SMI_ONLY;
} else if (info.IsNonPrimitive()) {
compare_type_ = OBJECT_ONLY;
} else {
ASSERT(compare_type_ == NONE);
}
}
static bool CallWithoutIC(Handle<JSFunction> target, int arity) {
if (target->NeedsArgumentsAdaption()) {
// If the number of formal parameters of the target function
// does not match the number of arguments we're passing, we
// don't want to deal with it.
return target->shared()->formal_parameter_count() == arity;
} else {
// If the target doesn't need arguments adaption, we can call
// it directly, but we avoid doing so if it has a custom call
// generator, because that is likely to generate better code.
return !target->shared()->HasCustomCallGenerator();
}
}
bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
holder_ = Handle<JSObject>::null();
while (true) {
LookupResult lookup;
type->LookupInDescriptors(NULL, *name, &lookup);
// If the function wasn't found directly in the map, we start
// looking upwards through the prototype chain.
if (!lookup.IsFound() && type->prototype()->IsJSObject()) {
holder_ = Handle<JSObject>(JSObject::cast(type->prototype()));
type = Handle<Map>(holder()->map());
} else if (lookup.IsProperty() && lookup.type() == CONSTANT_FUNCTION) {
target_ = Handle<JSFunction>(lookup.GetConstantFunctionFromMap(*type));
return CallWithoutIC(target_, arguments()->length());
} else {
return false;
}
}
}
bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
Handle<String> name) {
target_ = Handle<JSFunction>::null();
cell_ = Handle<JSGlobalPropertyCell>::null();
LookupResult lookup;
global->Lookup(*name, &lookup);
if (lookup.IsProperty() && lookup.type() == NORMAL) {
cell_ = Handle<JSGlobalPropertyCell>(global->GetPropertyCell(&lookup));
if (cell_->value()->IsJSFunction()) {
Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
// If the function is in new space we assume it's more likely to
// change and thus prefer the general IC code.
if (!Heap::InNewSpace(*candidate)
&& CallWithoutIC(candidate, arguments()->length())) {
target_ = candidate;
return true;
}
}
}
return false;
}
void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
Property* property = expression()->AsProperty();
ASSERT(property != NULL);
// Specialize for the receiver types seen at runtime.
Literal* key = property->key()->AsLiteral();
ASSERT(key != NULL && key->handle()->IsString());
Handle<String> name = Handle<String>::cast(key->handle());
receiver_types_ = oracle->CallReceiverTypes(this, name);
#ifdef DEBUG
if (FLAG_enable_slow_asserts) {
if (receiver_types_ != NULL) {
int length = receiver_types_->length();
for (int i = 0; i < length; i++) {
Handle<Map> map = receiver_types_->at(i);
ASSERT(!map.is_null() && *map != NULL);
}
}
}
#endif
if (receiver_types_ != NULL && receiver_types_->length() > 0) {
Handle<Map> type = receiver_types_->at(0);
is_monomorphic_ = oracle->CallIsMonomorphic(this);
if (is_monomorphic_) is_monomorphic_ = ComputeTarget(type, name);
}
}
void BinaryOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
TypeInfo left = oracle->BinaryType(this, TypeFeedbackOracle::LEFT);
TypeInfo right = oracle->BinaryType(this, TypeFeedbackOracle::RIGHT);
is_smi_only_ = left.IsSmi() && right.IsSmi();
}
void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
TypeInfo left = oracle->CompareType(this, TypeFeedbackOracle::LEFT);
TypeInfo right = oracle->CompareType(this, TypeFeedbackOracle::RIGHT);
if (left.IsSmi() && right.IsSmi()) {
compare_type_ = SMI_ONLY;
} else if (left.IsNonPrimitive() && right.IsNonPrimitive()) {
compare_type_ = OBJECT_ONLY;
} else {
ASSERT(compare_type_ == NONE);
}
}
// ----------------------------------------------------------------------------
// Implementation of AstVisitor
@ -742,15 +1032,12 @@ RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
}
WhileStatement::WhileStatement(ZoneStringList* labels)
: IterationStatement(labels),
cond_(NULL),
may_have_function_literal_(true) {
}
CaseClause::CaseClause(Expression* label, ZoneList<Statement*>* statements)
: label_(label), statements_(statements) {
}
CaseClause::CaseClause(Expression* label,
ZoneList<Statement*>* statements,
int pos)
: label_(label),
statements_(statements),
position_(pos),
compare_type_(NONE) {}
} } // namespace v8::internal

294
src/ast.h
View File

@ -75,7 +75,6 @@ namespace internal {
V(FunctionLiteral) \
V(SharedFunctionInfoLiteral) \
V(Conditional) \
V(Slot) \
V(VariableProxy) \
V(Literal) \
V(RegExpLiteral) \
@ -102,10 +101,11 @@ namespace internal {
EXPRESSION_NODE_LIST(V)
// Forward declarations
class TargetCollector;
class MaterializedLiteral;
class DefinitionInfo;
class BitVector;
class DefinitionInfo;
class MaterializedLiteral;
class TargetCollector;
class TypeFeedbackOracle;
#define DEF_FORWARD_DECLARATION(type) class type;
AST_NODE_LIST(DEF_FORWARD_DECLARATION)
@ -133,6 +133,10 @@ class AstNode: public ZoneObject {
};
#undef DECLARE_TYPE_ENUM
static const int kNoNumber = -1;
AstNode() : id_(GetNextId()) { count_++; }
virtual ~AstNode() { }
virtual void Accept(AstVisitor* v) = 0;
@ -150,6 +154,27 @@ class AstNode: public ZoneObject {
virtual BreakableStatement* AsBreakableStatement() { return NULL; }
virtual IterationStatement* AsIterationStatement() { return NULL; }
virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
virtual Slot* AsSlot() { return NULL; }
// True if the node is simple enough for us to inline calls containing it.
virtual bool IsInlineable() const { return false; }
static int Count() { return count_; }
static void ResetIds() { current_id_ = 0; }
unsigned id() const { return id_; }
protected:
static unsigned GetNextId() { return current_id_++; }
static unsigned ReserveIdRange(int n) {
unsigned tmp = current_id_;
current_id_ += n;
return tmp;
}
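// For example, if current_id_ is 12, ReserveIdRange(3) returns 12 and leaves
// current_id_ at 15, so the caller can hand out ids 12, 13 and 14 to its
// sub-elements (see ArrayLiteral::GetIdForElement further down).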
private:
static unsigned current_id_;
static unsigned count_;
unsigned id_;
};
@ -174,6 +199,18 @@ class Statement: public AstNode {
class Expression: public AstNode {
public:
enum Context {
// Not assigned a context yet, or else will not be visited during
// code generation.
kUninitialized,
// Evaluated for its side effects.
kEffect,
// Evaluated for its value (and side effects).
kValue,
// Evaluated for control flow (and side effects).
kTest
};
Expression() : bitfields_(0) {}
virtual Expression* AsExpression() { return this; }
@ -181,6 +218,10 @@ class Expression: public AstNode {
virtual bool IsTrivial() { return false; }
virtual bool IsValidLeftHandSide() { return false; }
// Helpers for ToBoolean conversion.
virtual bool ToBooleanIsTrue() { return false; }
virtual bool ToBooleanIsFalse() { return false; }
// Symbols that cannot be parsed as array indices are considered property
// names. We do not treat symbols that can be array indexes as property
// names because [] for string objects is handled only by keyed ICs.
@ -198,6 +239,24 @@ class Expression: public AstNode {
// True iff the expression is a literal represented as a smi.
virtual bool IsSmiLiteral() { return false; }
// Type feedback information for assignments and properties.
virtual bool IsMonomorphic() {
UNREACHABLE();
return false;
}
virtual bool IsArrayLength() {
UNREACHABLE();
return false;
}
virtual ZoneMapList* GetReceiverTypes() {
UNREACHABLE();
return NULL;
}
virtual Handle<Map> GetMonomorphicReceiverType() {
UNREACHABLE();
return Handle<Map>();
}
// Static type information for this expression.
StaticType* type() { return &type_; }
@ -301,6 +360,10 @@ class BreakableStatement: public Statement {
// Testers.
bool is_target_for_anonymous() const { return type_ == TARGET_FOR_ANONYMOUS; }
// Bailout support.
int EntryId() const { return entry_id_; }
int ExitId() const { return exit_id_; }
protected:
inline BreakableStatement(ZoneStringList* labels, Type type);
@ -308,6 +371,8 @@ class BreakableStatement: public Statement {
ZoneStringList* labels_;
Type type_;
BreakTarget break_target_;
int entry_id_;
int exit_id_;
};
@ -327,6 +392,8 @@ class Block: public BreakableStatement {
return statements_[0]->StatementAsCountOperation();
}
virtual bool IsInlineable() const;
void AddStatement(Statement* statement) { statements_.Add(statement); }
ZoneList<Statement*>* statements() { return &statements_; }
@ -370,6 +437,10 @@ class IterationStatement: public BreakableStatement {
Statement* body() const { return body_; }
void set_body(Statement* stmt) { body_ = stmt; }
// Bailout support.
int OsrEntryId() const { return osr_entry_id_; }
virtual int ContinueId() const = 0;
// Code generation
BreakTarget* continue_target() { return &continue_target_; }
@ -383,6 +454,7 @@ class IterationStatement: public BreakableStatement {
private:
Statement* body_;
BreakTarget continue_target_;
int osr_entry_id_;
};
@ -404,15 +476,19 @@ class DoWhileStatement: public IterationStatement {
int condition_position() { return condition_position_; }
void set_condition_position(int pos) { condition_position_ = pos; }
// Bailout support.
virtual int ContinueId() const { return next_id_; }
private:
Expression* cond_;
int condition_position_;
int next_id_;
};
class WhileStatement: public IterationStatement {
public:
explicit WhileStatement(ZoneStringList* labels);
explicit inline WhileStatement(ZoneStringList* labels);
DECLARE_NODE_TYPE(WhileStatement)
@ -429,6 +505,9 @@ class WhileStatement: public IterationStatement {
may_have_function_literal_ = value;
}
// Bailout support.
virtual int ContinueId() const { return EntryId(); }
private:
Expression* cond_;
// True if there is a function literal subexpression in the condition.
@ -466,6 +545,9 @@ class ForStatement: public IterationStatement {
may_have_function_literal_ = value;
}
// Bailout support.
virtual int ContinueId() const { return next_id_; }
bool is_fast_smi_loop() { return loop_variable_ != NULL; }
Variable* loop_variable() { return loop_variable_; }
void set_loop_variable(Variable* var) { loop_variable_ = var; }
@ -477,6 +559,7 @@ class ForStatement: public IterationStatement {
// True if there is a function literal subexpression in the condition.
bool may_have_function_literal_;
Variable* loop_variable_;
int next_id_;
};
@ -495,6 +578,9 @@ class ForInStatement: public IterationStatement {
Expression* each() const { return each_; }
Expression* enumerable() const { return enumerable_; }
// Bailout support.
virtual int ContinueId() const { return EntryId(); }
private:
Expression* each_;
Expression* enumerable_;
@ -508,11 +594,13 @@ class ExpressionStatement: public Statement {
DECLARE_NODE_TYPE(ExpressionStatement)
virtual bool IsInlineable() const;
virtual Assignment* StatementAsSimpleAssignment();
virtual CountOperation* StatementAsCountOperation();
void set_expression(Expression* e) { expression_ = e; }
Expression* expression() { return expression_; }
Expression* expression() const { return expression_; }
private:
Expression* expression_;
@ -554,7 +642,8 @@ class ReturnStatement: public Statement {
DECLARE_NODE_TYPE(ReturnStatement)
Expression* expression() { return expression_; }
Expression* expression() const { return expression_; }
virtual bool IsInlineable() const;
private:
Expression* expression_;
@ -588,7 +677,7 @@ class WithExitStatement: public Statement {
class CaseClause: public ZoneObject {
public:
CaseClause(Expression* label, ZoneList<Statement*>* statements);
CaseClause(Expression* label, ZoneList<Statement*>* statements, int pos);
bool is_default() const { return label_ == NULL; }
Expression* label() const {
@ -598,10 +687,21 @@ class CaseClause: public ZoneObject {
JumpTarget* body_target() { return &body_target_; }
ZoneList<Statement*>* statements() const { return statements_; }
int position() { return position_; }
void set_position(int pos) { position_ = pos; }
// Type feedback information.
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
private:
Expression* label_;
JumpTarget body_target_;
ZoneList<Statement*>* statements_;
int position_;
enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };
CompareTypeFeedback compare_type_;
};
@ -641,6 +741,8 @@ class IfStatement: public Statement {
DECLARE_NODE_TYPE(IfStatement)
virtual bool IsInlineable() const;
bool HasThenStatement() const { return !then_statement()->IsEmpty(); }
bool HasElseStatement() const { return !else_statement()->IsEmpty(); }
@ -744,6 +846,8 @@ class DebuggerStatement: public Statement {
class EmptyStatement: public Statement {
public:
DECLARE_NODE_TYPE(EmptyStatement)
virtual bool IsInlineable() const { return true; }
};
@ -754,6 +858,7 @@ class Literal: public Expression {
DECLARE_NODE_TYPE(Literal)
virtual bool IsTrivial() { return true; }
virtual bool IsInlineable() const { return true; }
virtual bool IsSmiLiteral() { return handle_->IsSmi(); }
// Check if this literal is identical to the other literal.
@ -769,6 +874,14 @@ class Literal: public Expression {
return false;
}
Handle<String> AsPropertyName() {
ASSERT(IsPropertyName());
return Handle<String>::cast(handle_);
}
virtual bool ToBooleanIsTrue() { return handle_->ToBoolean()->IsTrue(); }
virtual bool ToBooleanIsFalse() { return handle_->ToBoolean()->IsFalse(); }
// Identity testers.
bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); }
bool IsTrue() const { return handle_.is_identical_to(Factory::true_value()); }
@ -906,16 +1019,21 @@ class ArrayLiteral: public MaterializedLiteral {
int depth)
: MaterializedLiteral(literal_index, is_simple, depth),
constant_elements_(constant_elements),
values_(values) {}
values_(values),
first_element_id_(ReserveIdRange(values->length())) {}
DECLARE_NODE_TYPE(ArrayLiteral)
Handle<FixedArray> constant_elements() const { return constant_elements_; }
ZoneList<Expression*>* values() const { return values_; }
// Return an AST id for an element that is used in simulate instructions.
int GetIdForElement(int i) { return first_element_id_ + i; }
private:
Handle<FixedArray> constant_elements_;
ZoneList<Expression*>* values_;
int first_element_id_;
};
@ -967,6 +1085,8 @@ class VariableProxy: public Expression {
return is_this_ || is_trivial_;
}
virtual bool IsInlineable() const;
bool IsVariable(Handle<String> n) {
return !is_this() && name().is_identical_to(n);
}
@ -1044,7 +1164,9 @@ class Slot: public Expression {
ASSERT(var != NULL);
}
DECLARE_NODE_TYPE(Slot)
virtual void Accept(AstVisitor* v);
virtual Slot* AsSlot() { return this; }
bool IsStackAllocated() { return type_ == PARAMETER || type_ == LOCAL; }
@ -1069,17 +1191,41 @@ class Property: public Expression {
// of the resolved Reference.
enum Type { NORMAL, SYNTHETIC };
Property(Expression* obj, Expression* key, int pos, Type type = NORMAL)
: obj_(obj), key_(key), pos_(pos), type_(type) { }
: obj_(obj),
key_(key),
pos_(pos),
type_(type),
is_monomorphic_(false),
receiver_types_(NULL),
is_array_length_(false),
is_arguments_access_(false) { }
DECLARE_NODE_TYPE(Property)
virtual bool IsValidLeftHandSide() { return true; }
virtual bool IsInlineable() const;
Expression* obj() const { return obj_; }
Expression* key() const { return key_; }
int position() const { return pos_; }
bool is_synthetic() const { return type_ == SYNTHETIC; }
// Marks that this is actually an argument rewritten to a keyed property
// accessing the argument through the arguments shadow object.
void set_is_arguments_access(bool is_arguments_access) {
is_arguments_access_ = is_arguments_access;
}
bool is_arguments_access() const { return is_arguments_access_; }
// Type feedback information.
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
virtual bool IsMonomorphic() { return is_monomorphic_; }
virtual ZoneMapList* GetReceiverTypes() { return receiver_types_; }
virtual bool IsArrayLength() { return is_array_length_; }
virtual Handle<Map> GetMonomorphicReceiverType() {
return monomorphic_receiver_type_;
}
// Returns a property singleton property access on 'this'. Used
// during preparsing.
static Property* this_property() { return &this_property_; }
@ -1090,6 +1236,12 @@ class Property: public Expression {
int pos_;
Type type_;
bool is_monomorphic_;
ZoneMapList* receiver_types_;
bool is_array_length_;
bool is_arguments_access_;
Handle<Map> monomorphic_receiver_type_;
// Dummy property used during preparsing.
static Property this_property_;
};
@ -1098,21 +1250,55 @@ class Property: public Expression {
class Call: public Expression {
public:
Call(Expression* expression, ZoneList<Expression*>* arguments, int pos)
: expression_(expression), arguments_(arguments), pos_(pos) { }
: expression_(expression),
arguments_(arguments),
pos_(pos),
is_monomorphic_(false),
receiver_types_(NULL),
return_id_(GetNextId()) {
}
DECLARE_NODE_TYPE(Call)
virtual bool IsInlineable() const;
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
int position() { return pos_; }
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
virtual ZoneMapList* GetReceiverTypes() { return receiver_types_; }
virtual bool IsMonomorphic() { return is_monomorphic_; }
Handle<JSFunction> target() { return target_; }
Handle<JSObject> holder() { return holder_; }
Handle<JSGlobalPropertyCell> cell() { return cell_; }
bool ComputeTarget(Handle<Map> type, Handle<String> name);
bool ComputeGlobalTarget(Handle<GlobalObject> global, Handle<String> name);
// Bailout support.
int ReturnId() const { return return_id_; }
static Call* sentinel() { return &sentinel_; }
#ifdef DEBUG
// Used to assert that the FullCodeGenerator records the return site.
bool return_is_recorded_;
#endif
private:
Expression* expression_;
ZoneList<Expression*>* arguments_;
int pos_;
bool is_monomorphic_;
ZoneMapList* receiver_types_;
Handle<JSFunction> target_;
Handle<JSObject> holder_;
Handle<JSGlobalPropertyCell> cell_;
int return_id_;
static Call sentinel_;
};
@ -1124,6 +1310,8 @@ class CallNew: public Expression {
DECLARE_NODE_TYPE(CallNew)
virtual bool IsInlineable() const;
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
int position() { return pos_; }
@ -1148,6 +1336,8 @@ class CallRuntime: public Expression {
DECLARE_NODE_TYPE(CallRuntime)
virtual bool IsInlineable() const;
Handle<String> name() const { return name_; }
Runtime::Function* function() const { return function_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
@ -1169,6 +1359,8 @@ class UnaryOperation: public Expression {
DECLARE_NODE_TYPE(UnaryOperation)
virtual bool IsInlineable() const;
virtual bool ResultOverwriteAllowed();
Token::Value op() const { return op_; }
@ -1186,7 +1378,7 @@ class BinaryOperation: public Expression {
Expression* left,
Expression* right,
int pos)
: op_(op), left_(left), right_(right), pos_(pos) {
: op_(op), left_(left), right_(right), pos_(pos), is_smi_only_(false) {
ASSERT(Token::IsBinaryOp(op));
}
@ -1195,6 +1387,8 @@ class BinaryOperation: public Expression {
DECLARE_NODE_TYPE(BinaryOperation)
virtual bool IsInlineable() const;
virtual bool ResultOverwriteAllowed();
Token::Value op() const { return op_; }
@ -1202,11 +1396,16 @@ class BinaryOperation: public Expression {
Expression* right() const { return right_; }
int position() const { return pos_; }
// Type feedback information.
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
bool IsSmiOnly() const { return is_smi_only_; }
private:
Token::Value op_;
Expression* left_;
Expression* right_;
int pos_;
bool is_smi_only_;
};
@ -1251,6 +1450,8 @@ class CountOperation: public Expression {
virtual void MarkAsStatement() { is_prefix_ = true; }
virtual bool IsInlineable() const;
private:
bool is_prefix_;
IncrementOperation* increment_;
@ -1264,7 +1465,7 @@ class CompareOperation: public Expression {
Expression* left,
Expression* right,
int pos)
: op_(op), left_(left), right_(right), pos_(pos) {
: op_(op), left_(left), right_(right), pos_(pos), compare_type_(NONE) {
ASSERT(Token::IsCompareOp(op));
}
@ -1275,11 +1476,21 @@ class CompareOperation: public Expression {
Expression* right() const { return right_; }
int position() const { return pos_; }
virtual bool IsInlineable() const;
// Type feedback information.
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
private:
Token::Value op_;
Expression* left_;
Expression* right_;
int pos_;
enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };
CompareTypeFeedback compare_type_;
};
@ -1290,6 +1501,8 @@ class CompareToNull: public Expression {
DECLARE_NODE_TYPE(CompareToNull)
virtual bool IsInlineable() const;
bool is_strict() const { return is_strict_; }
Token::Value op() const { return is_strict_ ? Token::EQ_STRICT : Token::EQ; }
Expression* expression() const { return expression_; }
@ -1315,6 +1528,8 @@ class Conditional: public Expression {
DECLARE_NODE_TYPE(Conditional)
virtual bool IsInlineable() const;
Expression* condition() const { return condition_; }
Expression* then_expression() const { return then_expression_; }
Expression* else_expression() const { return else_expression_; }
@ -1333,14 +1548,12 @@ class Conditional: public Expression {
class Assignment: public Expression {
public:
Assignment(Token::Value op, Expression* target, Expression* value, int pos)
: op_(op), target_(target), value_(value), pos_(pos),
block_start_(false), block_end_(false) {
ASSERT(Token::IsAssignmentOp(op));
}
Assignment(Token::Value op, Expression* target, Expression* value, int pos);
DECLARE_NODE_TYPE(Assignment)
virtual bool IsInlineable() const;
Assignment* AsSimpleAssignment() { return !is_compound() ? this : NULL; }
Token::Value binary_op() const;
@ -1349,6 +1562,8 @@ class Assignment: public Expression {
Expression* target() const { return target_; }
Expression* value() const { return value_; }
int position() { return pos_; }
BinaryOperation* binary_operation() const { return binary_operation_; }
// This check relies on the definition order of token in token.h.
bool is_compound() const { return op() > Token::ASSIGN; }
@ -1361,13 +1576,31 @@ class Assignment: public Expression {
void mark_block_start() { block_start_ = true; }
void mark_block_end() { block_end_ = true; }
// Type feedback information.
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
virtual bool IsMonomorphic() { return is_monomorphic_; }
virtual ZoneMapList* GetReceiverTypes() { return receiver_types_; }
virtual Handle<Map> GetMonomorphicReceiverType() {
return monomorphic_receiver_type_;
}
// Bailout support.
int compound_bailout_id() const { return compound_bailout_id_; }
private:
Token::Value op_;
Expression* target_;
Expression* value_;
int pos_;
BinaryOperation* binary_operation_;
int compound_bailout_id_;
bool block_start_;
bool block_end_;
bool is_monomorphic_;
ZoneMapList* receiver_types_;
Handle<Map> monomorphic_receiver_type_;
};
@ -1417,11 +1650,7 @@ class FunctionLiteral: public Expression {
function_token_position_(RelocInfo::kNoPosition),
inferred_name_(Heap::empty_string()),
try_full_codegen_(false),
pretenure_(false) {
#ifdef DEBUG
already_compiled_ = false;
#endif
}
pretenure_(false) { }
DECLARE_NODE_TYPE(FunctionLiteral)
@ -1446,6 +1675,7 @@ class FunctionLiteral: public Expression {
int num_parameters() { return num_parameters_; }
bool AllowsLazyCompilation();
bool AllowOptimize();
Handle<String> debug_name() const {
if (name_->length() > 0) return name_;
@ -1463,13 +1693,6 @@ class FunctionLiteral: public Expression {
bool pretenure() { return pretenure_; }
void set_pretenure(bool value) { pretenure_ = value; }
#ifdef DEBUG
void mark_as_compiled() {
ASSERT(!already_compiled_);
already_compiled_ = true;
}
#endif
private:
Handle<String> name_;
Scope* scope_;
@ -1487,9 +1710,6 @@ class FunctionLiteral: public Expression {
Handle<String> inferred_name_;
bool try_full_codegen_;
bool pretenure_;
#ifdef DEBUG
bool already_compiled_;
#endif
};
@ -1894,8 +2114,12 @@ class AstVisitor BASE_EMBEDDED {
// node, calling SetStackOverflow will make sure that the visitor
// bails out without visiting more nodes.
void SetStackOverflow() { stack_overflow_ = true; }
void ClearStackOverflow() { stack_overflow_ = false; }
// Individual nodes
// Nodes not appearing in the AST, including slots.
virtual void VisitSlot(Slot* node) { UNREACHABLE(); }
// Individual AST nodes.
#define DEF_VISIT(type) \
virtual void Visit##type(type* node) = 0;
AST_NODE_LIST(DEF_VISIT)

165
src/atomicops.h Normal file
View File

@ -0,0 +1,165 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// The routines exported by this module are subtle. If you use them, even if
// you get the code right, it will depend on careful reasoning about atomicity
// and memory ordering; it will be less readable, and harder to maintain. If
// you plan to use these routines, you should have a good reason, such as solid
// evidence that performance would otherwise suffer, or there being no
// alternative. You should assume only properties explicitly guaranteed by the
// specifications in this file. You are almost certainly _not_ writing code
// just for the x86; if you assume x86 semantics, x86 hardware bugs and
// implementations on other architectures will cause your code to break. If you
// do not know what you are doing, avoid these routines, and use a Mutex.
//
// It is incorrect to make direct assignments to/from an atomic variable.
// You should use one of the Load or Store routines. The NoBarrier
// versions are provided when no barriers are needed:
// NoBarrier_Store()
// NoBarrier_Load()
// Although there is currently no compiler enforcement, you are encouraged
// to use these.
//
#ifndef V8_ATOMICOPS_H_
#define V8_ATOMICOPS_H_
#include "../include/v8.h"
#include "globals.h"
namespace v8 {
namespace internal {
typedef int32_t Atomic32;
#ifdef V8_HOST_ARCH_64_BIT
// We need to be able to go between Atomic64 and AtomicWord implicitly. This
// means Atomic64 and AtomicWord should be the same type on 64-bit.
#if defined(__APPLE__)
// MacOS is an exception to the implicit conversion rule above,
// because it uses long for intptr_t.
typedef int64_t Atomic64;
#else
typedef intptr_t Atomic64;
#endif
#endif
// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
typedef intptr_t AtomicWord;
// Atomically execute:
// result = *ptr;
// if (*ptr == old_value)
// *ptr = new_value;
// return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr"
//
// This routine implies no memory barriers.
Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value);
// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment);
// The following lower-level operations are typically useful only to people
// implementing higher-level synchronization operations like spinlocks,
// mutexes, and condition-variables. They combine CompareAndSwap(), a load, or
// a store with appropriate memory-ordering instructions. "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value);
Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value);
void MemoryBarrier();
void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);
Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
Atomic32 Release_Load(volatile const Atomic32* ptr);
// 64-bit atomic operations (only available on 64-bit processors).
#ifdef V8_HOST_ARCH_64_BIT
Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value);
Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value);
Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value);
void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
Atomic64 Acquire_Load(volatile const Atomic64* ptr);
Atomic64 Release_Load(volatile const Atomic64* ptr);
#endif // V8_HOST_ARCH_64_BIT
} } // namespace v8::internal
// Include our platform specific implementation.
#if defined(_MSC_VER) && \
(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
#include "atomicops_internals_x86_msvc.h"
#elif defined(__APPLE__) && \
(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
#include "atomicops_internals_x86_macosx.h"
#elif defined(__GNUC__) && \
(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
#include "atomicops_internals_x86_gcc.h"
#elif defined(__GNUC__) && defined(V8_HOST_ARCH_ARM)
#include "atomicops_internals_arm_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
#endif
#endif // V8_ATOMICOPS_H_
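As a quick illustration of the interface declared above (a minimal sketch, not part of this commit; the flag and payload names are hypothetical), the Release_Store/Acquire_Load pair supports the classic publish/consume pattern:

// Minimal publish/consume sketch built on the atomicops.h interface above.
// Illustrative only; "payload" and "ready" are hypothetical names.
#include "atomicops.h"

namespace {

int payload = 0;                   // Ordinary data being published.
v8::internal::Atomic32 ready = 0;  // Flag guarded by acquire/release.

// Producer thread: write the data, then publish the flag with release
// semantics so the payload store cannot be reordered after the flag store.
void Publish() {
  payload = 42;
  v8::internal::Release_Store(&ready, 1);
}

// Consumer thread: read the flag with acquire semantics so the payload
// load cannot be reordered before the flag load.
bool TryConsume(int* out) {
  if (v8::internal::Acquire_Load(&ready) == 1) {
    *out = payload;
    return true;
  }
  return false;
}

}  // namespace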

145
src/atomicops_internals_arm_gcc.h Normal file

@ -0,0 +1,145 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is an internal atomic implementation, use atomicops.h instead.
//
// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
#ifndef V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
#define V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
namespace v8 {
namespace internal {
// 0xffff0fc0 is the hard coded address of a function provided by
// the kernel which implements an atomic compare-exchange. On older
// ARM architecture revisions (pre-v6) this may be implemented using
// a syscall. This address is stable, and in active use (hard coded)
// by at least glibc-2.7 and the Android C library.
typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value,
Atomic32 new_value,
volatile Atomic32* ptr);
LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) =
(LinuxKernelCmpxchgFunc) 0xffff0fc0;
typedef void (*LinuxKernelMemoryBarrierFunc)(void);
LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
(LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value = *ptr;
do {
if (!pLinuxKernelCmpxchg(old_value, new_value,
const_cast<Atomic32*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (pLinuxKernelCmpxchg(old_value, new_value,
const_cast<Atomic32*>(ptr)));
return old_value;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return Barrier_AtomicIncrement(ptr, increment);
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
for (;;) {
// Atomic exchange the old value with an incremented one.
Atomic32 old_value = *ptr;
Atomic32 new_value = old_value + increment;
if (pLinuxKernelCmpxchg(old_value, new_value,
const_cast<Atomic32*>(ptr)) == 0) {
// The exchange took place as expected.
return new_value;
}
// Otherwise, *ptr changed mid-loop and we need to retry.
}
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void MemoryBarrier() {
pLinuxKernelMemoryBarrier();
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
} } // namespace v8::internal
#endif // V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
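The atomicops.h comment notes that these compare-and-swap variants are mainly building blocks for higher-level synchronization such as spinlocks. A minimal spinlock sketch over this interface (illustrative only, not part of the commit; the class name is hypothetical) could look like:

// Illustrative spinlock built on the portable atomic interface; not part of
// this commit. A real implementation would also yield or pause while spinning.
#include "atomicops.h"

class SpinLock {
 public:
  SpinLock() : state_(0) {}

  void Lock() {
    // Acquire semantics: no later memory access may be reordered ahead of a
    // successful lock acquisition. The CAS returns the previous value, so 0
    // means the lock was free and we now own it.
    while (v8::internal::Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
      // Busy-wait until the lock is released.
    }
  }

  void Unlock() {
    // Release semantics: no earlier memory access may be reordered after the
    // store that hands the lock back.
    v8::internal::Release_Store(&state_, 0);
  }

 private:
  volatile v8::internal::Atomic32 state_;
};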

126
src/atomicops_internals_x86_gcc.cc Normal file

@ -0,0 +1,126 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This module gets enough CPU information to optimize the
// atomicops module on x86.
#include <string.h>
#include "atomicops.h"
// This file only makes sense with atomicops_internals_x86_gcc.h -- it
// depends on structs that are defined in that file. If atomicops.h
// doesn't sub-include that file, then we aren't needed, and shouldn't
// try to do anything.
#ifdef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
// Inline cpuid instruction. In PIC compilations, %ebx contains the address
// of the global offset table. To avoid breaking such executables, this code
// must preserve that register's value across cpuid instructions.
#if defined(__i386__)
#define cpuid(a, b, c, d, inp) \
asm("mov %%ebx, %%edi\n" \
"cpuid\n" \
"xchg %%edi, %%ebx\n" \
: "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
#elif defined(__x86_64__)
#define cpuid(a, b, c, d, inp) \
asm("mov %%rbx, %%rdi\n" \
"cpuid\n" \
"xchg %%rdi, %%rbx\n" \
: "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
#endif
#if defined(cpuid) // initialize the struct only on x86
// Set the flags so that code will run correctly and conservatively: even if
// we haven't been initialized yet, we are probably still single threaded, so
// these default values should be safe.
struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
false, // bug can't exist before process spawns multiple threads
false, // no SSE2
};
// Initialize the AtomicOps_Internalx86CPUFeatures struct.
static void AtomicOps_Internalx86CPUFeaturesInit() {
uint32_t eax;
uint32_t ebx;
uint32_t ecx;
uint32_t edx;
// Get vendor string (issue CPUID with eax = 0)
cpuid(eax, ebx, ecx, edx, 0);
char vendor[13];
memcpy(vendor, &ebx, 4);
memcpy(vendor + 4, &edx, 4);
memcpy(vendor + 8, &ecx, 4);
vendor[12] = 0;
// get feature flags in ecx/edx, and family/model in eax
cpuid(eax, ebx, ecx, edx, 1);
int family = (eax >> 8) & 0xf; // family and model fields
int model = (eax >> 4) & 0xf;
if (family == 0xf) { // use extended family and model fields
family += (eax >> 20) & 0xff;
model += ((eax >> 16) & 0xf) << 4;
}
// Opteron Rev E has a bug in which on very rare occasions a locked
// instruction doesn't act as a read-acquire barrier if followed by a
// non-locked read-modify-write instruction. Rev F has this bug in
// pre-release versions, but not in versions released to customers,
// so we test only for Rev E, which is family 15, model 32..63 inclusive.
if (strcmp(vendor, "AuthenticAMD") == 0 && // AMD
family == 15 &&
32 <= model && model <= 63) {
AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = true;
} else {
AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false;
}
// edx bit 26 is SSE2 which we use to tell us whether we can use mfence
AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
}
namespace {
class AtomicOpsx86Initializer {
public:
AtomicOpsx86Initializer() {
AtomicOps_Internalx86CPUFeaturesInit();
}
};
// A global to get us initialized on startup via static initialization :/
AtomicOpsx86Initializer g_initer;
} // namespace
#endif // if x86
#endif // ifdef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
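To make the family/model arithmetic above concrete, here is a small stand-alone sketch (not part of the commit) that decodes a made-up CPUID leaf-1 EAX value the same way; the sample value is for illustration only and is not claimed to match real hardware.

// Stand-alone illustration of the extended family/model decoding used above.
#include <stdint.h>
#include <stdio.h>

int main() {
  // Hypothetical CPUID leaf-1 EAX value, chosen only to exercise the
  // extended-field path (base family 0xf).
  uint32_t eax = 0x00020F32;
  int family = (eax >> 8) & 0xf;        // base family field: 0xf
  int model = (eax >> 4) & 0xf;         // base model field: 3
  if (family == 0xf) {                  // use extended family and model fields
    family += (eax >> 20) & 0xff;       // extended family is 0 here
    model += ((eax >> 16) & 0xf) << 4;  // extended model 2 -> adds 32
  }
  // Result: family == 15, model == 35, which falls in the 32..63 range that
  // the Opteron Rev E check above looks for.
  printf("family=%d model=%d\n", family, model);
  return 0;
}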

287
src/atomicops_internals_x86_gcc.h Normal file

@ -0,0 +1,287 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
#define V8_ATOMICOPS_INTERNALS_X86_GCC_H_
// This struct is not part of the public API of this module; clients may not
// use it.
// Features of this x86. Values may not be correct before main() is run,
// but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
// after acquire compare-and-swap.
bool has_sse2; // Processor has SSE2.
};
extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
namespace v8 {
namespace internal {
// 32-bit low-level operations on any platform.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev;
__asm__ __volatile__("lock; cmpxchgl %1,%2"
: "=a" (prev)
: "q" (new_value), "m" (*ptr), "0" (old_value)
: "memory");
return prev;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
__asm__ __volatile__("xchgl %1,%0" // The lock prefix is implicit for xchg.
: "=r" (new_value)
: "m" (*ptr), "0" (new_value)
: "memory");
return new_value; // Now it's the previous value.
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 temp = increment;
__asm__ __volatile__("lock; xaddl %0,%1"
: "+r" (temp), "+m" (*ptr)
: : "memory");
// temp now holds the old value of *ptr
return temp + increment;
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 temp = increment;
__asm__ __volatile__("lock; xaddl %0,%1"
: "+r" (temp), "+m" (*ptr)
: : "memory");
// temp now holds the old value of *ptr
if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
__asm__ __volatile__("lfence" : : : "memory");
}
return temp + increment;
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
__asm__ __volatile__("lfence" : : : "memory");
}
return x;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
#if defined(__x86_64__)
// 64-bit implementations of memory barrier can be simpler, because the
// "mfence" instruction is guaranteed to exist.
inline void MemoryBarrier() {
__asm__ __volatile__("mfence" : : : "memory");
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
#else
inline void MemoryBarrier() {
if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
__asm__ __volatile__("mfence" : : : "memory");
} else { // mfence is faster but not present on PIII
Atomic32 x = 0;
NoBarrier_AtomicExchange(&x, 0); // acts as a barrier on PIII
}
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
*ptr = value;
__asm__ __volatile__("mfence" : : : "memory");
} else {
NoBarrier_AtomicExchange(ptr, value);
// acts as a barrier on PIII
}
}
#endif
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
ATOMICOPS_COMPILER_BARRIER();
*ptr = value; // An x86 store acts as a release barrier.
// See comments in Atomic64 version of Release_Store(), below.
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr; // An x86 load acts as an acquire barrier.
// See comments in Atomic64 version of Release_Store(), below.
ATOMICOPS_COMPILER_BARRIER();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
#if defined(__x86_64__)
// 64-bit low-level operations on 64-bit platform.
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev;
__asm__ __volatile__("lock; cmpxchgq %1,%2"
: "=a" (prev)
: "q" (new_value), "m" (*ptr), "0" (old_value)
: "memory");
return prev;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
__asm__ __volatile__("xchgq %1,%0" // The lock prefix is implicit for xchg.
: "=r" (new_value)
: "m" (*ptr), "0" (new_value)
: "memory");
return new_value; // Now it's the previous value.
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
Atomic64 temp = increment;
__asm__ __volatile__("lock; xaddq %0,%1"
: "+r" (temp), "+m" (*ptr)
: : "memory");
// temp now contains the previous value of *ptr
return temp + increment;
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
Atomic64 temp = increment;
__asm__ __volatile__("lock; xaddq %0,%1"
: "+r" (temp), "+m" (*ptr)
: : "memory");
// temp now contains the previous value of *ptr
if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
__asm__ __volatile__("lfence" : : : "memory");
}
return temp + increment;
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
ATOMICOPS_COMPILER_BARRIER();
*ptr = value; // An x86 store acts as a release barrier
// for current AMD/Intel chips as of Jan 2008.
// See also Acquire_Load(), below.
// When new chips come out, check:
// IA-32 Intel Architecture Software Developer's Manual, Volume 3:
// System Programming Guide, Chapter 7: Multiple-processor management,
// Section 7.2, Memory Ordering.
// Last seen at:
// http://developer.intel.com/design/pentium4/manuals/index_new.htm
//
// x86 stores/loads fail to act as barriers for a few instructions (clflush
// maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
// not generated by the compiler, and are rare. Users of these instructions
// need to know about cache behaviour in any case since all of these involve
// either flushing cache lines or non-temporal cache hints.
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr; // An x86 load acts as an acquire barrier,
// for current AMD/Intel chips as of Jan 2008.
// See also Release_Store(), above.
ATOMICOPS_COMPILER_BARRIER();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
__asm__ __volatile__("lfence" : : : "memory");
}
return x;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
#endif // defined(__x86_64__)
} } // namespace v8::internal
#undef ATOMICOPS_COMPILER_BARRIER
#endif // V8_ATOMICOPS_INTERNALS_X86_GCC_H_

301
src/atomicops_internals_x86_macosx.h Normal file

@ -0,0 +1,301 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
#define V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
#include <libkern/OSAtomic.h>
namespace v8 {
namespace internal {
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
do {
if (OSAtomicCompareAndSwap32(old_value, new_value,
const_cast<Atomic32*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap32(old_value, new_value,
const_cast<Atomic32*>(ptr)));
return old_value;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
Atomic32 increment) {
return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
Atomic32 increment) {
return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
}
inline void MemoryBarrier() {
OSMemoryBarrier();
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
do {
if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
const_cast<Atomic32*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
Atomic32 old_value,
Atomic32 new_value) {
return Acquire_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
MemoryBarrier();
return *ptr;
}
#ifdef __LP64__
// 64-bit implementation on 64-bit platform
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value;
do {
if (OSAtomicCompareAndSwap64(old_value, new_value,
const_cast<Atomic64*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
Atomic64 new_value) {
Atomic64 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap64(old_value, new_value,
const_cast<Atomic64*>(ptr)));
return old_value;
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
Atomic64 increment) {
return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
Atomic64 increment) {
return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value;
do {
if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
const_cast<Atomic64*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
Atomic64 old_value,
Atomic64 new_value) {
// The lib kern interface does not distinguish between
// Acquire and Release memory barriers; they are equivalent.
return Acquire_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
Atomic64 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
MemoryBarrier();
return *ptr;
}
#endif // defined(__LP64__)
// MacOS uses long for intptr_t, so AtomicWord and Atomic32 are always
// different types on the Mac, even when they are the same size. We need to
// explicitly cast from AtomicWord to Atomic32/64 to implement the AtomicWord
// interface.
#ifdef __LP64__
#define AtomicWordCastType Atomic64
#else
#define AtomicWordCastType Atomic32
#endif
inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return NoBarrier_CompareAndSwap(
reinterpret_cast<volatile AtomicWordCastType*>(ptr),
old_value, new_value);
}
inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
AtomicWord new_value) {
return NoBarrier_AtomicExchange(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}
inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
AtomicWord increment) {
return NoBarrier_AtomicIncrement(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
}
inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
AtomicWord increment) {
return Barrier_AtomicIncrement(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
}
inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return v8::internal::Acquire_CompareAndSwap(
reinterpret_cast<volatile AtomicWordCastType*>(ptr),
old_value, new_value);
}
inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return v8::internal::Release_CompareAndSwap(
reinterpret_cast<volatile AtomicWordCastType*>(ptr),
old_value, new_value);
}
inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
NoBarrier_Store(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}
inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
return v8::internal::Acquire_Store(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}
inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
return v8::internal::Release_Store(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}
inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
return NoBarrier_Load(
reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}
inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
return v8::internal::Acquire_Load(
reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}
inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
return v8::internal::Release_Load(
reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}
#undef AtomicWordCastType
} } // namespace v8::internal
#endif // V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
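Because AtomicWord is long on Mac OS and therefore a distinct type from Atomic32 even in 32-bit builds, callers that hold pointer-sized values go through the AtomicWord overloads defined above. A short sketch (illustrative only, not part of the commit; the names are hypothetical):

// Illustrative use of the AtomicWord overloads above for a pointer-sized slot.
#include "atomicops.h"

namespace {

v8::internal::AtomicWord g_shared_slot = 0;  // Holds a pointer-sized value.

// Publish a pointer with release semantics; on the Mac this resolves to the
// AtomicWord overload above, which forwards to the Atomic32/Atomic64 code.
void PublishPointer(void* p) {
  v8::internal::Release_Store(
      &g_shared_slot, reinterpret_cast<v8::internal::AtomicWord>(p));
}

// Read it back with acquire semantics through the matching overload.
void* ReadPointer() {
  return reinterpret_cast<void*>(v8::internal::Acquire_Load(&g_shared_slot));
}

}  // namespace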

202
src/atomicops_internals_x86_msvc.h Normal file

@ -0,0 +1,202 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef V8_ATOMICOPS_INTERNALS_X86_MSVC_H_
#define V8_ATOMICOPS_INTERNALS_X86_MSVC_H_
#include "win32-headers.h"
namespace v8 {
namespace internal {
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
LONG result = InterlockedCompareExchange(
reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(new_value),
static_cast<LONG>(old_value));
return static_cast<Atomic32>(result);
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
LONG result = InterlockedExchange(
reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(new_value));
return static_cast<Atomic32>(result);
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return InterlockedExchangeAdd(
reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(increment)) + increment;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return Barrier_AtomicIncrement(ptr, increment);
}
#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
#error "We require at least vs2005 for MemoryBarrier"
#endif
inline void MemoryBarrier() {
// We use MemoryBarrier from WinNT.h
::MemoryBarrier();
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
NoBarrier_AtomicExchange(ptr, value);
// acts as a barrier in this implementation
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value; // works w/o barrier for current Intel chips as of June 2005
// See comments in Atomic64 version of Release_Store() below.
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
#if defined(_WIN64)
// 64-bit low-level operations on 64-bit platform.
STATIC_ASSERT(sizeof(Atomic64) == sizeof(PVOID));
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
PVOID result = InterlockedCompareExchangePointer(
reinterpret_cast<volatile PVOID*>(ptr),
reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
return reinterpret_cast<Atomic64>(result);
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
PVOID result = InterlockedExchangePointer(
reinterpret_cast<volatile PVOID*>(ptr),
reinterpret_cast<PVOID>(new_value));
return reinterpret_cast<Atomic64>(result);
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return InterlockedExchangeAdd64(
reinterpret_cast<volatile LONGLONG*>(ptr),
static_cast<LONGLONG>(increment)) + increment;
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return Barrier_AtomicIncrement(ptr, increment);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
NoBarrier_AtomicExchange(ptr, value);
// acts as a barrier in this implementation
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value; // works w/o barrier for current Intel chips as of June 2005
// When new chips come out, check:
// IA-32 Intel Architecture Software Developer's Manual, Volume 3:
// System Programming Guide, Chapter 7: Multiple-processor management,
// Section 7.2, Memory Ordering.
// Last seen at:
// http://developer.intel.com/design/pentium4/manuals/index_new.htm
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
#endif // defined(_WIN64)
} } // namespace v8::internal
#endif // V8_ATOMICOPS_INTERNALS_X86_MSVC_H_

src/bootstrapper.cc

@ -500,6 +500,24 @@ Handle<JSFunction> Genesis::CreateEmptyFunction() {
}
static void AddToWeakGlobalContextList(Context* context) {
ASSERT(context->IsGlobalContext());
#ifdef DEBUG
{ // NOLINT
ASSERT(context->get(Context::NEXT_CONTEXT_LINK)->IsUndefined());
// Check that context is not in the list yet.
for (Object* current = Heap::global_contexts_list();
!current->IsUndefined();
current = Context::cast(current)->get(Context::NEXT_CONTEXT_LINK)) {
ASSERT(current != context);
}
}
#endif
context->set(Context::NEXT_CONTEXT_LINK, Heap::global_contexts_list());
Heap::set_global_contexts_list(context);
}
void Genesis::CreateRoots() {
// Allocate the global context FixedArray first and then patch the
// closure and extension object later (we need the empty function
@ -508,6 +526,7 @@ void Genesis::CreateRoots() {
global_context_ =
Handle<Context>::cast(
GlobalHandles::Create(*Factory::NewGlobalContext()));
AddToWeakGlobalContextList(*global_context_);
Top::set_context(*global_context());
// Allocate the message listeners object.
@ -1596,7 +1615,7 @@ bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
= Handle<SharedFunctionInfo>(function->shared());
if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
// Set the code object on the function object.
function->set_code(function->shared()->code());
function->ReplaceCode(function->shared()->code());
builtins->set_javascript_builtin_code(id, shared->code());
}
return true;
@ -1784,6 +1803,7 @@ Genesis::Genesis(Handle<Object> global_object,
if (!new_context.is_null()) {
global_context_ =
Handle<Context>::cast(GlobalHandles::Create(*new_context));
AddToWeakGlobalContextList(*global_context_);
Top::set_context(*global_context_);
i::Counters::contexts_created_by_snapshot.Increment();
result_ = global_context_;
@ -1819,11 +1839,6 @@ Genesis::Genesis(Handle<Object> global_object,
i::Counters::contexts_created_from_scratch.Increment();
}
// Add this context to the weak list of global contexts.
(*global_context_)->set(Context::NEXT_CONTEXT_LINK,
Heap::global_contexts_list());
Heap::set_global_contexts_list(*global_context_);
result_ = global_context_;
}

src/builtins.cc

@ -32,6 +32,7 @@
#include "bootstrapper.h"
#include "builtins.h"
#include "ic-inl.h"
#include "vm-state-inl.h"
namespace v8 {
namespace internal {
@ -1031,9 +1032,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
{
// Leaving JavaScript.
VMState state(EXTERNAL);
#ifdef ENABLE_LOGGING_AND_PROFILING
state.set_external_callback(v8::ToCData<Address>(callback_obj));
#endif
ExternalCallbackScope call_scope(v8::ToCData<Address>(callback_obj));
value = callback(new_args);
}
if (value.IsEmpty()) {
@ -1103,9 +1102,7 @@ BUILTIN(FastHandleApiCall) {
{
// Leaving JavaScript.
VMState state(EXTERNAL);
#ifdef ENABLE_LOGGING_AND_PROFILING
state.set_external_callback(v8::ToCData<Address>(callback_obj));
#endif
ExternalCallbackScope call_scope(v8::ToCData<Address>(callback_obj));
v8::InvocationCallback callback =
v8::ToCData<v8::InvocationCallback>(callback_obj);
@ -1169,9 +1166,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
{
// Leaving JavaScript.
VMState state(EXTERNAL);
#ifdef ENABLE_LOGGING_AND_PROFILING
state.set_external_callback(v8::ToCData<Address>(callback_obj));
#endif
ExternalCallbackScope call_scope(v8::ToCData<Address>(callback_obj));
value = callback(new_args);
}
if (value.IsEmpty()) {
@ -1332,6 +1327,11 @@ static void Generate_StoreIC_ArrayLength(MacroAssembler* masm) {
}
static void Generate_StoreIC_GlobalProxy(MacroAssembler* masm) {
StoreIC::GenerateGlobalProxy(masm);
}
static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
KeyedStoreIC::GenerateGeneric(masm);
}
@ -1581,4 +1581,5 @@ const char* Builtins::Lookup(byte* pc) {
return NULL;
}
} } // namespace v8::internal

src/builtins.h

@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -71,6 +71,10 @@ enum BuiltinExtraArguments {
V(JSEntryTrampoline, BUILTIN, UNINITIALIZED) \
V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED) \
V(LazyCompile, BUILTIN, UNINITIALIZED) \
V(LazyRecompile, BUILTIN, UNINITIALIZED) \
V(NotifyDeoptimized, BUILTIN, UNINITIALIZED) \
V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED) \
V(NotifyOSR, BUILTIN, UNINITIALIZED) \
\
V(LoadIC_Miss, BUILTIN, UNINITIALIZED) \
V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED) \
@ -102,6 +106,7 @@ enum BuiltinExtraArguments {
V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC) \
V(StoreIC_Normal, STORE_IC, MONOMORPHIC) \
V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC) \
V(StoreIC_GlobalProxy, STORE_IC, MEGAMORPHIC) \
\
V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED) \
V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC) \
@ -120,7 +125,9 @@ enum BuiltinExtraArguments {
V(ArrayCode, BUILTIN, UNINITIALIZED) \
V(ArrayConstructCode, BUILTIN, UNINITIALIZED) \
\
V(StringConstructCode, BUILTIN, UNINITIALIZED)
V(StringConstructCode, BUILTIN, UNINITIALIZED) \
\
V(OnStackReplacement, BUILTIN, UNINITIALIZED)
#ifdef ENABLE_DEBUGGER_SUPPORT
@ -256,6 +263,10 @@ class Builtins : public AllStatic {
static void Generate_JSEntryTrampoline(MacroAssembler* masm);
static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
static void Generate_LazyCompile(MacroAssembler* masm);
static void Generate_LazyRecompile(MacroAssembler* masm);
static void Generate_NotifyDeoptimized(MacroAssembler* masm);
static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
static void Generate_NotifyOSR(MacroAssembler* masm);
static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
static void Generate_FunctionCall(MacroAssembler* masm);
@ -265,6 +276,8 @@ class Builtins : public AllStatic {
static void Generate_ArrayConstructCode(MacroAssembler* masm);
static void Generate_StringConstructCode(MacroAssembler* masm);
static void Generate_OnStackReplacement(MacroAssembler* masm);
};
} } // namespace v8::internal

src/checks.h

@ -281,7 +281,7 @@ bool EnableSlowAsserts();
// safely enabled in release mode. Moreover, the ((void) 0) expression
// obeys different syntax rules than typedef's, e.g. it can't appear
// inside class declaration, this leads to inconsistency between debug
// and release compilation modes behaviour.
// and release compilation modes behavior.
#define STATIC_ASSERT(test) STATIC_CHECK(test)
#define ASSERT_NOT_NULL(p) ASSERT_NE(NULL, p)

src/code-stubs.cc

@ -103,6 +103,7 @@ Handle<Code> CodeStub::GetCode() {
GetICState());
Handle<Code> new_object = Factory::NewCode(desc, flags, masm.CodeObject());
RecordCodeGeneration(*new_object, &masm);
FinishCode(*new_object);
// Update the dictionary and the root in Heap.
Handle<NumberDictionary> dict =
@ -142,6 +143,7 @@ MaybeObject* CodeStub::TryGetCode() {
}
code = Code::cast(new_object);
RecordCodeGeneration(code, &masm);
FinishCode(code);
// Try to update the code cache but do not fail if unable.
MaybeObject* maybe_new_object =
@ -170,4 +172,29 @@ const char* CodeStub::MajorName(CodeStub::Major major_key,
}
int ICCompareStub::MinorKey() {
return OpField::encode(op_ - Token::EQ) | StateField::encode(state_);
}
void ICCompareStub::Generate(MacroAssembler* masm) {
switch (state_) {
case CompareIC::UNINITIALIZED:
GenerateMiss(masm);
break;
case CompareIC::SMIS:
GenerateSmis(masm);
break;
case CompareIC::HEAP_NUMBERS:
GenerateHeapNumbers(masm);
break;
case CompareIC::OBJECTS:
GenerateObjects(masm);
break;
default:
UNREACHABLE();
}
}
} } // namespace v8::internal

src/code-stubs.h

@ -29,7 +29,6 @@
#define V8_CODE_STUBS_H_
#include "globals.h"
#include "macro-assembler.h"
namespace v8 {
namespace internal {
@ -39,11 +38,15 @@ namespace internal {
#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
V(CallFunction) \
V(GenericBinaryOp) \
V(TypeRecordingBinaryOp) \
V(StringAdd) \
V(StringCharAt) \
V(SubString) \
V(StringCompare) \
V(SmiOp) \
V(Compare) \
V(CompareIC) \
V(MathPow) \
V(RecordWrite) \
V(ConvertToDouble) \
V(WriteInt32ToHeapNumber) \
@ -60,6 +63,7 @@ namespace internal {
V(CounterOp) \
V(ArgumentsAccess) \
V(RegExpExec) \
V(RegExpConstructResult) \
V(NumberToString) \
V(CEntry) \
V(JSEntry) \
@ -125,7 +129,7 @@ class CodeStub BASE_EMBEDDED {
virtual ~CodeStub() {}
protected:
static const int kMajorBits = 5;
static const int kMajorBits = 6;
static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
private:
@ -143,6 +147,9 @@ class CodeStub BASE_EMBEDDED {
// initially generated.
void RecordCodeGeneration(Code* code, MacroAssembler* masm);
// Finish the code object after it has been generated.
virtual void FinishCode(Code* code) { }
// Returns information for computing the number key.
virtual Major MajorKey() = 0;
virtual int MinorKey() = 0;
@ -216,11 +223,11 @@ namespace v8 {
namespace internal {
// RuntimeCallHelper implementation used in IC stubs: enters/leaves a
// RuntimeCallHelper implementation used in stubs: enters/leaves a
// newly created internal frame before/after the runtime call.
class ICRuntimeCallHelper : public RuntimeCallHelper {
class StubRuntimeCallHelper : public RuntimeCallHelper {
public:
ICRuntimeCallHelper() {}
StubRuntimeCallHelper() {}
virtual void BeforeCall(MacroAssembler* masm) const;
@ -376,9 +383,61 @@ class GenericUnaryOpStub : public CodeStub {
};
enum NaNInformation {
kBothCouldBeNaN,
kCantBothBeNaN
class MathPowStub: public CodeStub {
public:
MathPowStub() {}
virtual void Generate(MacroAssembler* masm);
private:
virtual CodeStub::Major MajorKey() { return MathPow; }
virtual int MinorKey() { return 0; }
const char* GetName() { return "MathPowStub"; }
};
class StringCharAtStub: public CodeStub {
public:
StringCharAtStub() {}
private:
Major MajorKey() { return StringCharAt; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
};
class ICCompareStub: public CodeStub {
public:
ICCompareStub(Token::Value op, CompareIC::State state)
: op_(op), state_(state) {
ASSERT(Token::IsCompareOp(op));
}
virtual void Generate(MacroAssembler* masm);
private:
class OpField: public BitField<int, 0, 3> { };
class StateField: public BitField<int, 3, 5> { };
virtual void FinishCode(Code* code) { code->set_compare_state(state_); }
virtual CodeStub::Major MajorKey() { return CompareIC; }
virtual int MinorKey();
virtual int GetCodeKind() { return Code::COMPARE_IC; }
void GenerateSmis(MacroAssembler* masm);
void GenerateHeapNumbers(MacroAssembler* masm);
void GenerateObjects(MacroAssembler* masm);
void GenerateMiss(MacroAssembler* masm);
bool strict() const { return op_ == Token::EQ_STRICT; }
Condition GetCondition() const { return CompareIC::ComputeCondition(op_); }
Token::Value op_;
CompareIC::State state_;
};
@ -391,6 +450,12 @@ enum CompareFlags {
};
enum NaNInformation {
kBothCouldBeNaN,
kCantBothBeNaN
};
class CompareStub: public CodeStub {
public:
CompareStub(Condition cc,
@ -398,7 +463,7 @@ class CompareStub: public CodeStub {
CompareFlags flags,
Register lhs,
Register rhs) :
cc_(cc),
cc_(cc),
strict_(strict),
never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
@ -440,6 +505,7 @@ class CompareStub: public CodeStub {
// Register holding the left hand side of the comparison if the stub gives
// a choice, no_reg otherwise.
Register lhs_;
// Register holding the right hand side of the comparison if the stub gives
// a choice, no_reg otherwise.
@ -457,6 +523,11 @@ class CompareStub: public CodeStub {
int MinorKey();
virtual int GetCodeKind() { return Code::COMPARE_IC; }
virtual void FinishCode(Code* code) {
code->set_compare_state(CompareIC::GENERIC);
}
// Branch to the label if the given object isn't a symbol.
void BranchIfNonSymbol(MacroAssembler* masm,
Label* label,
@ -490,9 +561,11 @@ class CompareStub: public CodeStub {
class CEntryStub : public CodeStub {
public:
explicit CEntryStub(int result_size) : result_size_(result_size) { }
explicit CEntryStub(int result_size)
: result_size_(result_size), save_doubles_(false) { }
void Generate(MacroAssembler* masm);
void SaveDoubles() { save_doubles_ = true; }
private:
void GenerateCore(MacroAssembler* masm,
@ -508,10 +581,9 @@ class CEntryStub : public CodeStub {
// Number of pointers/values returned.
const int result_size_;
bool save_doubles_;
Major MajorKey() { return CEntry; }
// Minor key must differ if different result_size_ values means different
// code is generated.
int MinorKey();
const char* GetName() { return "CEntryStub"; }
@ -597,6 +669,26 @@ class RegExpExecStub: public CodeStub {
};
class RegExpConstructResultStub: public CodeStub {
public:
RegExpConstructResultStub() { }
private:
Major MajorKey() { return RegExpConstructResult; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
const char* GetName() { return "RegExpConstructResultStub"; }
#ifdef DEBUG
void Print() {
PrintF("RegExpConstructResultStub\n");
}
#endif
};
class CallFunctionStub: public CodeStub {
public:
CallFunctionStub(int argc, InLoopFlag in_loop, CallFunctionFlags flags)

src/codegen.cc

@ -139,6 +139,16 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
print_source = FLAG_print_source;
print_ast = FLAG_print_ast;
print_json_ast = FLAG_print_json_ast;
Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
if (print_source && !filter.is_empty()) {
print_source = info->function()->name()->IsEqualTo(filter);
}
if (print_ast && !filter.is_empty()) {
print_ast = info->function()->name()->IsEqualTo(filter);
}
if (print_json_ast && !filter.is_empty()) {
print_json_ast = info->function()->name()->IsEqualTo(filter);
}
ftype = "user-defined";
}
@ -174,14 +184,24 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
masm->GetCode(&desc);
Handle<Code> code = Factory::NewCode(desc, flags, masm->CodeObject());
if (!code.is_null()) {
Counters::total_compiled_code_size.Increment(code->instruction_size());
}
return code;
}
void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
#ifdef ENABLE_DISASSEMBLER
bool print_code = Bootstrapper::IsActive()
? FLAG_print_builtin_code
: FLAG_print_code;
if (print_code) {
: (FLAG_print_code || (info->IsOptimizing() && FLAG_print_opt_code));
Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
FunctionLiteral* function = info->function();
bool match = filter.is_empty() || function->debug_name()->IsEqualTo(filter);
if (print_code && match) {
// Print the source code if available.
Handle<Script> script = info->script();
FunctionLiteral* function = info->function();
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
PrintF("--- Raw source ---\n");
StringInputBuffer stream(String::cast(script->source()));
@ -199,22 +219,22 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
code->Disassemble(*function->name()->ToCString());
}
#endif // ENABLE_DISASSEMBLER
if (!code.is_null()) {
Counters::total_compiled_code_size.Increment(code->instruction_size());
}
return code;
}
// Generate the code. Compile the AST and assemble all the pieces into a
// Code object.
bool CodeGenerator::MakeCode(CompilationInfo* info) {
// When using Crankshaft the classic backend should never be used.
ASSERT(!V8::UseCrankshaft());
Handle<Script> script = info->script();
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
int len = String::cast(script->source())->length();
Counters::total_old_codegen_source_size.Increment(len);
}
if (FLAG_trace_codegen) {
PrintF("Classic Compiler - ");
}
MakeCodePrologue(info);
// Generate code.
const int kInitialBufferSize = 4 * KB;
@ -230,6 +250,9 @@ bool CodeGenerator::MakeCode(CompilationInfo* info) {
InLoopFlag in_loop = info->is_in_loop() ? IN_LOOP : NOT_IN_LOOP;
Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
Handle<Code> code = MakeCodeEpilogue(cgen.masm(), flags, info);
// There is no stack check table in code generated by the classic backend.
code->SetNoStackCheckTable();
CodeGenerator::PrintCode(code, info);
info->SetCode(code); // May be an empty handle.
return !code.is_null();
}
@ -441,10 +464,11 @@ void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
int CEntryStub::MinorKey() {
ASSERT(result_size_ == 1 || result_size_ == 2);
int result = save_doubles_ ? 1 : 0;
#ifdef _WIN64
return result_size_ == 1 ? 0 : 1;
return result | ((result_size_ == 1) ? 0 : 2);
#else
return 0;
return result;
#endif
}

src/codegen.h

@ -68,6 +68,9 @@
// CodeForDoWhileConditionPosition
// CodeForSourcePosition
enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
#if V8_TARGET_ARCH_IA32
#include "ia32/codegen-ia32.h"
#elif V8_TARGET_ARCH_X64

src/compilation-cache.cc

@ -86,6 +86,9 @@ class CompilationSubCache {
// Clear this sub-cache evicting all its content.
void Clear();
// Remove given shared function info from sub-cache.
void Remove(Handle<SharedFunctionInfo> function_info);
// Number of generations in this sub-cache.
inline int generations() { return generations_; }
@ -249,6 +252,18 @@ void CompilationSubCache::Clear() {
}
void CompilationSubCache::Remove(Handle<SharedFunctionInfo> function_info) {
// Probe the script generation tables. Make sure not to leak handles
// into the caller's handle scope.
{ HandleScope scope;
for (int generation = 0; generation < generations(); generation++) {
Handle<CompilationCacheTable> table = GetTable(generation);
table->Remove(*function_info);
}
}
}
// We only re-use a cached function for some script source code if the
// script originates from the same place. This is to avoid issues
// when reporting errors, etc.
@ -467,6 +482,15 @@ void CompilationCacheRegExp::Put(Handle<String> source,
}
void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
if (!IsEnabled()) return;
eval_global.Remove(function_info);
eval_contextual.Remove(function_info);
script.Remove(function_info);
}
Handle<SharedFunctionInfo> CompilationCache::LookupScript(Handle<String> source,
Handle<Object> name,
int line_offset,
@ -545,6 +569,45 @@ void CompilationCache::PutRegExp(Handle<String> source,
}
static bool SourceHashCompare(void* key1, void* key2) {
return key1 == key2;
}
static HashMap* EagerOptimizingSet() {
static HashMap map(&SourceHashCompare);
return &map;
}
bool CompilationCache::ShouldOptimizeEagerly(Handle<JSFunction> function) {
if (FLAG_opt_eagerly) return true;
uint32_t hash = function->SourceHash();
void* key = reinterpret_cast<void*>(hash);
return EagerOptimizingSet()->Lookup(key, hash, false) != NULL;
}
void CompilationCache::MarkForEagerOptimizing(Handle<JSFunction> function) {
uint32_t hash = function->SourceHash();
void* key = reinterpret_cast<void*>(hash);
EagerOptimizingSet()->Lookup(key, hash, true);
}
void CompilationCache::MarkForLazyOptimizing(Handle<JSFunction> function) {
uint32_t hash = function->SourceHash();
void* key = reinterpret_cast<void*>(hash);
EagerOptimizingSet()->Remove(key, hash);
}
void CompilationCache::ResetEagerOptimizingData() {
HashMap* set = EagerOptimizingSet();
if (set->occupancy() > 0) set->Clear();
}
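The eager-optimizing set above keys V8's internal HashMap purely by the function's source hash, with pointer equality on the hash value acting as the key comparison. A rough standalone analogue of that bookkeeping using the standard library instead of the internal HashMap (names are illustrative, not from the tree):
// Sketch: remember which source hashes should be optimized eagerly the
// next time the corresponding function is compiled.
#include <cstdint>
#include <unordered_set>

static std::unordered_set<uint32_t>& EagerSetSketch() {
  static std::unordered_set<uint32_t> set;
  return set;
}

static void MarkEagerSketch(uint32_t source_hash) {
  EagerSetSketch().insert(source_hash);
}

static bool ShouldOptimizeEagerlySketch(uint32_t source_hash) {
  return EagerSetSketch().count(source_hash) != 0;
}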
void CompilationCache::Clear() {
for (int i = 0; i < kSubCacheCount; i++) {
subcaches[i]->Clear();


@ -76,9 +76,20 @@ class CompilationCache {
JSRegExp::Flags flags,
Handle<FixedArray> data);
// Support for eager optimization tracking.
static bool ShouldOptimizeEagerly(Handle<JSFunction> function);
static void MarkForEagerOptimizing(Handle<JSFunction> function);
static void MarkForLazyOptimizing(Handle<JSFunction> function);
// Reset the eager optimization tracking data.
static void ResetEagerOptimizingData();
// Clear the cache - also used to initialize the cache at startup.
static void Clear();
// Remove given shared function info from all caches.
static void Remove(Handle<SharedFunctionInfo> function_info);
// GC support.
static void Iterate(ObjectVisitor* v);
static void IterateFunctions(ObjectVisitor* v);


@ -35,12 +35,16 @@
#include "data-flow.h"
#include "debug.h"
#include "full-codegen.h"
#include "hydrogen.h"
#include "lithium-allocator.h"
#include "liveedit.h"
#include "oprofile-agent.h"
#include "parser.h"
#include "rewriter.h"
#include "runtime-profiler.h"
#include "scopeinfo.h"
#include "scopes.h"
#include "vm-state-inl.h"
namespace v8 {
namespace internal {
@ -52,7 +56,10 @@ CompilationInfo::CompilationInfo(Handle<Script> script)
scope_(NULL),
script_(script),
extension_(NULL),
pre_parse_data_(NULL) {
pre_parse_data_(NULL),
supports_deoptimization_(false),
osr_ast_id_(AstNode::kNoNumber) {
Initialize(NONOPT);
}
@ -63,7 +70,10 @@ CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info)
shared_info_(shared_info),
script_(Handle<Script>(Script::cast(shared_info->script()))),
extension_(NULL),
pre_parse_data_(NULL) {
pre_parse_data_(NULL),
supports_deoptimization_(false),
osr_ast_id_(AstNode::kNoNumber) {
Initialize(BASE);
}
@ -75,31 +85,200 @@ CompilationInfo::CompilationInfo(Handle<JSFunction> closure)
shared_info_(Handle<SharedFunctionInfo>(closure->shared())),
script_(Handle<Script>(Script::cast(shared_info_->script()))),
extension_(NULL),
pre_parse_data_(NULL) {
pre_parse_data_(NULL),
supports_deoptimization_(false),
osr_ast_id_(AstNode::kNoNumber) {
Initialize(BASE);
}
// For normal operation the syntax checker is used to determine whether to
// use the full compiler for top level code or not. However if the flag
// --always-full-compiler is specified or debugging is active the full
// compiler will be used for all code.
// Determine whether to use the full compiler for all code. If the flag
// --always-full-compiler is specified this is the case. For the virtual frame
// based compiler the full compiler is also used if a debugger is connected, as
// the code from the full compiler supports more precise break points. For the
// Crankshaft adaptive compiler, debugging optimized code is not possible at
// all. However, Crankshaft supports recompilation of functions, so in this
// case the full compiler need not be used just because a debugger is attached,
// but only if break points have actually been set.
static bool AlwaysFullCompiler() {
#ifdef ENABLE_DEBUGGER_SUPPORT
return FLAG_always_full_compiler || Debugger::IsDebuggerActive();
if (V8::UseCrankshaft()) {
return FLAG_always_full_compiler || Debug::has_break_points();
} else {
return FLAG_always_full_compiler || Debugger::IsDebuggerActive();
}
#else
return FLAG_always_full_compiler;
#endif
}
static void FinishOptimization(Handle<JSFunction> function, int64_t start) {
int opt_count = function->shared()->opt_count();
function->shared()->set_opt_count(opt_count + 1);
if (!FLAG_trace_opt) return;
double ms = static_cast<double>(OS::Ticks() - start) / 1000;
PrintF("[optimizing: ");
function->PrintName();
PrintF(" / %" V8PRIxPTR, reinterpret_cast<intptr_t>(*function));
PrintF(" - took %0.3f ms]\n", ms);
}
static void AbortAndDisable(CompilationInfo* info) {
// Disable optimization for the shared function info and mark the
// code as non-optimizable. The marker on the shared function info
// is there because we flush non-optimized code, thereby losing the
// non-optimizable information for the code. When the code is
// regenerated and set on the shared function info it is marked as
// non-optimizable if optimization is disabled for the shared
// function info.
Handle<SharedFunctionInfo> shared = info->shared_info();
shared->set_optimization_disabled(true);
Handle<Code> code = Handle<Code>(shared->code());
ASSERT(code->kind() == Code::FUNCTION);
code->set_optimizable(false);
info->SetCode(code);
if (FLAG_trace_opt) {
PrintF("[disabled optimization for: ");
info->closure()->PrintName();
PrintF(" / %" V8PRIxPTR "]\n",
reinterpret_cast<intptr_t>(*info->closure()));
}
}
static bool MakeCrankshaftCode(CompilationInfo* info) {
// Test if we can optimize this function when asked to. We can only
// do this after the scopes are computed.
if (!info->AllowOptimize()) info->DisableOptimization();
// In case we are not optimizing simply return the code from
// the full code generator.
if (!info->IsOptimizing()) {
return FullCodeGenerator::MakeCode(info);
}
// We should never arrive here if there is no code object on the
// shared function object.
Handle<Code> code(info->shared_info()->code());
ASSERT(code->kind() == Code::FUNCTION);
// Fall back to using the full code generator if it's not possible
// to use the Hydrogen-based optimizing compiler. We already have
// generated code for this from the shared function object.
if (AlwaysFullCompiler() || !FLAG_use_hydrogen) {
info->SetCode(code);
return true;
}
// Limit the number of times we re-compile a function with
// the optimizing compiler.
const int kMaxOptCount = FLAG_deopt_every_n_times == 0 ? 10 : 1000;
if (info->shared_info()->opt_count() > kMaxOptCount) {
AbortAndDisable(info);
// True indicates the compilation pipeline is still going, not
// necessarily that we optimized the code.
return true;
}
// Due to an encoding limit on LUnallocated operands in the Lithium
// language, we cannot optimize functions with too many formal parameters
// or perform on-stack replacement for functions with too many
// stack-allocated local variables.
//
// The encoding is as a signed value, with parameters using the negative
// indices and locals the non-negative ones.
const int limit = LUnallocated::kMaxFixedIndices / 2;
Scope* scope = info->scope();
if (scope->num_parameters() > limit || scope->num_stack_slots() > limit) {
AbortAndDisable(info);
// True indicates the compilation pipeline is still going, not
// necessarily that we optimized the code.
return true;
}
// Take --hydrogen-filter into account.
Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
Handle<String> name = info->function()->debug_name();
bool match = filter.is_empty() || name->IsEqualTo(filter);
if (!match) {
info->SetCode(code);
return true;
}
// Recompile the unoptimized version of the code if the current version
// doesn't have deoptimization support. Alternatively, we may decide to
// run the full code generator to get a baseline for the compile-time
// performance of the hydrogen-based compiler.
int64_t start = OS::Ticks();
bool should_recompile = !info->shared_info()->has_deoptimization_support();
if (should_recompile || FLAG_time_hydrogen) {
HPhase phase(HPhase::kFullCodeGen);
CompilationInfo unoptimized(info->shared_info());
// Note that we use the same AST that we will use for generating the
// optimized code.
unoptimized.SetFunction(info->function());
unoptimized.SetScope(info->scope());
if (should_recompile) unoptimized.EnableDeoptimizationSupport();
bool succeeded = FullCodeGenerator::MakeCode(&unoptimized);
if (should_recompile) {
if (!succeeded) return false;
Handle<SharedFunctionInfo> shared = info->shared_info();
shared->EnableDeoptimizationSupport(*unoptimized.code());
// The existing unoptimized code was replaced with the new one.
Compiler::RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG,
Handle<String>(shared->DebugName()),
shared->start_position(),
&unoptimized);
}
}
// Check that the unoptimized, shared code is ready for
// optimizations. When using the always_opt flag we disregard the
// optimizable marker in the code object and optimize anyway. This
// is safe as long as the unoptimized code has deoptimization
// support.
ASSERT(FLAG_always_opt || info->shared_info()->code()->optimizable());
ASSERT(info->shared_info()->has_deoptimization_support());
if (FLAG_trace_hydrogen) {
PrintF("-----------------------------------------------------------\n");
PrintF("Compiling method %s using hydrogen\n", *name->ToCString());
HTracer::Instance()->TraceCompilation(info->function());
}
TypeFeedbackOracle oracle(Handle<Code>(info->shared_info()->code()));
HGraphBuilder builder(&oracle);
HPhase phase(HPhase::kTotal);
HGraph* graph = builder.CreateGraph(info);
if (graph != NULL && FLAG_build_lithium) {
Handle<Code> code = graph->Compile();
if (!code.is_null()) {
info->SetCode(code);
FinishOptimization(info->closure(), start);
return true;
}
}
// Compilation with the Hydrogen compiler failed. Keep using the
// shared code but mark it as unoptimizable.
AbortAndDisable(info);
// True indicates the compilation pipeline is still going, not necessarily
// that we optimized the code.
return true;
}
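The parameter/local limit enforced earlier in MakeCrankshaftCode follows from Lithium's operand encoding: a single signed index addresses parameters with negative values and stack locals with non-negative ones, so each side gets only half of kMaxFixedIndices. A small illustrative encoder, assuming that signed-index scheme (the helper names and exact mapping are hypothetical, not taken from the Lithium sources):
// Sketch of the signed operand index described above: parameters map to
// negative indices, stack-allocated locals to non-negative ones.
static int EncodeOperandIndexSketch(bool is_parameter, int index) {
  // Parameter i -> -(i + 1), local i -> i.
  return is_parameter ? -(index + 1) : index;
}

static bool DecodeIsParameterSketch(int encoded) {
  return encoded < 0;
}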
static bool MakeCode(CompilationInfo* info) {
// Precondition: code has been parsed. Postcondition: the code field in
// the compilation info is set if compilation succeeded.
ASSERT(info->function() != NULL);
if (Rewriter::Rewrite(info) &&
Scope::Analyze(info) &&
Rewriter::Analyze(info)) {
if (Rewriter::Rewrite(info) && Scope::Analyze(info)) {
if (V8::UseCrankshaft()) return MakeCrankshaftCode(info);
// Generate code and return it. Code generator selection is governed by
// which backends are enabled and whether the function is considered
// run-once code or not.
@ -109,17 +288,19 @@ static bool MakeCode(CompilationInfo* info) {
//
// The normal choice of backend can be overridden with the flags
// --always-full-compiler.
Handle<SharedFunctionInfo> shared = info->shared_info();
bool is_run_once = (shared.is_null())
? info->scope()->is_global_scope()
: (shared->is_toplevel() || shared->try_full_codegen());
bool can_use_full =
FLAG_full_compiler && !info->function()->contains_loops();
if (AlwaysFullCompiler() || (is_run_once && can_use_full)) {
return FullCodeGenerator::MakeCode(info);
} else {
AssignedVariablesAnalyzer ava;
return ava.Analyze(info) && CodeGenerator::MakeCode(info);
if (Rewriter::Analyze(info)) {
Handle<SharedFunctionInfo> shared = info->shared_info();
bool is_run_once = (shared.is_null())
? info->scope()->is_global_scope()
: (shared->is_toplevel() || shared->try_full_codegen());
bool can_use_full =
FLAG_full_compiler && !info->function()->contains_loops();
if (AlwaysFullCompiler() || (is_run_once && can_use_full)) {
return FullCodeGenerator::MakeCode(info);
} else {
return AssignedVariablesAnalyzer::Analyze(info) &&
CodeGenerator::MakeCode(info);
}
}
}
@ -374,40 +555,60 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
Top::StackOverflow();
} else {
ASSERT(!info->code().is_null());
Handle<Code> code = info->code();
Handle<JSFunction> function = info->closure();
RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG,
Handle<String>(shared->DebugName()),
shared->start_position(),
info);
// Update the shared function info with the compiled code and the
// scope info. Please note that the order of the shared function
// initialization is important since SerializedScopeInfo::Create might
// trigger a GC, causing the ASSERT below to be invalid if the code
// was flushed. By setting the code object last we avoid this.
Handle<SerializedScopeInfo> scope_info =
SerializedScopeInfo::Create(info->scope());
shared->set_scope_info(*scope_info);
shared->set_code(*info->code());
if (!info->closure().is_null()) {
info->closure()->set_code(*info->code());
if (info->IsOptimizing()) {
function->ReplaceCode(*code);
} else {
// Update the shared function info with the compiled code and the
// scope info. Please note that the order of the shared function
// info initialization is important since set_scope_info might
// trigger a GC, causing the ASSERT below to be invalid if the code
// was flushed. By setting the code object last we avoid this.
Handle<SerializedScopeInfo> scope_info =
SerializedScopeInfo::Create(info->scope());
shared->set_scope_info(*scope_info);
shared->set_code(*code);
if (!function.is_null()) {
function->ReplaceCode(*code);
ASSERT(!function->IsOptimized());
}
// Set the expected number of properties for instances.
FunctionLiteral* lit = info->function();
int expected = lit->expected_property_count();
SetExpectedNofPropertiesFromEstimate(shared, expected);
// Set the optimization hints after performing lazy compilation, as
// these are not set when the function is set up as a lazily
// compiled function.
shared->SetThisPropertyAssignmentsInfo(
lit->has_only_simple_this_property_assignments(),
*lit->this_property_assignments());
// Check the function has compiled code.
ASSERT(shared->is_compiled());
shared->set_code_age(0);
if (V8::UseCrankshaft() && info->AllowOptimize()) {
// If we're asked to always optimize, we compile the optimized
// version of the function right away - unless the debugger is
// active as it makes no sense to compile optimized code then.
if (FLAG_always_opt && !Debug::has_break_points()) {
CompilationInfo optimized(function);
optimized.SetOptimizing(AstNode::kNoNumber);
return CompileLazy(&optimized);
} else if (CompilationCache::ShouldOptimizeEagerly(function)) {
RuntimeProfiler::OptimizeSoon(*function);
}
}
}
// Set the expected number of properties for instances.
FunctionLiteral* lit = info->function();
SetExpectedNofPropertiesFromEstimate(shared,
lit->expected_property_count());
// Set the optimization hints after performing lazy compilation, as
// these are not set when the function is set up as a lazily compiled
// function.
shared->SetThisPropertyAssignmentsInfo(
lit->has_only_simple_this_property_assignments(),
*lit->this_property_assignments());
// Check the function has compiled code.
ASSERT(shared->is_compiled());
shared->set_code_age(0);
ASSERT(!info->code().is_null());
return true;
}
}
@ -419,12 +620,6 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
Handle<Script> script) {
#ifdef DEBUG
// We should not try to compile the same function literal more than
// once.
literal->mark_as_compiled();
#endif
// Precondition: code has been parsed and scopes have been analyzed.
CompilationInfo info(script);
info.SetFunction(literal);
@ -446,28 +641,31 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
Handle<Code> code(Builtins::builtin(Builtins::LazyCompile));
info.SetCode(code);
} else {
// Generate code and return it. The way that the compilation mode
// is controlled by the command-line flags is described in
// the static helper function MakeCode.
//
// The bodies of function literals have not yet been visited by
// the AST analyzer.
if (!Rewriter::Analyze(&info)) return Handle<SharedFunctionInfo>::null();
bool is_run_once = literal->try_full_codegen();
bool use_full = FLAG_full_compiler && !literal->contains_loops();
if (AlwaysFullCompiler() || (use_full && is_run_once)) {
if (!FullCodeGenerator::MakeCode(&info)) {
if (V8::UseCrankshaft()) {
if (!MakeCrankshaftCode(&info)) {
return Handle<SharedFunctionInfo>::null();
}
} else {
// We fall back to the classic V8 code generator.
AssignedVariablesAnalyzer ava;
if (!ava.Analyze(&info)) return Handle<SharedFunctionInfo>::null();
if (!CodeGenerator::MakeCode(&info)) {
return Handle<SharedFunctionInfo>::null();
// The bodies of function literals have not yet been visited by the
// AST optimizer/analyzer.
if (!Rewriter::Analyze(&info)) return Handle<SharedFunctionInfo>::null();
bool is_run_once = literal->try_full_codegen();
bool can_use_full = FLAG_full_compiler && !literal->contains_loops();
if (AlwaysFullCompiler() || (is_run_once && can_use_full)) {
if (!FullCodeGenerator::MakeCode(&info)) {
return Handle<SharedFunctionInfo>::null();
}
} else {
// We fall back to the classic V8 code generator.
if (!AssignedVariablesAnalyzer::Analyze(&info) ||
!CodeGenerator::MakeCode(&info)) {
return Handle<SharedFunctionInfo>::null();
}
}
}
ASSERT(!info.code().is_null());
// Function compilation complete.
RecordFunctionCompilation(Logger::FUNCTION_TAG,
@ -484,6 +682,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
info.code(),
scope_info);
SetFunctionInfo(result, literal, false, script);
result->set_allows_lazy_compilation(allow_lazy);
// Set the expected number of properties for instances and return
// the resulting function.


@ -59,6 +59,7 @@ class CompilationInfo BASE_EMBEDDED {
v8::Extension* extension() const { return extension_; }
ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
Handle<Context> calling_context() const { return calling_context_; }
int osr_ast_id() const { return osr_ast_id_; }
void MarkAsEval() {
ASSERT(!is_lazy());
@ -93,8 +94,66 @@ class CompilationInfo BASE_EMBEDDED {
ASSERT(is_eval());
calling_context_ = context;
}
void SetOsrAstId(int osr_ast_id) {
ASSERT(IsOptimizing());
osr_ast_id_ = osr_ast_id;
}
bool has_global_object() const {
return !closure().is_null() && (closure()->context()->global() != NULL);
}
GlobalObject* global_object() const {
return has_global_object() ? closure()->context()->global() : NULL;
}
// Accessors for the different compilation modes.
bool IsOptimizing() const { return mode_ == OPTIMIZE; }
bool IsOptimizable() const { return mode_ == BASE; }
void SetOptimizing(int osr_ast_id) {
SetMode(OPTIMIZE);
osr_ast_id_ = osr_ast_id;
}
void DisableOptimization() { SetMode(NONOPT); }
// Deoptimization support.
bool HasDeoptimizationSupport() const { return supports_deoptimization_; }
void EnableDeoptimizationSupport() {
ASSERT(IsOptimizable());
supports_deoptimization_ = true;
}
// Determine whether or not we can adaptively optimize.
bool AllowOptimize() {
return V8::UseCrankshaft() &&
!closure_.is_null() &&
function_->AllowOptimize();
}
private:
// Compilation mode.
// BASE is generated by the full codegen, optionally prepared for bailouts.
// OPTIMIZE is optimized code generated by the Hydrogen-based backend.
// NONOPT is generated by the full codegen or the classic backend
// and is not prepared for recompilation/bailouts. These functions
// are never recompiled.
enum Mode {
BASE,
OPTIMIZE,
NONOPT
};
CompilationInfo() : function_(NULL) {}
void Initialize(Mode mode) {
mode_ = V8::UseCrankshaft() ? mode : NONOPT;
}
void SetMode(Mode mode) {
ASSERT(V8::UseCrankshaft());
mode_ = mode;
}
// Flags using template class BitField<type, start, length>. All are
// false by default.
//
@ -130,6 +189,11 @@ class CompilationInfo BASE_EMBEDDED {
// handle otherwise.
Handle<Context> calling_context_;
// Compilation mode flag and whether deoptimization is allowed.
Mode mode_;
bool supports_deoptimization_;
int osr_ast_id_;
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
@ -185,7 +249,6 @@ class Compiler : public AllStatic {
static bool MakeCodeForLiveEdit(CompilationInfo* info);
#endif
private:
static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
Handle<String> name,
int start_position,


@ -239,6 +239,69 @@ bool Context::GlobalIfNotShadowedByEval(Handle<String> name) {
}
void Context::AddOptimizedFunction(JSFunction* function) {
ASSERT(IsGlobalContext());
#ifdef DEBUG
Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
while (!element->IsUndefined()) {
CHECK(element != function);
element = JSFunction::cast(element)->next_function_link();
}
CHECK(function->next_function_link()->IsUndefined());
// Check that the context belongs to the weak global contexts list.
bool found = false;
Object* context = Heap::global_contexts_list();
while (!context->IsUndefined()) {
if (context == this) {
found = true;
break;
}
context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
}
CHECK(found);
#endif
function->set_next_function_link(get(OPTIMIZED_FUNCTIONS_LIST));
set(OPTIMIZED_FUNCTIONS_LIST, function);
}
void Context::RemoveOptimizedFunction(JSFunction* function) {
ASSERT(IsGlobalContext());
Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
JSFunction* prev = NULL;
while (!element->IsUndefined()) {
JSFunction* element_function = JSFunction::cast(element);
ASSERT(element_function->next_function_link()->IsUndefined() ||
element_function->next_function_link()->IsJSFunction());
if (element_function == function) {
if (prev == NULL) {
set(OPTIMIZED_FUNCTIONS_LIST, element_function->next_function_link());
} else {
prev->set_next_function_link(element_function->next_function_link());
}
element_function->set_next_function_link(Heap::undefined_value());
return;
}
prev = element_function;
element = element_function->next_function_link();
}
UNREACHABLE();
}
Object* Context::OptimizedFunctionsListHead() {
ASSERT(IsGlobalContext());
return get(OPTIMIZED_FUNCTIONS_LIST);
}
void Context::ClearOptimizedFunctions() {
set(OPTIMIZED_FUNCTIONS_LIST, Heap::undefined_value());
}
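The optimized-function list above is an intrusive singly linked list threaded through each function's next_function_link slot, so removal is a linear scan that repairs the predecessor's link. A standalone sketch of that unlink pattern with plain C++ types rather than the V8 heap objects:
// Illustrative intrusive list node and unlink, mirroring
// Context::RemoveOptimizedFunction above.
struct NodeSketch {
  NodeSketch* next;
};

static NodeSketch* RemoveFromListSketch(NodeSketch* head, NodeSketch* victim) {
  NodeSketch* prev = nullptr;
  for (NodeSketch* cur = head; cur != nullptr; cur = cur->next) {
    if (cur == victim) {
      if (prev == nullptr) {
        head = cur->next;        // victim was the head
      } else {
        prev->next = cur->next;  // splice it out
      }
      cur->next = nullptr;
      break;
    }
    prev = cur;
  }
  return head;                   // possibly-updated head pointer
}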
#ifdef DEBUG
bool Context::IsBootstrappingOrContext(Object* object) {
// During bootstrapping we allow all objects to pass as


@ -228,12 +228,13 @@ class Context: public FixedArray {
// Properties from here are treated as weak references by the full GC.
// Scavenge treats them as strong references.
NEXT_CONTEXT_LINK,
OPTIMIZED_FUNCTIONS_LIST, // Weak.
NEXT_CONTEXT_LINK, // Weak.
// Total number of slots.
GLOBAL_CONTEXT_SLOTS,
FIRST_WEAK_SLOT = NEXT_CONTEXT_LINK
FIRST_WEAK_SLOT = OPTIMIZED_FUNCTIONS_LIST
};
// Direct slot access.
@ -291,6 +292,12 @@ class Context: public FixedArray {
return IsCatchContext() && extension() == object;
}
// A global context holds a list of all functions which have been optimized.
void AddOptimizedFunction(JSFunction* function);
void RemoveOptimizedFunction(JSFunction* function);
Object* OptimizedFunctionsListHead();
void ClearOptimizedFunctions();
#define GLOBAL_CONTEXT_FIELD_ACCESSORS(index, type, name) \
void set_##name(type* value) { \
ASSERT(IsGlobalContext()); \


@ -34,6 +34,7 @@
#include "frames-inl.h"
#include "hashmap.h"
#include "log-inl.h"
#include "vm-state-inl.h"
#include "../include/v8-profiler.h"
@ -223,7 +224,7 @@ void ProfilerEventsProcessor::RegExpCodeCreateEvent(
void ProfilerEventsProcessor::AddCurrentStack() {
TickSampleEventRecord record;
TickSample* sample = &record.sample;
sample->state = VMState::current_state();
sample->state = Top::current_vm_state();
sample->pc = reinterpret_cast<Address>(sample); // Not NULL.
sample->frames_count = 0;
for (StackTraceFrameIterator it;
@ -314,6 +315,7 @@ void ProfilerEventsProcessor::Run() {
CpuProfiler* CpuProfiler::singleton_ = NULL;
Atomic32 CpuProfiler::is_profiling_ = false;
void CpuProfiler::StartProfiling(const char* title) {
ASSERT(singleton_ != NULL);
@ -435,7 +437,7 @@ void CpuProfiler::FunctionCreateEvent(JSFunction* function) {
}
singleton_->processor_->FunctionCreateEvent(
function->address(),
function->code()->address(),
function->shared()->code()->address(),
security_token_id);
}
@ -525,6 +527,7 @@ void CpuProfiler::StartProcessorIfNotStarted() {
Logger::logging_nesting_ = 0;
generator_ = new ProfileGenerator(profiles_);
processor_ = new ProfilerEventsProcessor(generator_);
NoBarrier_Store(&is_profiling_, true);
processor_->Start();
// Enumerate stuff we already have in the heap.
if (Heap::HasBeenSetup()) {
@ -539,7 +542,9 @@ void CpuProfiler::StartProcessorIfNotStarted() {
Logger::LogAccessorCallbacks();
}
// Enable stack sampling.
reinterpret_cast<Sampler*>(Logger::ticker_)->Start();
Sampler* sampler = reinterpret_cast<Sampler*>(Logger::ticker_);
if (!sampler->IsActive()) sampler->Start();
sampler->IncreaseProfilingDepth();
}
}
@ -570,12 +575,15 @@ CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token,
void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
if (profiles_->IsLastProfile(title)) {
reinterpret_cast<Sampler*>(Logger::ticker_)->Stop();
Sampler* sampler = reinterpret_cast<Sampler*>(Logger::ticker_);
sampler->DecreaseProfilingDepth();
sampler->Stop();
processor_->Stop();
processor_->Join();
delete processor_;
delete generator_;
processor_ = NULL;
NoBarrier_Store(&is_profiling_, false);
generator_ = NULL;
Logger::logging_nesting_ = saved_logging_nesting_;
}


@ -30,6 +30,7 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
#include "atomicops.h"
#include "circular-queue.h"
#include "unbound-queue.h"
@ -269,7 +270,7 @@ class CpuProfiler {
static void SetterCallbackEvent(String* name, Address entry_point);
static INLINE(bool is_profiling()) {
return singleton_ != NULL && singleton_->processor_ != NULL;
return NoBarrier_Load(&is_profiling_);
}
private:
@ -290,6 +291,7 @@ class CpuProfiler {
int saved_logging_nesting_;
static CpuProfiler* singleton_;
static Atomic32 is_profiling_;
#else
static INLINE(bool is_profiling()) { return false; }


@ -138,6 +138,10 @@ class Shell: public i::AllStatic {
static Handle<Value> DebugCommandToJSONRequest(Handle<String> command);
#endif
#ifdef WIN32
#undef Yield
#endif
static Handle<Value> Print(const Arguments& args);
static Handle<Value> Write(const Arguments& args);
static Handle<Value> Yield(const Arguments& args);


@ -33,7 +33,6 @@
namespace v8 {
namespace internal {
#ifdef DEBUG
void BitVector::Print() {
bool first = true;
@ -50,13 +49,39 @@ void BitVector::Print() {
#endif
void BitVector::Iterator::Advance() {
current_++;
uint32_t val = current_value_;
while (val == 0) {
current_index_++;
if (Done()) return;
val = target_->data_[current_index_];
current_ = current_index_ << 5;
}
val = SkipZeroBytes(val);
val = SkipZeroBits(val);
current_value_ = val >> 1;
}
bool AssignedVariablesAnalyzer::Analyze(CompilationInfo* info) {
info_ = info;
Scope* scope = info->scope();
int variables = scope->num_parameters() + scope->num_stack_slots();
if (variables == 0) return true;
av_.ExpandTo(variables);
VisitStatements(info->function()->body());
int size = scope->num_parameters() + scope->num_stack_slots();
if (size == 0) return true;
AssignedVariablesAnalyzer analyzer(info, size);
return analyzer.Analyze();
}
AssignedVariablesAnalyzer::AssignedVariablesAnalyzer(CompilationInfo* info,
int size)
: info_(info), av_(size) {
}
bool AssignedVariablesAnalyzer::Analyze() {
ASSERT(av_.length() > 0);
VisitStatements(info_->function()->body());
return !HasStackOverflow();
}
@ -318,11 +343,6 @@ void AssignedVariablesAnalyzer::VisitConditional(Conditional* expr) {
}
void AssignedVariablesAnalyzer::VisitSlot(Slot* expr) {
UNREACHABLE();
}
void AssignedVariablesAnalyzer::VisitVariableProxy(VariableProxy* expr) {
// Nothing to do.
ASSERT(av_.IsEmpty());


@ -42,10 +42,57 @@ class Node;
class BitVector: public ZoneObject {
public:
BitVector() : length_(0), data_length_(0), data_(NULL) { }
// Iterator for the elements of this BitVector.
class Iterator BASE_EMBEDDED {
public:
explicit Iterator(BitVector* target)
: target_(target),
current_index_(0),
current_value_(target->data_[0]),
current_(-1) {
ASSERT(target->data_length_ > 0);
Advance();
}
~Iterator() { }
explicit BitVector(int length) {
ExpandTo(length);
bool Done() const { return current_index_ >= target_->data_length_; }
void Advance();
int Current() const {
ASSERT(!Done());
return current_;
}
private:
uint32_t SkipZeroBytes(uint32_t val) {
while ((val & 0xFF) == 0) {
val >>= 8;
current_ += 8;
}
return val;
}
uint32_t SkipZeroBits(uint32_t val) {
while ((val & 0x1) == 0) {
val >>= 1;
current_++;
}
return val;
}
BitVector* target_;
int current_index_;
uint32_t current_value_;
int current_;
friend class BitVector;
};
explicit BitVector(int length)
: length_(length),
data_length_(SizeFor(length)),
data_(Zone::NewArray<uint32_t>(data_length_)) {
ASSERT(length > 0);
Clear();
}
BitVector(const BitVector& other)
@ -55,12 +102,8 @@ class BitVector: public ZoneObject {
CopyFrom(other);
}
void ExpandTo(int length) {
ASSERT(length > 0);
length_ = length;
data_length_ = SizeFor(length);
data_ = Zone::NewArray<uint32_t>(data_length_);
Clear();
static int SizeFor(int length) {
return 1 + ((length - 1) / 32);
}
BitVector& operator=(const BitVector& rhs) {
@ -75,7 +118,7 @@ class BitVector: public ZoneObject {
}
}
bool Contains(int i) {
bool Contains(int i) const {
ASSERT(i >= 0 && i < length());
uint32_t block = data_[i / 32];
return (block & (1U << (i % 32))) != 0;
@ -98,6 +141,17 @@ class BitVector: public ZoneObject {
}
}
bool UnionIsChanged(const BitVector& other) {
ASSERT(other.length() == length());
bool changed = false;
for (int i = 0; i < data_length_; i++) {
uint32_t old_data = data_[i];
data_[i] |= other.data_[i];
if (data_[i] != old_data) changed = true;
}
return changed;
}
void Intersect(const BitVector& other) {
ASSERT(other.length() == length());
for (int i = 0; i < data_length_; i++) {
@ -139,16 +193,102 @@ class BitVector: public ZoneObject {
#endif
private:
static int SizeFor(int length) {
return 1 + ((length - 1) / 32);
}
int length_;
int data_length_;
uint32_t* data_;
};
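BitVector::Iterator finds the next set bit by skipping whole zero words, then zero bytes, then zero bits, advancing a running bit position as it goes. A compact standalone sketch of that skip loop over a raw word array, assuming the same 32-bit words and byte/bit skipping order:
// Returns the index of the first set bit at or after 'start', or -1 if
// there is none. Mirrors the word/byte/bit skipping in the iterator above.
#include <cstdint>

static int NextSetBitSketch(const uint32_t* words, int word_count, int start) {
  int word = start / 32;
  if (word >= word_count) return -1;
  uint32_t val = words[word] >> (start % 32);
  int pos = start;
  while (val == 0) {                                   // skip empty words
    if (++word >= word_count) return -1;
    val = words[word];
    pos = word * 32;
  }
  while ((val & 0xFF) == 0) { val >>= 8; pos += 8; }   // skip zero bytes
  while ((val & 0x1) == 0) { val >>= 1; pos += 1; }    // skip zero bits
  return pos;
}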
// An implementation of a sparse set whose elements are drawn from integers
// in the range [0..universe_size[. It supports constant-time Contains,
// destructive Add, and destructive Remove operations and linear-time (in
// the number of elements) destructive Union.
class SparseSet: public ZoneObject {
public:
// Iterator for sparse set elements. Elements should not be added or
// removed during iteration.
class Iterator BASE_EMBEDDED {
public:
explicit Iterator(SparseSet* target) : target_(target), current_(0) {
ASSERT(++target->iterator_count_ > 0);
}
~Iterator() {
ASSERT(target_->iterator_count_-- > 0);
}
bool Done() const { return current_ >= target_->dense_.length(); }
void Advance() {
ASSERT(!Done());
++current_;
}
int Current() {
ASSERT(!Done());
return target_->dense_[current_];
}
private:
SparseSet* target_;
int current_;
friend class SparseSet;
};
explicit SparseSet(int universe_size)
: dense_(4),
sparse_(Zone::NewArray<int>(universe_size)) {
#ifdef DEBUG
size_ = universe_size;
iterator_count_ = 0;
#endif
}
bool Contains(int n) const {
ASSERT(0 <= n && n < size_);
int dense_index = sparse_[n];
return (0 <= dense_index) &&
(dense_index < dense_.length()) &&
(dense_[dense_index] == n);
}
void Add(int n) {
ASSERT(0 <= n && n < size_);
ASSERT(iterator_count_ == 0);
if (!Contains(n)) {
sparse_[n] = dense_.length();
dense_.Add(n);
}
}
void Remove(int n) {
ASSERT(0 <= n && n < size_);
ASSERT(iterator_count_ == 0);
if (Contains(n)) {
int dense_index = sparse_[n];
int last = dense_.RemoveLast();
if (dense_index < dense_.length()) {
dense_[dense_index] = last;
sparse_[last] = dense_index;
}
}
}
void Union(const SparseSet& other) {
for (int i = 0; i < other.dense_.length(); ++i) {
Add(other.dense_[i]);
}
}
private:
// The set is implemented as a pair of a growable dense list and an
// uninitialized sparse array.
ZoneList<int> dense_;
int* sparse_;
#ifdef DEBUG
int size_;
int iterator_count_;
#endif
};
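The sparse-set trick above (a growable dense list of members plus a sparse index array that may hold garbage) gives constant-time membership tests without initializing the whole universe. A minimal standalone sketch of the same structure using std::vector; this is illustrative only, not the Zone-allocated V8 class:
// Sketch of the dense/sparse pair: dense_ holds the members, sparse_ maps a
// value to its position in dense_; a stale sparse_ entry is detected by the
// cross-check in Contains.
#include <vector>

class SparseSetSketch {
 public:
  explicit SparseSetSketch(int universe_size) : sparse_(universe_size) {}

  bool Contains(int n) const {
    size_t d = sparse_[n];
    return d < dense_.size() && dense_[d] == n;
  }

  void Add(int n) {
    if (!Contains(n)) {
      sparse_[n] = dense_.size();
      dense_.push_back(n);
    }
  }

  void Remove(int n) {
    if (Contains(n)) {
      int last = dense_.back();
      dense_.pop_back();
      size_t d = sparse_[n];
      if (d < dense_.size()) {   // move the old last element into the hole
        dense_[d] = last;
        sparse_[last] = d;
      }
    }
  }

 private:
  std::vector<int> dense_;
  std::vector<size_t> sparse_;   // contents only meaningful for members
};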
// Simple fixed-capacity list-based worklist (managed as a queue) of
// pointers to T.
template<typename T>
@ -198,10 +338,12 @@ class WorkList BASE_EMBEDDED {
// is guaranteed to be a smi.
class AssignedVariablesAnalyzer : public AstVisitor {
public:
explicit AssignedVariablesAnalyzer() : info_(NULL) { }
bool Analyze(CompilationInfo* info);
static bool Analyze(CompilationInfo* info);
private:
AssignedVariablesAnalyzer(CompilationInfo* info, int bits);
bool Analyze();
Variable* FindSmiLoopVariable(ForStatement* stmt);
int BitIndex(Variable* var);


@ -35,6 +35,7 @@
#include "compilation-cache.h"
#include "compiler.h"
#include "debug.h"
#include "deoptimizer.h"
#include "execution.h"
#include "global-handles.h"
#include "ic.h"
@ -140,7 +141,9 @@ void BreakLocationIterator::Next() {
Address target = original_rinfo()->target_address();
Code* code = Code::GetCodeFromTargetAddress(target);
if ((code->is_inline_cache_stub() &&
code->kind() != Code::BINARY_OP_IC) ||
!code->is_binary_op_stub() &&
!code->is_type_recording_binary_op_stub() &&
!code->is_compare_ic_stub()) ||
RelocInfo::IsConstructCall(rmode())) {
break_point_++;
return;
@ -1661,6 +1664,12 @@ bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared) {
// Ensure shared is compiled. Return false if this failed.
if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
// If preparing for the first break point make sure to deoptimize all
// functions as debugging does not work with optimized code.
if (!has_break_points_) {
Deoptimizer::DeoptimizeAll();
}
// Create the debug info object.
Handle<DebugInfo> debug_info = Factory::NewDebugInfo(shared);

1139
src/deoptimizer.cc Normal file

File diff suppressed because it is too large

511
src/deoptimizer.h Normal file

@ -0,0 +1,511 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_DEOPTIMIZER_H_
#define V8_DEOPTIMIZER_H_
#include "v8.h"
#include "macro-assembler.h"
#include "zone-inl.h"
namespace v8 {
namespace internal {
class FrameDescription;
class TranslationIterator;
class DeoptimizingCodeListNode;
class ValueDescription BASE_EMBEDDED {
public:
explicit ValueDescription(int index) : stack_index_(index) { }
int stack_index() const { return stack_index_; }
private:
// Offset relative to the top of the stack.
int stack_index_;
};
class ValueDescriptionInteger32: public ValueDescription {
public:
ValueDescriptionInteger32(int index, int32_t value)
: ValueDescription(index), int32_value_(value) { }
int32_t int32_value() const { return int32_value_; }
private:
// Raw value.
int32_t int32_value_;
};
class ValueDescriptionDouble: public ValueDescription {
public:
ValueDescriptionDouble(int index, double value)
: ValueDescription(index), double_value_(value) { }
double double_value() const { return double_value_; }
private:
// Raw value.
double double_value_;
};
class OptimizedFunctionVisitor BASE_EMBEDDED {
public:
virtual ~OptimizedFunctionVisitor() {}
// Function which is called before iteration of any optimized functions
// from given global context.
virtual void EnterContext(Context* context) = 0;
virtual void VisitFunction(JSFunction* function) = 0;
// Function which is called after iteration of all optimized functions
// from given global context.
virtual void LeaveContext(Context* context) = 0;
};
class Deoptimizer : public Malloced {
public:
enum BailoutType {
EAGER,
LAZY,
OSR
};
int output_count() const { return output_count_; }
static Deoptimizer* New(JSFunction* function,
BailoutType type,
unsigned bailout_id,
Address from,
int fp_to_sp_delta);
static Deoptimizer* Grab();
// Deoptimize the function now. Its current optimized code will never be run
// again and any activations of the optimized code will get deoptimized when
// execution returns.
static void DeoptimizeFunction(JSFunction* function);
// Deoptimize all functions in the heap.
static void DeoptimizeAll();
static void DeoptimizeGlobalObject(JSObject* object);
static void VisitAllOptimizedFunctionsForContext(
Context* context, OptimizedFunctionVisitor* visitor);
static void VisitAllOptimizedFunctionsForGlobalObject(
JSObject* object, OptimizedFunctionVisitor* visitor);
static void VisitAllOptimizedFunctions(OptimizedFunctionVisitor* visitor);
// Given the relocation info of a call to the stack check stub, patch the
// code so as to go unconditionally to the on-stack replacement builtin
// instead.
static void PatchStackCheckCode(RelocInfo* rinfo, Code* replacement_code);
// Given the relocation info of a call to the on-stack replacement
// builtin, patch the code back to the original stack check code.
static void RevertStackCheckCode(RelocInfo* rinfo, Code* check_code);
~Deoptimizer();
void InsertHeapNumberValues(int index, JavaScriptFrame* frame);
static void ComputeOutputFrames(Deoptimizer* deoptimizer);
static Address GetDeoptimizationEntry(int id, BailoutType type);
static int GetDeoptimizationId(Address addr, BailoutType type);
static unsigned GetOutputInfo(DeoptimizationOutputData* data,
unsigned node_id,
SharedFunctionInfo* shared);
static void Setup();
static void TearDown();
// Code generation support.
static int input_offset() { return OFFSET_OF(Deoptimizer, input_); }
static int output_count_offset() {
return OFFSET_OF(Deoptimizer, output_count_);
}
static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }
static int GetDeoptimizedCodeCount();
static const int kNotDeoptimizationEntry = -1;
// Generators for the deoptimization entry code.
class EntryGenerator BASE_EMBEDDED {
public:
EntryGenerator(MacroAssembler* masm, BailoutType type)
: masm_(masm), type_(type) { }
virtual ~EntryGenerator() { }
void Generate();
protected:
MacroAssembler* masm() const { return masm_; }
BailoutType type() const { return type_; }
virtual void GeneratePrologue() { }
private:
MacroAssembler* masm_;
Deoptimizer::BailoutType type_;
};
class TableEntryGenerator : public EntryGenerator {
public:
TableEntryGenerator(MacroAssembler* masm, BailoutType type, int count)
: EntryGenerator(masm, type), count_(count) { }
protected:
virtual void GeneratePrologue();
private:
int count() const { return count_; }
int count_;
};
private:
static const int kNumberOfEntries = 4096;
Deoptimizer(JSFunction* function,
BailoutType type,
unsigned bailout_id,
Address from,
int fp_to_sp_delta);
void DeleteFrameDescriptions();
void DoComputeOutputFrames();
void DoComputeOsrOutputFrame();
void DoComputeFrame(TranslationIterator* iterator, int frame_index);
void DoTranslateCommand(TranslationIterator* iterator,
int frame_index,
unsigned output_offset);
// Translate a command for OSR. Updates the input offset to be used for
// the next command. Returns false if translation of the command failed
// (e.g., a number conversion failed) and may or may not have updated the
// input offset.
bool DoOsrTranslateCommand(TranslationIterator* iterator,
int* input_offset);
unsigned ComputeInputFrameSize() const;
unsigned ComputeFixedSize(JSFunction* function) const;
unsigned ComputeIncomingArgumentSize(JSFunction* function) const;
unsigned ComputeOutgoingArgumentSize() const;
Object* ComputeLiteral(int index) const;
void InsertHeapNumberValue(JavaScriptFrame* frame,
int stack_index,
double val,
int extra_slot_count);
void AddInteger32Value(int frame_index, int slot_index, int32_t value);
void AddDoubleValue(int frame_index, int slot_index, double value);
static LargeObjectChunk* CreateCode(BailoutType type);
static void GenerateDeoptimizationEntries(
MacroAssembler* masm, int count, BailoutType type);
// Weak handle callback for deoptimizing code objects.
static void HandleWeakDeoptimizedCode(
v8::Persistent<v8::Value> obj, void* data);
static Code* FindDeoptimizingCodeFromAddress(Address addr);
static void RemoveDeoptimizingCode(Code* code);
static LargeObjectChunk* eager_deoptimization_entry_code_;
static LargeObjectChunk* lazy_deoptimization_entry_code_;
static Deoptimizer* current_;
// List of deoptimized code objects which still have references from active
// stack frames. These code objects are needed by the deoptimizer when
// deoptimizing a frame for which the code object for the function has been
// changed from the code present when the deoptimization was done.
static DeoptimizingCodeListNode* deoptimizing_code_list_;
JSFunction* function_;
Code* optimized_code_;
unsigned bailout_id_;
BailoutType bailout_type_;
Address from_;
int fp_to_sp_delta_;
// Input frame description.
FrameDescription* input_;
// Number of output frames.
int output_count_;
// Array of output frame descriptions.
FrameDescription** output_;
List<ValueDescriptionInteger32>* integer32_values_;
List<ValueDescriptionDouble>* double_values_;
static int table_entry_size_;
friend class FrameDescription;
friend class DeoptimizingCodeListNode;
};
class FrameDescription {
public:
FrameDescription(uint32_t frame_size,
JSFunction* function);
void* operator new(size_t size, uint32_t frame_size) {
return malloc(size + frame_size);
}
void operator delete(void* description) {
free(description);
}
uint32_t GetFrameSize() const { return frame_size_; }
JSFunction* GetFunction() const { return function_; }
unsigned GetOffsetFromSlotIndex(Deoptimizer* deoptimizer, int slot_index);
uint32_t GetFrameSlot(unsigned offset) {
return *GetFrameSlotPointer(offset);
}
double GetDoubleFrameSlot(unsigned offset) {
return *reinterpret_cast<double*>(GetFrameSlotPointer(offset));
}
void SetFrameSlot(unsigned offset, uint32_t value) {
*GetFrameSlotPointer(offset) = value;
}
uint32_t GetRegister(unsigned n) const {
ASSERT(n < ARRAY_SIZE(registers_));
return registers_[n];
}
double GetDoubleRegister(unsigned n) const {
ASSERT(n < ARRAY_SIZE(double_registers_));
return double_registers_[n];
}
void SetRegister(unsigned n, uint32_t value) {
ASSERT(n < ARRAY_SIZE(registers_));
registers_[n] = value;
}
void SetDoubleRegister(unsigned n, double value) {
ASSERT(n < ARRAY_SIZE(double_registers_));
double_registers_[n] = value;
}
uint32_t GetTop() const { return top_; }
void SetTop(uint32_t top) { top_ = top; }
uint32_t GetPc() const { return pc_; }
void SetPc(uint32_t pc) { pc_ = pc; }
uint32_t GetFp() const { return fp_; }
void SetFp(uint32_t fp) { fp_ = fp; }
Smi* GetState() const { return state_; }
void SetState(Smi* state) { state_ = state; }
void SetContinuation(uint32_t pc) { continuation_ = pc; }
static int registers_offset() {
return OFFSET_OF(FrameDescription, registers_);
}
static int double_registers_offset() {
return OFFSET_OF(FrameDescription, double_registers_);
}
static int frame_size_offset() {
return OFFSET_OF(FrameDescription, frame_size_);
}
static int pc_offset() {
return OFFSET_OF(FrameDescription, pc_);
}
static int state_offset() {
return OFFSET_OF(FrameDescription, state_);
}
static int continuation_offset() {
return OFFSET_OF(FrameDescription, continuation_);
}
static int frame_content_offset() {
return sizeof(FrameDescription);
}
private:
static const uint32_t kZapUint32 = 0xbeeddead;
uint32_t frame_size_; // Number of bytes.
JSFunction* function_;
uint32_t registers_[Register::kNumRegisters];
double double_registers_[DoubleRegister::kNumAllocatableRegisters];
uint32_t top_;
uint32_t pc_;
uint32_t fp_;
Smi* state_;
// Continuation is the PC where the execution continues after
// deoptimizing.
uint32_t continuation_;
uint32_t* GetFrameSlotPointer(unsigned offset) {
ASSERT(offset < frame_size_);
return reinterpret_cast<uint32_t*>(
reinterpret_cast<Address>(this) + frame_content_offset() + offset);
}
};
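FrameDescription over-allocates itself: the class-specific operator new asks malloc for sizeof(FrameDescription) plus the frame size, and GetFrameSlotPointer then indexes into the bytes that trail the object. A stripped-down standalone sketch of that trailing-buffer pattern, with hypothetical names and no V8 types:
// Sketch: allocate the object and its variable-sized payload in one block,
// then address the payload relative to 'this'.
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>

class FrameSketch {
 public:
  void* operator new(size_t size, uint32_t payload_size) {
    return malloc(size + payload_size);
  }
  void operator delete(void* description) { free(description); }

  explicit FrameSketch(uint32_t payload_size) : payload_size_(payload_size) {
    memset(payload(), 0, payload_size_);
  }

  uint32_t* slot(unsigned offset) {
    // Payload starts immediately after the object itself.
    return reinterpret_cast<uint32_t*>(payload() + offset);
  }

 private:
  uint8_t* payload() {
    return reinterpret_cast<uint8_t*>(this) + sizeof(FrameSketch);
  }
  uint32_t payload_size_;
};

// Usage: FrameSketch* f = new (256) FrameSketch(256); *f->slot(0) = 42;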
class TranslationBuffer BASE_EMBEDDED {
public:
TranslationBuffer() : contents_(256) { }
int CurrentIndex() const { return contents_.length(); }
void Add(int32_t value);
Handle<ByteArray> CreateByteArray();
private:
ZoneList<uint8_t> contents_;
};
class TranslationIterator BASE_EMBEDDED {
public:
TranslationIterator(ByteArray* buffer, int index)
: buffer_(buffer), index_(index) {
ASSERT(index >= 0 && index < buffer->length());
}
int32_t Next();
bool HasNext() const { return index_ >= 0; }
void Done() { index_ = -1; }
void Skip(int n) {
for (int i = 0; i < n; i++) Next();
}
private:
ByteArray* buffer_;
int index_;
};
class Translation BASE_EMBEDDED {
public:
enum Opcode {
BEGIN,
FRAME,
REGISTER,
INT32_REGISTER,
DOUBLE_REGISTER,
STACK_SLOT,
INT32_STACK_SLOT,
DOUBLE_STACK_SLOT,
LITERAL,
ARGUMENTS_OBJECT,
// A prefix indicating that the next command is a duplicate of the one
// that follows it.
DUPLICATE
};
Translation(TranslationBuffer* buffer, int frame_count)
: buffer_(buffer),
index_(buffer->CurrentIndex()) {
buffer_->Add(BEGIN);
buffer_->Add(frame_count);
}
int index() const { return index_; }
// Commands.
void BeginFrame(int node_id, int literal_id, unsigned height);
void StoreRegister(Register reg);
void StoreInt32Register(Register reg);
void StoreDoubleRegister(DoubleRegister reg);
void StoreStackSlot(int index);
void StoreInt32StackSlot(int index);
void StoreDoubleStackSlot(int index);
void StoreLiteral(int literal_id);
void StoreArgumentsObject();
void MarkDuplicate();
static int NumberOfOperandsFor(Opcode opcode);
#ifdef DEBUG
static const char* StringFor(Opcode opcode);
#endif
private:
TranslationBuffer* buffer_;
int index_;
};
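A Translation is essentially an opcode stream appended to a shared TranslationBuffer: each command writes its opcode followed by a fixed number of operands, and TranslationIterator replays them later. A tiny standalone sketch of that record/replay shape, using a flat int32 stream and invented opcode values rather than the real encoding:
// Sketch: record commands as (opcode, operands...) into a flat stream and
// walk them back with a cursor, as the deoptimizer's translations do.
#include <cstdint>
#include <vector>

enum class OpSketch : int32_t { kBeginFrame = 0, kStackSlot = 1, kLiteral = 2 };

struct TranslationSketch {
  std::vector<int32_t> stream;

  void BeginFrame(int32_t ast_id, int32_t height) {
    stream.push_back(static_cast<int32_t>(OpSketch::kBeginFrame));
    stream.push_back(ast_id);
    stream.push_back(height);
  }
  void StoreStackSlot(int32_t index) {
    stream.push_back(static_cast<int32_t>(OpSketch::kStackSlot));
    stream.push_back(index);
  }
};

struct TranslationIteratorSketch {
  const std::vector<int32_t>* stream;
  size_t cursor = 0;
  bool HasNext() const { return cursor < stream->size(); }
  int32_t Next() { return (*stream)[cursor++]; }
};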
// Linked list holding deoptimizing code objects. The deoptimizing code objects
// are kept as weak handles until they are no longer activated on the stack.
class DeoptimizingCodeListNode : public Malloced {
public:
explicit DeoptimizingCodeListNode(Code* code);
~DeoptimizingCodeListNode();
DeoptimizingCodeListNode* next() const { return next_; }
void set_next(DeoptimizingCodeListNode* next) { next_ = next; }
Handle<Code> code() const { return code_; }
private:
// Global (weak) handle to the deoptimizing code object.
Handle<Code> code_;
// Next pointer for linked list.
DeoptimizingCodeListNode* next_;
};
} } // namespace v8::internal
#endif // V8_DEOPTIMIZER_H_


@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -30,6 +30,7 @@
#include "code-stubs.h"
#include "codegen-inl.h"
#include "debug.h"
#include "deoptimizer.h"
#include "disasm.h"
#include "disassembler.h"
#include "macro-assembler.h"
@ -277,6 +278,15 @@ static int DecodeIt(FILE* f,
} else {
out.AddFormatted(" %s", Code::Kind2String(kind));
}
} else if (rmode == RelocInfo::RUNTIME_ENTRY) {
// A runtime entry relocinfo might be a deoptimization bailout.
Address addr = relocinfo.target_address();
int id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::EAGER);
if (id == Deoptimizer::kNotDeoptimizationEntry) {
out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
} else {
out.AddFormatted(" ;; deoptimization bailout %d", id);
}
} else {
out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
}
@ -299,8 +309,17 @@ int Disassembler::Decode(FILE* f, byte* begin, byte* end) {
// Called by Code::CodePrint.
void Disassembler::Decode(FILE* f, Code* code) {
byte* begin = Code::cast(code)->instruction_start();
byte* end = begin + Code::cast(code)->instruction_size();
int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION)
? static_cast<int>(code->safepoint_table_start())
: code->instruction_size();
// If there might be a stack check table, stop before reaching it.
if (code->kind() == Code::FUNCTION) {
decode_size =
Min(decode_size, static_cast<int>(code->stack_check_table_start()));
}
byte* begin = code->instruction_start();
byte* end = begin + decode_size;
V8NameConverter v8NameConverter(code);
DecodeIt(f, v8NameConverter, begin, end);
}


@ -33,8 +33,10 @@
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "debug.h"
#include "runtime-profiler.h"
#include "simulator.h"
#include "v8threads.h"
#include "vm-state-inl.h"
namespace v8 {
namespace internal {
@ -295,6 +297,25 @@ void StackGuard::TerminateExecution() {
}
bool StackGuard::IsRuntimeProfilerTick() {
ExecutionAccess access;
return thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK;
}
void StackGuard::RequestRuntimeProfilerTick() {
// Ignore calls if we're not optimizing or if we can't get the lock.
if (FLAG_opt && ExecutionAccess::TryLock()) {
thread_local_.interrupt_flags_ |= RUNTIME_PROFILER_TICK;
if (thread_local_.postpone_interrupts_nesting_ == 0) {
thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
Heap::SetStackLimits();
}
ExecutionAccess::Unlock();
}
}
#ifdef ENABLE_DEBUGGER_SUPPORT
bool StackGuard::IsDebugBreak() {
ExecutionAccess access;
@ -682,6 +703,12 @@ void Execution::ProcessDebugMesssages(bool debug_command_only) {
#endif
MaybeObject* Execution::HandleStackGuardInterrupt() {
Counters::stack_interrupts.Increment();
if (StackGuard::IsRuntimeProfilerTick()) {
Counters::runtime_profiler_ticks.Increment();
StackGuard::Continue(RUNTIME_PROFILER_TICK);
RuntimeProfiler::OptimizeNow();
}
#ifdef ENABLE_DEBUGGER_SUPPORT
if (StackGuard::IsDebugBreak() || StackGuard::IsDebugCommand()) {
DebugBreakHelper();


@ -38,7 +38,8 @@ enum InterruptFlag {
DEBUGBREAK = 1 << 1,
DEBUGCOMMAND = 1 << 2,
PREEMPT = 1 << 3,
TERMINATE = 1 << 4
TERMINATE = 1 << 4,
RUNTIME_PROFILER_TICK = 1 << 5
};
class Execution : public AllStatic {
@ -175,6 +176,8 @@ class StackGuard : public AllStatic {
static void Interrupt();
static bool IsTerminateExecution();
static void TerminateExecution();
static bool IsRuntimeProfilerTick();
static void RequestRuntimeProfilerTick();
#ifdef ENABLE_DEBUGGER_SUPPORT
static bool IsDebugBreak();
static void DebugBreak();


@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -32,6 +32,7 @@
#include "execution.h"
#include "factory.h"
#include "macro-assembler.h"
#include "objects.h"
#include "objects-visiting.h"
namespace v8 {
@ -73,6 +74,26 @@ Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors) {
}
Handle<DeoptimizationInputData> Factory::NewDeoptimizationInputData(
int deopt_entry_count,
PretenureFlag pretenure) {
ASSERT(deopt_entry_count > 0);
CALL_HEAP_FUNCTION(DeoptimizationInputData::Allocate(deopt_entry_count,
pretenure),
DeoptimizationInputData);
}
Handle<DeoptimizationOutputData> Factory::NewDeoptimizationOutputData(
int deopt_entry_count,
PretenureFlag pretenure) {
ASSERT(deopt_entry_count > 0);
CALL_HEAP_FUNCTION(DeoptimizationOutputData::Allocate(deopt_entry_count,
pretenure),
DeoptimizationOutputData);
}
// Symbols are created in the old generation (data space).
Handle<String> Factory::LookupSymbol(Vector<const char> string) {
CALL_HEAP_FUNCTION(Heap::LookupSymbol(string), String);
@ -243,6 +264,13 @@ Handle<ExternalArray> Factory::NewExternalArray(int length,
}
Handle<JSGlobalPropertyCell> Factory::NewJSGlobalPropertyCell(
Handle<Object> value) {
CALL_HEAP_FUNCTION(Heap::AllocateJSGlobalPropertyCell(*value),
JSGlobalPropertyCell);
}
Handle<Map> Factory::NewMap(InstanceType type, int instance_size) {
CALL_HEAP_FUNCTION(Heap::AllocateMap(type, instance_size), Map);
}
@ -333,6 +361,15 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
context->global_context());
}
result->set_literals(*literals);
result->set_next_function_link(Heap::undefined_value());
if (V8::UseCrankshaft() &&
FLAG_always_opt &&
result->is_compiled() &&
!function_info->is_toplevel() &&
function_info->allows_lazy_compilation()) {
result->MarkForLazyRecompilation();
}
return result;
}


@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -53,6 +53,12 @@ class Factory : public AllStatic {
static Handle<StringDictionary> NewStringDictionary(int at_least_space_for);
static Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors);
static Handle<DeoptimizationInputData> NewDeoptimizationInputData(
int deopt_entry_count,
PretenureFlag pretenure);
static Handle<DeoptimizationOutputData> NewDeoptimizationOutputData(
int deopt_entry_count,
PretenureFlag pretenure);
static Handle<String> LookupSymbol(Vector<const char> str);
static Handle<String> LookupAsciiSymbol(const char* str) {
@ -169,6 +175,9 @@ class Factory : public AllStatic {
void* external_pointer,
PretenureFlag pretenure = NOT_TENURED);
static Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell(
Handle<Object> value);
static Handle<Map> NewMap(InstanceType type, int instance_size);
static Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);


@ -1,4 +1,4 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -96,9 +96,56 @@ private:
//
#define FLAG FLAG_FULL
// Flags for Crankshaft.
#ifdef V8_TARGET_ARCH_IA32
DEFINE_bool(crankshaft, true, "use crankshaft")
#else
DEFINE_bool(crankshaft, false, "use crankshaft")
#endif
DEFINE_string(hydrogen_filter, "", "hydrogen use/trace filter")
DEFINE_bool(use_hydrogen, true, "use generated hydrogen for compilation")
DEFINE_bool(build_lithium, true, "use lithium chunk builder")
DEFINE_bool(alloc_lithium, true, "use lithium register allocator")
DEFINE_bool(use_lithium, true, "use lithium code generator")
DEFINE_bool(use_range, true, "use hydrogen range analysis")
DEFINE_bool(eliminate_dead_phis, true, "eliminate dead phis")
DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
DEFINE_bool(use_peeling, false, "use loop peeling")
DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
DEFINE_bool(use_inlining, true, "use function inlining")
DEFINE_bool(limit_inlining, true, "limit code size growth from inlining")
DEFINE_bool(eliminate_empty_blocks, true, "eliminate empty blocks")
DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion")
DEFINE_bool(time_hydrogen, false, "timing for hydrogen")
DEFINE_bool(trace_hydrogen, false, "trace generated hydrogen to file")
DEFINE_bool(trace_inlining, false, "trace inlining decisions")
DEFINE_bool(trace_alloc, false, "trace register allocator")
DEFINE_bool(trace_range, false, "trace range analysis")
DEFINE_bool(trace_gvn, false, "trace global value numbering")
DEFINE_bool(trace_environment, false, "trace lithium environments")
DEFINE_bool(trace_representation, false, "trace representation types")
DEFINE_bool(stress_pointer_maps, false, "pointer map for every instruction")
DEFINE_bool(stress_environments, false, "environment for every instruction")
DEFINE_int(deopt_every_n_times,
0,
"deoptimize every n times a deopt point is passed")
DEFINE_bool(process_arguments_object, true, "try to deal with arguments object")
DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing")
DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
DEFINE_bool(aggressive_loop_invariant_motion, true,
"aggressive motion of instructions out of loops")
#ifdef V8_TARGET_ARCH_IA32
DEFINE_bool(use_osr, true, "use on-stack replacement")
#else
DEFINE_bool(use_osr, false, "use on-stack replacement")
#endif
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
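For readers skimming the new Crankshaft flag block, here is a minimal standalone sketch of the DEFINE_bool/FLAG_ pattern it relies on. This is an assumption-laden simplification: the real macros in this header are expanded several times under different FLAG_MODE_* settings, and DEFINE_BOOL_SKETCH plus the main() below are illustrative only.
// Illustrative sketch only, not the real flag machinery.
#include <cstdio>
// Each DEFINE_bool(name, default, comment) ultimately provides a mutable
// global named FLAG_name that the rest of the code base reads directly.
#define DEFINE_BOOL_SKETCH(nm, def, cmt) bool FLAG_##nm = def;
#if defined(V8_TARGET_ARCH_IA32)
DEFINE_BOOL_SKETCH(crankshaft, true, "use crankshaft")
#else
DEFINE_BOOL_SKETCH(crankshaft, false, "use crankshaft")
#endif
DEFINE_BOOL_SKETCH(always_opt, false, "always try to optimize functions")
int main() {
  // Client code simply tests the globals, as the factory change above does.
  if (FLAG_crankshaft && FLAG_always_opt) {
    std::printf("functions would be marked for eager recompilation\n");
  }
  return 0;
}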
// assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
DEFINE_bool(debug_code, false,
"generate extra code (comments, assertions) for debugging")
"generate extra code (assertions) for debugging")
DEFINE_bool(code_comments, false, "emit comments in code disassembly")
DEFINE_bool(emit_branch_hints, false, "emit branch hints")
DEFINE_bool(peephole_optimization, true,
"perform peephole optimizations in assembly code")
@ -146,7 +193,14 @@ DEFINE_bool(mask_constants_with_cookie,
// codegen.cc
DEFINE_bool(lazy, true, "use lazy compilation")
DEFINE_bool(trace_opt, false, "trace lazy optimization")
DEFINE_bool(opt, true, "use adaptive optimizations")
DEFINE_bool(opt_eagerly, false, "be more eager when adaptively optimizing")
DEFINE_bool(always_opt, false, "always try to optimize functions")
DEFINE_bool(prepare_always_opt, false, "prepare for turning on always opt")
DEFINE_bool(debug_info, true, "add debug information to compiled functions")
DEFINE_bool(deopt, true, "support deoptimization")
DEFINE_bool(trace_deopt, false, "trace deoptimization")
// compiler.cc
DEFINE_bool(strict, false, "strict error checking")
@ -365,6 +419,9 @@ DEFINE_bool(collect_heap_spill_statistics, false,
"report heap spill statistics along with heap_stats "
"(requires heap_stats)")
// VM state
DEFINE_bool(log_state_changes, false, "Log state changes.")
// Regexp
DEFINE_bool(regexp_possessive_quantifier,
false,
@ -397,7 +454,6 @@ DEFINE_bool(log_gc, false,
DEFINE_bool(log_handles, false, "Log global handle events.")
DEFINE_bool(log_snapshot_positions, false,
"log positions of (de)serialized objects in the snapshot.")
DEFINE_bool(log_state_changes, false, "Log state changes.")
DEFINE_bool(log_suspect, false, "Log suspect operations.")
DEFINE_bool(log_producers, false, "Log stack traces of JS objects allocations.")
DEFINE_bool(compress_log, false,
@ -446,6 +502,8 @@ DEFINE_bool(print_code_stubs, false, "print code stubs")
// codegen-ia32.cc / codegen-arm.cc
DEFINE_bool(print_code, false, "print generated code")
DEFINE_bool(print_opt_code, false, "print optimized code")
DEFINE_bool(print_code_verbose, false, "print more information for code")
DEFINE_bool(print_builtin_code, false, "print generated code for builtins")
// Cleanup...

View File

@ -279,7 +279,7 @@ static void SplitArgument(const char* arg,
*value = NULL;
*is_bool = false;
if (*arg == '-') {
if (arg != NULL && *arg == '-') {
// find the begin of the flag name
arg++; // remove 1st '-'
if (*arg == '-') {

View File

@ -262,8 +262,8 @@ class FrameElement BASE_EMBEDDED {
class CopiedField: public BitField<bool, 3, 1> {};
class SyncedField: public BitField<bool, 4, 1> {};
class UntaggedInt32Field: public BitField<bool, 5, 1> {};
class TypeInfoField: public BitField<int, 6, 6> {};
class DataField: public BitField<uint32_t, 12, 32 - 12> {};
class TypeInfoField: public BitField<int, 6, 7> {};
class DataField: public BitField<uint32_t, 13, 32 - 13> {};
friend class VirtualFrame;
};
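To make the two-line change above concrete: widening TypeInfoField from 6 to 7 bits pushes DataField's base from bit 12 to bit 13, so the pair still occupies bits 6..31 of the 32-bit word. A minimal standalone re-implementation of the encode/decode arithmetic follows; it is a simplified stand-in for the real BitField template elsewhere in the code base.
#include <cassert>
#include <cstdint>
// Simplified stand-in for v8's BitField<Type, shift, size> template.
template <class T, int shift, int size>
struct BitFieldSketch {
  static uint32_t mask() { return ((1u << size) - 1u) << shift; }
  static uint32_t encode(T value) { return static_cast<uint32_t>(value) << shift; }
  static T decode(uint32_t bits) { return static_cast<T>((bits & mask()) >> shift); }
};
// Mirrors the FrameElement layout after this change.
typedef BitFieldSketch<int, 6, 7> TypeInfoFieldSketch;          // bits 6..12
typedef BitFieldSketch<uint32_t, 13, 32 - 13> DataFieldSketch;  // bits 13..31
int main() {
  // 85 needs all 7 bits; it would not fit in the old 6-bit field.
  uint32_t bits = TypeInfoFieldSketch::encode(85) | DataFieldSketch::encode(42);
  assert(TypeInfoFieldSketch::decode(bits) == 85);
  assert(DataFieldSketch::decode(bits) == 42);
  return 0;
}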

View File

@ -27,8 +27,12 @@
#include "v8.h"
#include "ast.h"
#include "deoptimizer.h"
#include "frames-inl.h"
#include "full-codegen.h"
#include "mark-compact.h"
#include "safepoint-table.h"
#include "scopeinfo.h"
#include "string-stream.h"
#include "top.h"
@ -324,11 +328,33 @@ void SafeStackTraceFrameIterator::Advance() {
#endif
Code* StackFrame::GetSafepointData(Address pc,
uint8_t** safepoint_entry,
unsigned* stack_slots) {
PcToCodeCache::PcToCodeCacheEntry* entry = PcToCodeCache::GetCacheEntry(pc);
uint8_t* cached_safepoint_entry = entry->safepoint_entry;
if (cached_safepoint_entry == NULL) {
cached_safepoint_entry = entry->code->GetSafepointEntry(pc);
ASSERT(cached_safepoint_entry != NULL); // No safepoint found.
entry->safepoint_entry = cached_safepoint_entry;
} else {
ASSERT(cached_safepoint_entry == entry->code->GetSafepointEntry(pc));
}
// Fill in the results and return the code.
Code* code = entry->code;
*safepoint_entry = cached_safepoint_entry;
*stack_slots = code->stack_slots();
return code;
}
bool StackFrame::HasHandler() const {
StackHandlerIterator it(this, top_handler());
return !it.done();
}
void StackFrame::IteratePc(ObjectVisitor* v,
Address* pc_address,
Code* holder) {
@ -355,7 +381,16 @@ StackFrame::Type StackFrame::ComputeType(State* state) {
// really the function.
const int offset = StandardFrameConstants::kMarkerOffset;
Object* marker = Memory::Object_at(state->fp + offset);
if (!marker->IsSmi()) return JAVA_SCRIPT;
if (!marker->IsSmi()) {
// If we're using a "safe" stack iterator, we treat optimized
// frames as normal JavaScript frames to avoid having to look
// into the heap to determine the state. This is safe as long
// as nobody tries to GC...
if (SafeStackFrameIterator::is_active()) return JAVA_SCRIPT;
Code::Kind kind = GetContainingCode(*(state->pc_address))->kind();
ASSERT(kind == Code::FUNCTION || kind == Code::OPTIMIZED_FUNCTION);
return (kind == Code::OPTIMIZED_FUNCTION) ? OPTIMIZED : JAVA_SCRIPT;
}
return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
}
@ -488,6 +523,70 @@ bool StandardFrame::IsExpressionInsideHandler(int n) const {
}
void OptimizedFrame::Iterate(ObjectVisitor* v) const {
#ifdef DEBUG
// Make sure that optimized frames do not contain any stack handlers.
StackHandlerIterator it(this, top_handler());
ASSERT(it.done());
#endif
// Make sure that we're not doing "safe" stack frame iteration. We cannot
// possibly find pointers in optimized frames in that state.
ASSERT(!SafeStackFrameIterator::is_active());
// Compute the safepoint information.
unsigned stack_slots = 0;
uint8_t* safepoint_entry = NULL;
Code* code = StackFrame::GetSafepointData(
pc(), &safepoint_entry, &stack_slots);
unsigned slot_space = stack_slots * kPointerSize;
// Visit the outgoing parameters. This is usually dealt with by the
// callee, but while GC'ing we artificially lower the number of
// arguments to zero and let the caller deal with it.
Object** parameters_base = &Memory::Object_at(sp());
Object** parameters_limit = &Memory::Object_at(
fp() + JavaScriptFrameConstants::kFunctionOffset - slot_space);
// Visit the registers that contain pointers if any.
if (SafepointTable::HasRegisters(safepoint_entry)) {
for (int i = kNumSafepointRegisters - 1; i >= 0; i--) {
if (SafepointTable::HasRegisterAt(safepoint_entry, i)) {
int reg_stack_index = MacroAssembler::SafepointRegisterStackIndex(i);
v->VisitPointer(parameters_base + reg_stack_index);
}
}
// Skip the words containing the register values.
parameters_base += kNumSafepointRegisters;
}
// We're done dealing with the register bits.
safepoint_entry += kNumSafepointRegisters >> kBitsPerByteLog2;
// Visit the rest of the parameters.
v->VisitPointers(parameters_base, parameters_limit);
// Visit pointer spill slots and locals.
for (unsigned index = 0; index < stack_slots; index++) {
int byte_index = index >> kBitsPerByteLog2;
int bit_index = index & (kBitsPerByte - 1);
if ((safepoint_entry[byte_index] & (1U << bit_index)) != 0) {
v->VisitPointer(parameters_limit + index);
}
}
// Visit the context and the function.
Object** fixed_base = &Memory::Object_at(
fp() + JavaScriptFrameConstants::kFunctionOffset);
Object** fixed_limit = &Memory::Object_at(fp());
v->VisitPointers(fixed_base, fixed_limit);
// Visit the return address in the callee and incoming arguments.
IteratePc(v, pc_address(), code);
IterateArguments(v);
}
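The spill-slot loop above reads the safepoint entry as a little-endian bitmap with one bit per stack slot. A standalone sketch of the same byte_index/bit_index decoding, assuming kBitsPerByteLog2 == 3 and a plain byte array standing in for the entry stored with the code object:
#include <cstdint>
#include <cstdio>
#include <vector>
// Returns the indices of stack slots whose bit is set in a safepoint bitmap,
// mirroring the arithmetic in OptimizedFrame::Iterate.
std::vector<unsigned> TaggedSlots(const uint8_t* bits, unsigned stack_slots) {
  std::vector<unsigned> result;
  for (unsigned index = 0; index < stack_slots; index++) {
    unsigned byte_index = index >> 3;  // kBitsPerByteLog2 == 3
    unsigned bit_index = index & 7;    // kBitsPerByte - 1
    if ((bits[byte_index] & (1u << bit_index)) != 0) result.push_back(index);
  }
  return result;
}
int main() {
  const uint8_t bitmap[] = { 0x05, 0x80 };  // slots 0, 2 and 15 are tagged
  std::vector<unsigned> slots = TaggedSlots(bitmap, 16);
  for (size_t i = 0; i < slots.size(); i++) std::printf("slot %u\n", slots[i]);
  return 0;
}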
Object* JavaScriptFrame::GetParameter(int index) const {
ASSERT(index >= 0 && index < ComputeParametersCount());
const int offset = JavaScriptFrameConstants::kParam0Offset;
@ -547,6 +646,185 @@ Address JavaScriptFrame::GetCallerStackPointer() const {
}
void JavaScriptFrame::GetFunctions(List<JSFunction*>* functions) {
ASSERT(functions->length() == 0);
functions->Add(JSFunction::cast(function()));
}
void JavaScriptFrame::Summarize(List<FrameSummary>* functions) {
ASSERT(functions->length() == 0);
Code* code_pointer = code();
int offset = pc() - code_pointer->address();
FrameSummary summary(receiver(),
JSFunction::cast(function()),
code_pointer,
offset,
IsConstructor());
functions->Add(summary);
}
void FrameSummary::Print() {
PrintF("receiver: ");
receiver_->ShortPrint();
PrintF("\nfunction: ");
function_->shared()->DebugName()->ShortPrint();
PrintF("\ncode: ");
code_->ShortPrint();
if (code_->kind() == Code::FUNCTION) PrintF(" NON-OPT");
if (code_->kind() == Code::OPTIMIZED_FUNCTION) PrintF(" OPT");
PrintF("\npc: %d\n", offset_);
}
void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
ASSERT(frames->length() == 0);
ASSERT(is_optimized());
int deopt_index = AstNode::kNoNumber;
DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
// BUG(3243555): Since we don't have a lazy-deopt registered at
// throw-statements, we can't use the translation at the call-site of
// throw. An entry with no deoptimization index indicates a call-site
// without a lazy-deopt. As a consequence we are not allowed to inline
// functions containing throw.
if (deopt_index == Safepoint::kNoDeoptimizationIndex) {
JavaScriptFrame::Summarize(frames);
return;
}
TranslationIterator it(data->TranslationByteArray(),
data->TranslationIndex(deopt_index)->value());
Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
ASSERT(opcode == Translation::BEGIN);
int frame_count = it.Next();
// We create the summary in reverse order because the frames
// in the deoptimization translation are ordered bottom-to-top.
int i = frame_count;
while (i > 0) {
opcode = static_cast<Translation::Opcode>(it.Next());
if (opcode == Translation::FRAME) {
// We don't inline constructor calls, so only the first, outermost
// frame can be a constructor frame in case of inlining.
bool is_constructor = (i == frame_count) && IsConstructor();
i--;
int ast_id = it.Next();
int function_id = it.Next();
it.Next(); // Skip height.
JSFunction* function =
JSFunction::cast(data->LiteralArray()->get(function_id));
// The translation commands are ordered and the receiver is always
// at the first position. Since we are always at a call when we need
// to construct a stack trace, the receiver is always in a stack slot.
opcode = static_cast<Translation::Opcode>(it.Next());
ASSERT(opcode == Translation::STACK_SLOT);
int input_slot_index = it.Next();
// Get the correct receiver in the optimized frame.
Object* receiver = NULL;
// Positive index means the value is spilled to the locals area. Negative
// means it is stored in the incoming parameter area.
if (input_slot_index >= 0) {
receiver = GetExpression(input_slot_index);
} else {
// Index -1 overlaps with last parameter, -n with the first parameter,
// (-n - 1) with the receiver with n being the number of parameters
// of the outermost, optimized frame.
int parameter_count = ComputeParametersCount();
int parameter_index = input_slot_index + parameter_count;
receiver = (parameter_index == -1)
? this->receiver()
: this->GetParameter(parameter_index);
}
Code* code = function->shared()->code();
DeoptimizationOutputData* output_data =
DeoptimizationOutputData::cast(code->deoptimization_data());
unsigned entry = Deoptimizer::GetOutputInfo(output_data,
ast_id,
function->shared());
unsigned pc_offset =
FullCodeGenerator::PcField::decode(entry) + Code::kHeaderSize;
ASSERT(pc_offset > 0);
FrameSummary summary(receiver, function, code, pc_offset, is_constructor);
frames->Add(summary);
} else {
// Skip over operands to advance to the next opcode.
it.Skip(Translation::NumberOfOperandsFor(opcode));
}
}
}
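A heavily simplified, standalone sketch of the translation stream Summarize walks. Assumptions: the stream is modeled as a flat int array, only the records Summarize actually consumes are shown, and the opcode values are made up; the real data is a variable-length-encoded ByteArray read through TranslationIterator. BEGIN carries the frame count, each FRAME carries the AST id, the literal index of the function, and the height, and the record immediately after a FRAME says where the receiver lives.
#include <cstddef>
#include <cstdio>
enum OpSketch { BEGIN, FRAME, STACK_SLOT, LITERAL };  // subset, made-up values
// Prints one line per (possibly inlined) frame, outermost first.
void PrintFrames(const int* t, size_t n) {
  size_t pos = 0;
  if (t[pos++] != BEGIN) return;
  int remaining = t[pos++];
  while (remaining > 0 && pos < n) {
    int opcode = t[pos++];
    if (opcode == FRAME) {
      int ast_id = t[pos++];
      int function_literal = t[pos++];
      int height = t[pos++];
      int receiver_opcode = t[pos++];  // record following FRAME: the receiver
      int receiver_operand = t[pos++];
      std::printf("ast id %d, function literal %d, height %d, receiver in %s %d\n",
                  ast_id, function_literal, height,
                  receiver_opcode == STACK_SLOT ? "stack slot" : "literal",
                  receiver_operand);
      remaining--;
    } else {
      pos += 1;  // skip the single operand of records we do not summarize
    }
  }
}
int main() {
  // One optimized frame (ast id 7) with one inlined callee (ast id 12).
  const int translation[] = {
    BEGIN, 2,
    FRAME, 7, 0, 3, STACK_SLOT, -2, STACK_SLOT, 1, LITERAL, 0,
    FRAME, 12, 1, 1, STACK_SLOT, 0, LITERAL, 1
  };
  PrintFrames(translation, sizeof(translation) / sizeof(translation[0]));
  return 0;
}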
DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
int* deopt_index) {
ASSERT(is_optimized());
JSFunction* opt_function = JSFunction::cast(function());
Code* code = opt_function->code();
// The code object may have been replaced by lazy deoptimization. Fall
// back to a slow search in this case to find the original optimized
// code object.
if (!code->contains(pc())) {
code = PcToCodeCache::GcSafeFindCodeForPc(pc());
}
ASSERT(code != NULL);
ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
SafepointTable table(code);
unsigned pc_offset = pc() - code->instruction_start();
for (unsigned i = 0; i < table.length(); i++) {
if (table.GetPcOffset(i) == pc_offset) {
*deopt_index = table.GetDeoptimizationIndex(i);
break;
}
}
ASSERT(*deopt_index != AstNode::kNoNumber);
return DeoptimizationInputData::cast(code->deoptimization_data());
}
void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
ASSERT(functions->length() == 0);
ASSERT(is_optimized());
int deopt_index = AstNode::kNoNumber;
DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
TranslationIterator it(data->TranslationByteArray(),
data->TranslationIndex(deopt_index)->value());
Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
ASSERT(opcode == Translation::BEGIN);
int frame_count = it.Next();
// We insert the frames in reverse order because the frames
// in the deoptimization translation are ordered bottom-to-top.
while (frame_count > 0) {
opcode = static_cast<Translation::Opcode>(it.Next());
if (opcode == Translation::FRAME) {
frame_count--;
it.Next(); // Skip ast id.
int function_id = it.Next();
it.Next(); // Skip height.
JSFunction* function =
JSFunction::cast(data->LiteralArray()->get(function_id));
functions->Add(function);
} else {
// Skip over operands to advance to the next opcode.
it.Skip(Translation::NumberOfOperandsFor(opcode));
}
}
}
Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
const int arguments = Smi::cast(GetExpression(0))->value();
const int offset = StandardFrameConstants::kCallerSPOffset;
@ -789,7 +1067,11 @@ void StandardFrame::IterateExpressions(ObjectVisitor* v) const {
void JavaScriptFrame::Iterate(ObjectVisitor* v) const {
IterateExpressions(v);
IteratePc(v, pc_address(), code());
IterateArguments(v);
}
void JavaScriptFrame::IterateArguments(ObjectVisitor* v) const {
// Traverse callee-saved registers, receiver, and parameters.
const int kBaseOffset = JavaScriptFrameConstants::kSavedRegistersOffset;
const int kLimitOffset = JavaScriptFrameConstants::kReceiverOffset;
@ -851,6 +1133,7 @@ Code* PcToCodeCache::GcSafeFindCodeForPc(Address pc) {
}
}
PcToCodeCache::PcToCodeCacheEntry* PcToCodeCache::GetCacheEntry(Address pc) {
Counters::pc_to_code.Increment();
ASSERT(IsPowerOf2(kPcToCodeCacheSize));
@ -867,6 +1150,7 @@ PcToCodeCache::PcToCodeCacheEntry* PcToCodeCache::GetCacheEntry(Address pc) {
// been set. Otherwise, we risk trying to use a cache entry before
// the code has been computed.
entry->code = GcSafeFindCodeForPc(pc);
entry->safepoint_entry = NULL;
entry->pc = pc;
}
return entry;

View File

@ -51,6 +51,7 @@ class PcToCodeCache : AllStatic {
struct PcToCodeCacheEntry {
Address pc;
Code* code;
uint8_t* safepoint_entry;
};
static PcToCodeCacheEntry* cache(int index) {
@ -115,6 +116,7 @@ class StackHandler BASE_EMBEDDED {
V(ENTRY_CONSTRUCT, EntryConstructFrame) \
V(EXIT, ExitFrame) \
V(JAVA_SCRIPT, JavaScriptFrame) \
V(OPTIMIZED, OptimizedFrame) \
V(INTERNAL, InternalFrame) \
V(CONSTRUCT, ConstructFrame) \
V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame)
@ -158,12 +160,17 @@ class StackFrame BASE_EMBEDDED {
bool is_entry() const { return type() == ENTRY; }
bool is_entry_construct() const { return type() == ENTRY_CONSTRUCT; }
bool is_exit() const { return type() == EXIT; }
bool is_java_script() const { return type() == JAVA_SCRIPT; }
bool is_optimized() const { return type() == OPTIMIZED; }
bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; }
bool is_internal() const { return type() == INTERNAL; }
bool is_construct() const { return type() == CONSTRUCT; }
virtual bool is_standard() const { return false; }
bool is_java_script() const {
Type type = this->type();
return (type == JAVA_SCRIPT) || (type == OPTIMIZED);
}
// Accessors.
Address sp() const { return state_.sp; }
Address fp() const { return state_.fp; }
@ -193,10 +200,17 @@ class StackFrame BASE_EMBEDDED {
Code* code() const { return GetContainingCode(pc()); }
// Get the code object that contains the given pc.
Code* GetContainingCode(Address pc) const {
static Code* GetContainingCode(Address pc) {
return PcToCodeCache::GetCacheEntry(pc)->code;
}
// Get the code object containing the given pc and fill in the
// safepoint entry and the number of stack slots. The pc must be at
// a safepoint.
static Code* GetSafepointData(Address pc,
uint8_t** safepoint_entry,
unsigned* stack_slots);
virtual void Iterate(ObjectVisitor* v) const = 0;
static void IteratePc(ObjectVisitor* v, Address* pc_address, Code* holder);
@ -393,6 +407,36 @@ class StandardFrame: public StackFrame {
};
class FrameSummary BASE_EMBEDDED {
public:
FrameSummary(Object* receiver,
JSFunction* function,
Code* code,
int offset,
bool is_constructor)
: receiver_(receiver),
function_(function),
code_(code),
offset_(offset),
is_constructor_(is_constructor) { }
Handle<Object> receiver() { return receiver_; }
Handle<JSFunction> function() { return function_; }
Handle<Code> code() { return code_; }
Address pc() { return reinterpret_cast<Address>(*code_) + offset_; }
int offset() { return offset_; }
bool is_constructor() { return is_constructor_; }
void Print();
private:
Handle<Object> receiver_;
Handle<JSFunction> function_;
Handle<Code> code_;
int offset_;
bool is_constructor_;
};
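A hedged usage sketch of the new class. Assumptions: HandleScope, JavaScriptFrameIterator, and List<> behave as they do elsewhere in the code base, and the names and capacity below are illustrative. One physical frame yields one summary when unoptimized and one summary per inlined function when optimized.
void PrintJavaScriptStack() {
  HandleScope scope;
  for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
    List<FrameSummary> summaries(4);
    it.frame()->Summarize(&summaries);
    // One Print() per logical (possibly inlined) frame.
    for (int i = 0; i < summaries.length(); i++) {
      summaries[i].Print();
    }
  }
}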
class JavaScriptFrame: public StandardFrame {
public:
virtual Type type() const { return JAVA_SCRIPT; }
@ -431,6 +475,12 @@ class JavaScriptFrame: public StandardFrame {
// Determine the code for the frame.
virtual Code* unchecked_code() const;
// Return a list with JSFunctions of this frame.
virtual void GetFunctions(List<JSFunction*>* functions);
// Build a list with summaries for this frame including all inlined frames.
virtual void Summarize(List<FrameSummary>* frames);
static JavaScriptFrame* cast(StackFrame* frame) {
ASSERT(frame->is_java_script());
return static_cast<JavaScriptFrame*>(frame);
@ -442,6 +492,10 @@ class JavaScriptFrame: public StandardFrame {
virtual Address GetCallerStackPointer() const;
// Garbage collection support. Iterates over incoming arguments,
// receiver, and any callee-saved registers.
void IterateArguments(ObjectVisitor* v) const;
private:
inline Object* function_slot_object() const;
@ -450,6 +504,31 @@ class JavaScriptFrame: public StandardFrame {
};
class OptimizedFrame : public JavaScriptFrame {
public:
virtual Type type() const { return OPTIMIZED; }
// GC support.
virtual void Iterate(ObjectVisitor* v) const;
// Return a list with JSFunctions of this frame.
// The functions are ordered bottom-to-top (i.e. functions.last()
// is the top-most activation)
virtual void GetFunctions(List<JSFunction*>* functions);
virtual void Summarize(List<FrameSummary>* frames);
DeoptimizationInputData* GetDeoptimizationData(int* deopt_index);
protected:
explicit OptimizedFrame(StackFrameIterator* iterator)
: JavaScriptFrame(iterator) { }
private:
friend class StackFrameIterator;
};
// Arguments adaptor frames are automatically inserted below
// JavaScript frames when the actual number of parameters does not
// match the formal number of parameters.

View File

@ -1,4 +1,4 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -29,12 +29,13 @@
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
#include "full-codegen.h"
#include "liveedit.h"
#include "macro-assembler.h"
#include "prettyprinter.h"
#include "scopes.h"
#include "stub-cache.h"
#include "debug.h"
#include "liveedit.h"
namespace v8 {
namespace internal {
@ -166,10 +167,6 @@ void BreakableStatementChecker::VisitConditional(Conditional* expr) {
}
void BreakableStatementChecker::VisitSlot(Slot* expr) {
}
void BreakableStatementChecker::VisitVariableProxy(VariableProxy* expr) {
}
@ -283,6 +280,9 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
int len = String::cast(script->source())->length();
Counters::total_full_codegen_source_size.Increment(len);
}
if (FLAG_trace_codegen) {
PrintF("Full Compiler - ");
}
CodeGenerator::MakeCodePrologue(info);
const int kInitialBufferSize = 4 * KB;
MacroAssembler masm(NULL, kInitialBufferSize);
@ -293,14 +293,105 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
ASSERT(!Top::has_pending_exception());
return false;
}
unsigned table_offset = cgen.EmitStackCheckTable();
Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
code->set_optimizable(info->IsOptimizable());
cgen.PopulateDeoptimizationData(code);
code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
code->set_allow_osr_at_loop_nesting_level(0);
code->set_stack_check_table_start(table_offset);
CodeGenerator::PrintCode(code, info);
info->SetCode(code); // may be an empty handle.
return !code.is_null();
}
unsigned FullCodeGenerator::EmitStackCheckTable() {
// The stack check table consists of a length (in number of entries)
// field, and then a sequence of entries. Each entry is a pair of AST id
// and code-relative pc offset.
masm()->Align(kIntSize);
masm()->RecordComment("[ Stack check table");
unsigned offset = masm()->pc_offset();
unsigned length = stack_checks_.length();
__ dd(length);
for (unsigned i = 0; i < length; ++i) {
__ dd(stack_checks_[i].id);
__ dd(stack_checks_[i].pc_and_state);
}
masm()->RecordComment("]");
return offset;
}
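A standalone sketch of the table layout emitted above, assuming each __ dd() emits one 32-bit word so the table is a count followed by (AST id, pc offset) pairs; in the real code the table lives in the instruction stream at the offset recorded via set_stack_check_table_start.
#include <cstdint>
#include <cstdio>
// Reads a table of the shape [length][ast id, pc offset]*.
void DumpStackCheckTable(const uint32_t* table) {
  uint32_t length = table[0];
  for (uint32_t i = 0; i < length; i++) {
    uint32_t ast_id = table[1 + 2 * i];
    uint32_t pc_offset = table[2 + 2 * i];
    std::printf("stack check: ast id %u at pc offset %u\n", ast_id, pc_offset);
  }
}
int main() {
  // Two back edges, e.g. from two loops in the compiled function.
  const uint32_t table[] = { 2, 14, 96, 27, 208 };
  DumpStackCheckTable(table);
  return 0;
}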
void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
// Fill in the deoptimization information.
ASSERT(info_->HasDeoptimizationSupport() || bailout_entries_.is_empty());
if (!info_->HasDeoptimizationSupport()) return;
int length = bailout_entries_.length();
Handle<DeoptimizationOutputData> data =
Factory::NewDeoptimizationOutputData(length, TENURED);
for (int i = 0; i < length; i++) {
data->SetAstId(i, Smi::FromInt(bailout_entries_[i].id));
data->SetPcAndState(i, Smi::FromInt(bailout_entries_[i].pc_and_state));
}
code->set_deoptimization_data(*data);
}
void FullCodeGenerator::PrepareForBailout(AstNode* node, State state) {
PrepareForBailoutForId(node->id(), state);
}
void FullCodeGenerator::RecordJSReturnSite(Call* call) {
// We record the offset of the function return so we can rebuild the frame
// if the function was inlined, i.e., this is the return address in the
// inlined function's frame.
//
// The state is ignored. We defensively set it to TOS_REG, which is the
// real state of the unoptimized code at the return site.
PrepareForBailoutForId(call->ReturnId(), TOS_REG);
#ifdef DEBUG
// In debug builds, mark the return so we can verify that this function
// was called.
ASSERT(!call->return_is_recorded_);
call->return_is_recorded_ = true;
#endif
}
void FullCodeGenerator::PrepareForBailoutForId(int id, State state) {
// There's no need to prepare this code for bailouts from already optimized
// code or code that can't be optimized.
if (!FLAG_deopt || !info_->HasDeoptimizationSupport()) return;
unsigned pc_and_state =
StateField::encode(state) | PcField::encode(masm_->pc_offset());
BailoutEntry entry = { id, pc_and_state };
#ifdef DEBUG
// Assert that we don't have multiple bailout entries for the same node.
for (int i = 0; i < bailout_entries_.length(); i++) {
if (bailout_entries_.at(i).id == entry.id) {
AstPrinter printer;
PrintF("%s", printer.PrintProgram(info_->function()));
UNREACHABLE();
}
}
#endif // DEBUG
bailout_entries_.Add(entry);
}
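A standalone sketch of the pc_and_state packing recorded above, under the split given by StateField and PcField in full-codegen.h: the low 8 bits hold the State and the remaining 24 bits hold the unoptimized-code pc offset. The names below are simplified stand-ins.
#include <cassert>
#include <cstdint>
enum StateSketch { NO_REGISTERS_SKETCH = 0, TOS_REG_SKETCH = 1 };
// Pack a state and a pc offset the same way PrepareForBailoutForId does.
static uint32_t Encode(StateSketch state, uint32_t pc_offset) {
  return (pc_offset << 8) | static_cast<uint32_t>(state);
}
static StateSketch DecodeState(uint32_t packed) {
  return static_cast<StateSketch>(packed & 0xff);
}
static uint32_t DecodePc(uint32_t packed) { return packed >> 8; }
int main() {
  uint32_t packed = Encode(TOS_REG_SKETCH, 1234);
  assert(DecodeState(packed) == TOS_REG_SKETCH);
  assert(DecodePc(packed) == 1234);
  return 0;
}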
void FullCodeGenerator::RecordStackCheck(int ast_id) {
// The pc offset does not need to be encoded and packed together with a
// state.
BailoutEntry entry = { ast_id, masm_->pc_offset() };
stack_checks_.Add(entry);
}
int FullCodeGenerator::SlotOffset(Slot* slot) {
ASSERT(slot != NULL);
// Offset is negative because higher indexes are at lower addresses.
@ -335,13 +426,11 @@ void FullCodeGenerator::EffectContext::Plug(Register reg) const {
void FullCodeGenerator::AccumulatorValueContext::Plug(Register reg) const {
// Move value into place.
__ Move(result_register(), reg);
}
void FullCodeGenerator::StackValueContext::Plug(Register reg) const {
// Move value into place.
__ push(reg);
}
@ -349,6 +438,7 @@ void FullCodeGenerator::StackValueContext::Plug(Register reg) const {
void FullCodeGenerator::TestContext::Plug(Register reg) const {
// For simplicity we always test the accumulator register.
__ Move(result_register(), reg);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
codegen()->DoTest(true_label_, false_label_, fall_through_);
}
@ -370,6 +460,7 @@ void FullCodeGenerator::StackValueContext::PlugTOS() const {
void FullCodeGenerator::TestContext::PlugTOS() const {
// For simplicity we always test the accumulator register.
__ pop(result_register());
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
codegen()->DoTest(true_label_, false_label_, fall_through_);
}
@ -614,7 +705,8 @@ void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
switch (op) {
case Token::COMMA:
VisitForEffect(left);
Visit(right);
if (context()->IsTest()) ForwardBailoutToChild(expr);
context()->HandleExpression(right);
break;
case Token::OR:
@ -670,7 +762,8 @@ void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
context()->EmitLogicalLeft(expr, &eval_right, &done);
__ bind(&eval_right);
Visit(expr->right());
if (context()->IsTest()) ForwardBailoutToChild(expr);
context()->HandleExpression(expr->right());
__ bind(&done);
}
@ -692,15 +785,17 @@ void FullCodeGenerator::AccumulatorValueContext::EmitLogicalLeft(
BinaryOperation* expr,
Label* eval_right,
Label* done) const {
codegen()->Visit(expr->left());
HandleExpression(expr->left());
// We want the value in the accumulator for the test, and on the stack in case
// we need it.
__ push(result_register());
Label discard, restore;
if (expr->op() == Token::OR) {
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
codegen()->DoTest(&restore, &discard, &restore);
} else {
ASSERT(expr->op() == Token::AND);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
codegen()->DoTest(&discard, &restore, &restore);
}
__ bind(&restore);
@ -721,9 +816,11 @@ void FullCodeGenerator::StackValueContext::EmitLogicalLeft(
__ push(result_register());
Label discard;
if (expr->op() == Token::OR) {
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
codegen()->DoTest(done, &discard, &discard);
} else {
ASSERT(expr->op() == Token::AND);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
codegen()->DoTest(&discard, done, &discard);
}
__ bind(&discard);
@ -745,12 +842,66 @@ void FullCodeGenerator::TestContext::EmitLogicalLeft(BinaryOperation* expr,
}
void FullCodeGenerator::ForwardBailoutToChild(Expression* expr) {
if (!info_->HasDeoptimizationSupport()) return;
ASSERT(context()->IsTest());
ASSERT(expr == forward_bailout_stack_->expr());
forward_bailout_pending_ = forward_bailout_stack_;
}
void FullCodeGenerator::EffectContext::HandleExpression(
Expression* expr) const {
codegen()->HandleInNonTestContext(expr, NO_REGISTERS);
}
void FullCodeGenerator::AccumulatorValueContext::HandleExpression(
Expression* expr) const {
codegen()->HandleInNonTestContext(expr, TOS_REG);
}
void FullCodeGenerator::StackValueContext::HandleExpression(
Expression* expr) const {
codegen()->HandleInNonTestContext(expr, NO_REGISTERS);
}
void FullCodeGenerator::TestContext::HandleExpression(Expression* expr) const {
codegen()->VisitInTestContext(expr);
}
void FullCodeGenerator::HandleInNonTestContext(Expression* expr, State state) {
ASSERT(forward_bailout_pending_ == NULL);
AstVisitor::Visit(expr);
PrepareForBailout(expr, state);
// Forwarding bailouts to children is a one shot operation. It
// should have been processed at this point.
ASSERT(forward_bailout_pending_ == NULL);
}
void FullCodeGenerator::VisitInTestContext(Expression* expr) {
ForwardBailoutStack stack(expr, forward_bailout_pending_);
ForwardBailoutStack* saved = forward_bailout_stack_;
forward_bailout_pending_ = NULL;
forward_bailout_stack_ = &stack;
AstVisitor::Visit(expr);
forward_bailout_stack_ = saved;
}
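The VisitInTestContext/ForwardBailoutStack pair above uses a common pattern: a chain of stack-allocated nodes linked through a parent pointer, with the previous top saved and restored around each nested visit. A standalone sketch of just that pattern; all names below are illustrative.
#include <cstdio>
struct NodeSketch {
  NodeSketch(int id, NodeSketch* parent) : id(id), parent(parent) {}
  int id;
  NodeSketch* parent;
};
NodeSketch* g_top = 0;  // stands in for forward_bailout_stack_
void VisitNested(int depth) {
  NodeSketch node(depth, g_top);  // push: the node lives on the C++ stack
  NodeSketch* saved = g_top;
  g_top = &node;
  if (depth > 0) VisitNested(depth - 1);
  std::printf("leaving depth %d (parent %s)\n",
              node.id, node.parent != 0 ? "set" : "null");
  g_top = saved;                  // pop: restore the previous top
}
int main() {
  VisitNested(2);
  return 0;
}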
void FullCodeGenerator::VisitBlock(Block* stmt) {
Comment cmnt(masm_, "[ Block");
Breakable nested_statement(this, stmt);
SetStatementPosition(stmt);
PrepareForBailoutForId(stmt->EntryId(), TOS_REG);
VisitStatements(stmt->statements());
__ bind(nested_statement.break_target());
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
}
@ -786,6 +937,7 @@ void FullCodeGenerator::VisitIfStatement(IfStatement* stmt) {
Visit(stmt->then_statement());
}
__ bind(&done);
PrepareForBailoutForId(stmt->id(), NO_REGISTERS);
}
@ -872,7 +1024,7 @@ void FullCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
Comment cmnt(masm_, "[ DoWhileStatement");
SetStatementPosition(stmt);
Label body, stack_limit_hit, stack_check_success, done;
Label body, stack_check;
Iteration loop_statement(this, stmt);
increment_loop_depth();
@ -880,75 +1032,63 @@ void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
__ bind(&body);
Visit(stmt->body());
// Check stack before looping.
__ bind(loop_statement.continue_target());
__ StackLimitCheck(&stack_limit_hit);
__ bind(&stack_check_success);
// Record the position of the do while condition and make sure it is
// possible to break on the condition.
__ bind(loop_statement.continue_target());
PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
SetExpressionPosition(stmt->cond(), stmt->condition_position());
VisitForControl(stmt->cond(),
&body,
&stack_check,
loop_statement.break_target(),
loop_statement.break_target());
&stack_check);
// Check stack before looping.
__ bind(&stack_check);
EmitStackCheck(stmt);
__ jmp(&body);
__ bind(loop_statement.break_target());
__ jmp(&done);
__ bind(&stack_limit_hit);
StackCheckStub stack_stub;
__ CallStub(&stack_stub);
__ jmp(&stack_check_success);
__ bind(&done);
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
decrement_loop_depth();
}
void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
Comment cmnt(masm_, "[ WhileStatement");
Label body, stack_limit_hit, stack_check_success, done;
Label test, body;
Iteration loop_statement(this, stmt);
increment_loop_depth();
// Emit the test at the bottom of the loop.
__ jmp(loop_statement.continue_target());
__ jmp(&test);
__ bind(&body);
Visit(stmt->body());
__ bind(loop_statement.continue_target());
// Emit the statement position here as this is where the while
// statement code starts.
__ bind(loop_statement.continue_target());
SetStatementPosition(stmt);
// Check stack before looping.
__ StackLimitCheck(&stack_limit_hit);
__ bind(&stack_check_success);
EmitStackCheck(stmt);
__ bind(&test);
VisitForControl(stmt->cond(),
&body,
loop_statement.break_target(),
loop_statement.break_target());
__ bind(loop_statement.break_target());
__ jmp(&done);
__ bind(&stack_limit_hit);
StackCheckStub stack_stub;
__ CallStub(&stack_stub);
__ jmp(&stack_check_success);
__ bind(&done);
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
decrement_loop_depth();
}
void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
Comment cmnt(masm_, "[ ForStatement");
Label test, body, stack_limit_hit, stack_check_success;
Label test, body;
Iteration loop_statement(this, stmt);
if (stmt->init() != NULL) {
@ -959,30 +1099,25 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
// Emit the test at the bottom of the loop (even if empty).
__ jmp(&test);
__ bind(&stack_limit_hit);
StackCheckStub stack_stub;
__ CallStub(&stack_stub);
__ jmp(&stack_check_success);
__ bind(&body);
Visit(stmt->body());
__ bind(loop_statement.continue_target());
PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
SetStatementPosition(stmt);
if (stmt->next() != NULL) {
Visit(stmt->next());
}
__ bind(&test);
// Emit the statement position here as this is where the for
// statement code starts.
SetStatementPosition(stmt);
// Check stack before looping.
__ StackLimitCheck(&stack_limit_hit);
__ bind(&stack_check_success);
EmitStackCheck(stmt);
__ bind(&test);
if (stmt->cond() != NULL) {
VisitForControl(stmt->cond(),
&body,
@ -993,6 +1128,7 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
}
__ bind(loop_statement.break_target());
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
decrement_loop_depth();
}
@ -1130,14 +1266,15 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
for_test->false_label(),
NULL);
} else {
Visit(expr->then_expression());
context()->HandleExpression(expr->then_expression());
__ jmp(&done);
}
__ bind(&false_case);
if (context()->IsTest()) ForwardBailoutToChild(expr);
SetExpressionPosition(expr->else_expression(),
expr->else_expression_position());
Visit(expr->else_expression());
context()->HandleExpression(expr->else_expression());
// If control flow falls through Visit, merge it with true case here.
if (!context()->IsTest()) {
__ bind(&done);
@ -1145,12 +1282,6 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
}
void FullCodeGenerator::VisitSlot(Slot* expr) {
// Slots do not appear directly in the AST.
UNREACHABLE();
}
void FullCodeGenerator::VisitLiteral(Literal* expr) {
Comment cmnt(masm_, "[ Literal");
context()->Plug(expr->handle());

View File

@ -31,6 +31,8 @@
#include "v8.h"
#include "ast.h"
#include "code-stubs.h"
#include "codegen.h"
#include "compiler.h"
namespace v8 {
@ -66,17 +68,39 @@ class BreakableStatementChecker: public AstVisitor {
class FullCodeGenerator: public AstVisitor {
public:
enum State {
NO_REGISTERS,
TOS_REG
};
explicit FullCodeGenerator(MacroAssembler* masm)
: masm_(masm),
info_(NULL),
nesting_stack_(NULL),
loop_depth_(0),
context_(NULL) {
context_(NULL),
bailout_entries_(0),
stack_checks_(2), // There's always at least one.
forward_bailout_stack_(NULL),
forward_bailout_pending_(NULL) {
}
static bool MakeCode(CompilationInfo* info);
void Generate(CompilationInfo* info);
void PopulateDeoptimizationData(Handle<Code> code);
class StateField : public BitField<State, 0, 8> { };
class PcField : public BitField<unsigned, 8, 32-8> { };
static const char* State2String(State state) {
switch (state) {
case NO_REGISTERS: return "NO_REGISTERS";
case TOS_REG: return "TOS_REG";
}
UNREACHABLE();
return NULL;
}
private:
class Breakable;
@ -229,6 +253,24 @@ class FullCodeGenerator: public AstVisitor {
DISALLOW_COPY_AND_ASSIGN(ForIn);
};
// The forward bailout stack keeps track of the expressions that can
// bail out to just before the control flow is split in a child
// node. The stack elements are linked together through the parent
// link when visiting expressions in test contexts after requesting
// bailout in child forwarding.
class ForwardBailoutStack BASE_EMBEDDED {
public:
ForwardBailoutStack(Expression* expr, ForwardBailoutStack* parent)
: expr_(expr), parent_(parent) { }
Expression* expr() const { return expr_; }
ForwardBailoutStack* parent() const { return parent_; }
private:
Expression* const expr_;
ForwardBailoutStack* const parent_;
};
enum ConstantOperand {
kNoConstants,
kLeftConstant,
@ -274,19 +316,23 @@ class FullCodeGenerator: public AstVisitor {
// register.
MemOperand EmitSlotSearch(Slot* slot, Register scratch);
// Forward the bailout responsibility for the given expression to
// the next child visited (which must be in a test context).
void ForwardBailoutToChild(Expression* expr);
void VisitForEffect(Expression* expr) {
EffectContext context(this);
Visit(expr);
HandleInNonTestContext(expr, NO_REGISTERS);
}
void VisitForAccumulatorValue(Expression* expr) {
AccumulatorValueContext context(this);
Visit(expr);
HandleInNonTestContext(expr, TOS_REG);
}
void VisitForStackValue(Expression* expr) {
StackValueContext context(this);
Visit(expr);
HandleInNonTestContext(expr, NO_REGISTERS);
}
void VisitForControl(Expression* expr,
@ -294,9 +340,15 @@ class FullCodeGenerator: public AstVisitor {
Label* if_false,
Label* fall_through) {
TestContext context(this, if_true, if_false, fall_through);
Visit(expr);
VisitInTestContext(expr);
// Forwarding bailouts to children is a one shot operation. It
// should have been processed at this point.
ASSERT(forward_bailout_pending_ == NULL);
}
void HandleInNonTestContext(Expression* expr, State state);
void VisitInTestContext(Expression* expr);
void VisitDeclarations(ZoneList<Declaration*>* declarations);
void DeclareGlobals(Handle<FixedArray> pairs);
@ -310,12 +362,39 @@ class FullCodeGenerator: public AstVisitor {
Label* if_false,
Label* fall_through);
// Bailout support.
void PrepareForBailout(AstNode* node, State state);
void PrepareForBailoutForId(int id, State state);
// Record a call's return site offset, used to rebuild the frame if the
// called function was inlined at the site.
void RecordJSReturnSite(Call* call);
// Prepare for bailout before a test (or compare) and branch. If
// should_normalize, then the following comparison will not handle the
// canonical JS true value so we will insert a (dead) test against true at
// the actual bailout target from the optimized code. If not
// should_normalize, the true and false labels are ignored.
void PrepareForBailoutBeforeSplit(State state,
bool should_normalize,
Label* if_true,
Label* if_false);
// Platform-specific code for a variable, constant, or function
// declaration. Functions have an initial value.
void EmitDeclaration(Variable* variable,
Variable::Mode mode,
FunctionLiteral* function);
// Platform-specific code for checking the stack limit at the back edge of
// a loop.
void EmitStackCheck(IterationStatement* stmt);
// Record the OSR AST id corresponding to a stack check in the code.
void RecordStackCheck(int osr_ast_id);
// Emit a table of stack check ids and pcs into the code stream. Return
// the offset of the start of the table.
unsigned EmitStackCheckTable();
// Platform-specific return sequence
void EmitReturnSequence();
@ -471,14 +550,13 @@ class FullCodeGenerator: public AstVisitor {
void VisitForTypeofValue(Expression* expr);
MacroAssembler* masm_;
CompilationInfo* info_;
struct BailoutEntry {
unsigned id;
unsigned pc_and_state;
};
Label return_label_;
NestedStatement* nesting_stack_;
int loop_depth_;
class ExpressionContext {
class ExpressionContext BASE_EMBEDDED {
public:
explicit ExpressionContext(FullCodeGenerator* codegen)
: masm_(codegen->masm()), old_(codegen->context()), codegen_(codegen) {
@ -504,7 +582,8 @@ class FullCodeGenerator: public AstVisitor {
// Emit code to convert pure control flow to a pair of unbound labels into
// the result expected according to this expression context. The
// implementation may decide to bind either of the labels.
// implementation will bind both labels unless it's a TestContext, which
// won't bind them at this point.
virtual void Plug(Label* materialize_true,
Label* materialize_false) const = 0;
@ -526,12 +605,14 @@ class FullCodeGenerator: public AstVisitor {
Label** if_false,
Label** fall_through) const = 0;
virtual void HandleExpression(Expression* expr) const = 0;
// Returns true if we are evaluating only for side effects (i.e. if the result
// will be discarded.
// will be discarded).
virtual bool IsEffect() const { return false; }
// Returns true if we are branching on the value rather than materializing
// it.
// it. Only used for asserts.
virtual bool IsTest() const { return false; }
protected:
@ -565,6 +646,7 @@ class FullCodeGenerator: public AstVisitor {
Label** if_true,
Label** if_false,
Label** fall_through) const;
virtual void HandleExpression(Expression* expr) const;
};
class StackValueContext : public ExpressionContext {
@ -588,6 +670,7 @@ class FullCodeGenerator: public AstVisitor {
Label** if_true,
Label** if_false,
Label** fall_through) const;
virtual void HandleExpression(Expression* expr) const;
};
class TestContext : public ExpressionContext {
@ -626,6 +709,7 @@ class FullCodeGenerator: public AstVisitor {
Label** if_true,
Label** if_false,
Label** fall_through) const;
virtual void HandleExpression(Expression* expr) const;
virtual bool IsTest() const { return true; }
private:
@ -655,10 +739,20 @@ class FullCodeGenerator: public AstVisitor {
Label** if_true,
Label** if_false,
Label** fall_through) const;
virtual void HandleExpression(Expression* expr) const;
virtual bool IsEffect() const { return true; }
};
MacroAssembler* masm_;
CompilationInfo* info_;
Label return_label_;
NestedStatement* nesting_stack_;
int loop_depth_;
const ExpressionContext* context_;
ZoneList<BailoutEntry> bailout_entries_;
ZoneList<BailoutEntry> stack_checks_;
ForwardBailoutStack* forward_bailout_stack_;
ForwardBailoutStack* forward_bailout_pending_;
friend class NestedStatement;

View File

@ -30,6 +30,8 @@
#include "api.h"
#include "global-handles.h"
#include "vm-state-inl.h"
namespace v8 {
namespace internal {

View File

@ -147,13 +147,16 @@ typedef byte* Address;
#ifdef _MSC_VER
#define V8_UINT64_C(x) (x ## UI64)
#define V8_INT64_C(x) (x ## I64)
#define V8_INTPTR_C(x) (x ## I64)
#define V8_PTR_PREFIX "ll"
#else // _MSC_VER
#define V8_UINT64_C(x) (x ## UL)
#define V8_INT64_C(x) (x ## L)
#define V8_INTPTR_C(x) (x ## L)
#define V8_PTR_PREFIX "l"
#endif // _MSC_VER
#else // V8_HOST_ARCH_64_BIT
#define V8_INTPTR_C(x) (x)
#define V8_PTR_PREFIX ""
#endif // V8_HOST_ARCH_64_BIT
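A small standalone usage sketch of the two macros added here, assuming a 64-bit non-MSVC host; the _SKETCH names mirror the corresponding branch of the definitions above rather than including this header.
#include <cstdint>
#include <cstdio>
// Mirrors the 64-bit, non-MSVC branch of the definitions above.
#define V8_INTPTR_C_SKETCH(x) (x##L)
#define V8_PTR_PREFIX_SKETCH "l"
int main() {
  intptr_t big = V8_INTPTR_C_SKETCH(0x100000000);    // needs a 64-bit literal suffix
  std::printf("%" V8_PTR_PREFIX_SKETCH "d\n", big);  // portable printf of an intptr_t
  return 0;
}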
@ -223,6 +226,7 @@ const int kBinary32MinExponent = 0x01;
const int kBinary32MantissaBits = 23;
const int kBinary32ExponentShift = 23;
// The expression OFFSET_OF(type, field) computes the byte-offset
// of the specified field relative to the containing type. This
// corresponds to 'offsetof' (in stddef.h), except that it doesn't

214
src/graph-codegen.cc.rej Normal file
View File

@ -0,0 +1,214 @@
--- src/graph-codegen.cc (revision 757)
+++ src/graph-codegen.cc (working copy)
@@ -45,6 +45,7 @@
bailout_literals_(8),
arguments_stack_height_(0),
safepoint_pc_offsets_(32),
+ safepoint_bailout_ids_(32),
safepoint_span_indexes_(32),
current_block_(NULL),
next_block_(NULL),
@@ -257,6 +258,7 @@
}
}
+
SpanList* SpanList::Insert(Span* span) {
return new SpanList(span, this);
}
@@ -994,90 +996,9 @@
BAILOUT("attempted bailout when deoptimization is impossible");
}
- // Compute the output frame height.
- int height = environment_.ExpressionStackHeight();
-
- // Build the translation. The size is the part above the frame pointer.
- int translation_size = height + LocalCount();
- Translation translation(&translations_, translation_size);
-
- // Total output frame size: Expression stack + locals + fixed elements +
- // parameters and receiver.
- int output_frame_size = translation_size + 4 + ParameterCount() + 1;
-
- // The parameters are at the bottom of the frame. They have negative
- // span indices that increase (go toward zero) as the parameter index
- // goes up. They have positive destination indices that decrease as the
- // parameter index goes up.
- //
- // Output frame index of the slot above the last parameter. First '1' is
- // receiver, second '1' is to convert to a zero-based index.
- int parameter_base = output_frame_size - (ParameterCount() + 1) - 1;
- EnvironmentIterator parameters(&environment_,
- EnvironmentIterator::PARAMETERS);
- while (parameters.HasNext()) {
- Span* span = parameters.Next()->span();
- ASSERT(span->HasFixedSpillSlot());
- int dest_index = parameter_base - span->index();
- if (span->IsAllocated()) {
- translation.MoveStackReg(dest_index, span->reg());
- } else if (span->IsSpilled()) {
- // Nothing to do. Parameter already in its fixed slot.
- } else {
- UNREACHABLE();
- }
- }
-
- // Setup the locals. Locals have positive span indices that increase as
- // the local index goes up. They have positive output frame indices that
- // decrease as the local index goes up.
- int output_index = translation_size - 1; // For local 0.
- EnvironmentIterator locals(&environment_, EnvironmentIterator::LOCALS);
- while (locals.HasNext()) {
- Span* span = locals.Next()->span();
- if (span->IsAllocated()) {
- translation.MoveStackReg(output_index, span->reg());
- } else if (span->IsSpilled()) {
- // TODO(kmillikin): spilled spans should be already in place in the
- // output frame. Eliminate this move.
- translation.MoveStackStack(output_index, span->index());
- } else {
- UNREACHABLE();
- }
- --output_index;
- }
-
- // Setup the rest of the expression stack.
- for (int i = 0; i < height; i++) {
- Expression* expr = environment_.ExpressionStackAt(i);
- Span* span = expr->span();
- if (span->IsAllocated()) {
- translation.MoveStackReg(i, span->reg());
- } else if (span->IsSpilled()) {
- translation.MoveStackStack(i, span->index());
- } else if (span->IsArgument()) {
- int index = arguments_stack_height_ - (span->ArgumentIndex() + 1);
- translation.MoveStackArgument(i, index);
- } else if (expr->AsLiteral() != NULL) {
- int index = DefineBailoutLiteral(expr->AsLiteral()->handle());
- translation.MoveStackLiteral(i, index);
- } else {
- UNREACHABLE();
- }
- }
-
- // Emit the bailout information.
- int id = bailouts_.length();
- Bailout bailout = {
- node->id(),
- translation.index(),
- arguments_stack_height_,
- };
- bailouts_.Add(bailout);
+ unsigned id = RecordBailout(node);
Address entry = Deoptimizer::GetDeoptimizationEntry(id);
- if (entry == NULL) {
- BAILOUT("bailout was not prepared");
- }
+ if (entry == NULL) BAILOUT("too many bailouts");
__ j(cc, entry, RelocInfo::RUNTIME_ENTRY, not_taken);
}
@@ -1920,9 +1841,102 @@
}
safepoint_pc_offsets_.Add(masm_->pc_offset());
safepoint_span_indexes_.Add(indexes);
+
+ // Record a bailout at every safe point.
+ unsigned id = RecordBailout(current_instruction());
+ safepoint_bailout_ids_.Add(id);
+ if (Deoptimizer::GetDeoptimizationEntry(id) == NULL) {
+ BAILOUT("too many bailouts");
+ }
}
+unsigned GraphCodeGenerator::RecordBailout(AstNode* node) {
+ // Compute the output frame height.
+ int height = environment_.ExpressionStackHeight();
+
+ // Build the translation. The size is the part above the frame pointer.
+ int translation_size = height + LocalCount();
+ Translation translation(&translations_, translation_size);
+
+ // Total output frame size: Expression stack + locals + fixed elements +
+ // parameters and receiver.
+ int output_frame_size = translation_size + 4 + ParameterCount() + 1;
+
+ // The parameters are at the bottom of the frame. They have negative
+ // span indices that increase (go toward zero) as the parameter index
+ // goes up. They have positive destination indices that decrease as the
+ // parameter index goes up.
+ //
+ // Output frame index of the slot above the last parameter. First '1' is
+ // receiver, second '1' is to convert to a zero-based index.
+ int parameter_base = output_frame_size - (ParameterCount() + 1) - 1;
+ EnvironmentIterator parameters(&environment_,
+ EnvironmentIterator::PARAMETERS);
+ while (parameters.HasNext()) {
+ Span* span = parameters.Next()->span();
+ ASSERT(span->HasFixedSpillSlot());
+ int dest_index = parameter_base - span->index();
+ if (span->IsAllocated()) {
+ translation.MoveStackReg(dest_index, span->reg());
+ } else if (span->IsSpilled()) {
+ // Nothing to do. Parameter already in its fixed slot.
+ } else {
+ UNREACHABLE();
+ }
+ }
+
+ // Setup the locals. Locals have positive span indices that increase as
+ // the local index goes up. They have positive output frame indices that
+ // decrease as the local index goes up.
+ int output_index = translation_size - 1; // For local 0.
+ EnvironmentIterator locals(&environment_, EnvironmentIterator::LOCALS);
+ while (locals.HasNext()) {
+ Span* span = locals.Next()->span();
+ if (span->IsAllocated()) {
+ translation.MoveStackReg(output_index, span->reg());
+ } else if (span->IsSpilled()) {
+ // TODO(kmillikin): spilled spans should be already in place in the
+ // output frame. Eliminate this move.
+ translation.MoveStackStack(output_index, span->index());
+ } else {
+ UNREACHABLE();
+ }
+ --output_index;
+ }
+
+ // Setup the rest of the expression stack.
+ for (int i = 0; i < height; i++) {
+ Expression* expr = environment_.ExpressionStackAt(i);
+ Span* span = expr->span();
+ if (span->IsAllocated()) {
+ translation.MoveStackReg(i, span->reg());
+ } else if (span->IsSpilled()) {
+ translation.MoveStackStack(i, span->index());
+ } else if (span->IsArgument()) {
+ int index = arguments_stack_height_ - (span->ArgumentIndex() + 1);
+ translation.MoveStackArgument(i, index);
+ } else if (expr->AsLiteral() != NULL) {
+ int index = DefineBailoutLiteral(expr->AsLiteral()->handle());
+ translation.MoveStackLiteral(i, index);
+ } else {
+ UNREACHABLE();
+ }
+ }
+
+ // Emit the bailout information.
+ unsigned id = bailouts_.length();
+ Bailout bailout = {
+ node->id(),
+ translation.index(),
+ arguments_stack_height_,
+ };
+ bailouts_.Add(bailout);
+ return id;
+
+}
+
+
unsigned GraphCodeGenerator::EmitSafepointTable(unsigned prologue_size) {
// Make sure the safepoint table is properly aligned. Pad with nops.
static const unsigned kTableAlignment = kIntSize;
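To make the frame-size arithmetic in RecordBailout above concrete, a worked example under assumed numbers (an expression-stack height of 2, three locals, two parameters): translation_size = 2 + 3 = 5, output_frame_size = 5 + 4 + 2 + 1 = 12, and parameter_base = 12 - (2 + 1) - 1 = 8.
#include <cassert>
int main() {
  const int height = 2, locals = 3, parameters = 2;  // example numbers only
  const int translation_size = height + locals;      // the part above the fp
  const int output_frame_size = translation_size + 4 + parameters + 1;
  const int parameter_base = output_frame_size - (parameters + 1) - 1;
  assert(translation_size == 5);
  assert(output_frame_size == 12);
  assert(parameter_base == 8);
  return 0;
}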

18
src/graph-codegen.h.rej Normal file
View File

@ -0,0 +1,18 @@
--- src/graph-codegen.h (revision 757)
+++ src/graph-codegen.h (working copy)
@@ -254,6 +254,7 @@
// Support for recording safepoint information.
void RecordSafepoint();
+ unsigned RecordBailout(AstNode* node);
// Emits the safepoint table and returns the pc offset where the
// table starts after the instructions.
@@ -286,6 +287,7 @@
// Safe point data structures.
ZoneList<unsigned> safepoint_pc_offsets_;
+ ZoneList<unsigned> safepoint_bailout_ids_;
ZoneList<ZoneList<int>*> safepoint_span_indexes_;
// Current and next basic block.

View File

@ -39,6 +39,7 @@
#include "runtime.h"
#include "string-search.h"
#include "stub-cache.h"
#include "vm-state-inl.h"
namespace v8 {
namespace internal {
@ -797,7 +798,7 @@ bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
static bool CompileLazyHelper(CompilationInfo* info,
ClearExceptionFlag flag) {
// Compile the source information to a code object.
ASSERT(!info->shared_info()->is_compiled());
ASSERT(info->IsOptimizing() || !info->shared_info()->is_compiled());
bool result = Compiler::CompileLazy(info);
ASSERT(result != Top::has_pending_exception());
if (!result && flag == CLEAR_EXCEPTION) Top::clear_pending_exception();
@ -814,36 +815,47 @@ bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
bool CompileLazy(Handle<JSFunction> function,
ClearExceptionFlag flag) {
bool result = true;
if (function->shared()->is_compiled()) {
function->set_code(function->shared()->code());
PROFILE(FunctionCreateEvent(*function));
function->ReplaceCode(function->shared()->code());
function->shared()->set_code_age(0);
return true;
} else {
CompilationInfo info(function);
bool result = CompileLazyHelper(&info, flag);
result = CompileLazyHelper(&info, flag);
ASSERT(!result || function->is_compiled());
PROFILE(FunctionCreateEvent(*function));
return result;
}
if (result && function->is_compiled()) {
PROFILE(FunctionCreateEvent(*function));
}
return result;
}
bool CompileLazyInLoop(Handle<JSFunction> function,
ClearExceptionFlag flag) {
bool result = true;
if (function->shared()->is_compiled()) {
function->set_code(function->shared()->code());
PROFILE(FunctionCreateEvent(*function));
function->ReplaceCode(function->shared()->code());
function->shared()->set_code_age(0);
return true;
} else {
CompilationInfo info(function);
info.MarkAsInLoop();
bool result = CompileLazyHelper(&info, flag);
result = CompileLazyHelper(&info, flag);
ASSERT(!result || function->is_compiled());
PROFILE(FunctionCreateEvent(*function));
return result;
}
if (result && function->is_compiled()) {
PROFILE(FunctionCreateEvent(*function));
}
return result;
}
bool CompileOptimized(Handle<JSFunction> function, int osr_ast_id) {
CompilationInfo info(function);
info.SetOptimizing(osr_ast_id);
bool result = CompileLazyHelper(&info, KEEP_EXCEPTION);
if (result) PROFILE(FunctionCreateEvent(*function));
return result;
}

View File

@ -342,6 +342,8 @@ bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag);
bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag);
bool CompileOptimized(Handle<JSFunction> function, int osr_ast_id);
class NoHandleAllocation BASE_EMBEDDED {
public:
#ifndef DEBUG

View File

@ -409,8 +409,8 @@ void Heap::SetLastScriptId(Object* last_script_id) {
v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_0", true);\
} \
if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
Heap::CollectGarbage(Failure::cast(__maybe_object__)-> \
allocation_space()); \
Heap::CollectGarbage( \
Failure::cast(__maybe_object__)->allocation_space()); \
__maybe_object__ = FUNCTION_CALL; \
if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
if (__maybe_object__->IsOutOfMemory()) { \

View File

@ -38,10 +38,12 @@
#include "mark-compact.h"
#include "natives.h"
#include "objects-visiting.h"
#include "runtime-profiler.h"
#include "scanner-base.h"
#include "scopeinfo.h"
#include "snapshot.h"
#include "v8threads.h"
#include "vm-state-inl.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
@ -839,6 +841,8 @@ void Heap::MarkCompactPrologue(bool is_compacting) {
ContextSlotCache::Clear();
DescriptorLookupCache::Clear();
RuntimeProfiler::MarkCompactPrologue(is_compacting);
CompilationCache::MarkCompactPrologue();
CompletelyClearInstanceofCache();
@ -1049,6 +1053,14 @@ void Heap::Scavenge() {
// Scavenge object reachable from the global contexts list directly.
scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));
// Scavenge objects reachable from the runtime-profiler sampler
// window directly.
Object** sampler_window_address = RuntimeProfiler::SamplerWindowAddress();
int sampler_window_size = RuntimeProfiler::SamplerWindowSize();
scavenge_visitor.VisitPointers(
sampler_window_address,
sampler_window_address + sampler_window_size);
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
UpdateNewSpaceReferencesInExternalStringTable(
@ -1116,6 +1128,40 @@ void Heap::UpdateNewSpaceReferencesInExternalStringTable(
}
static Object* ProcessFunctionWeakReferences(Object* function,
WeakObjectRetainer* retainer) {
Object* head = Heap::undefined_value();
JSFunction* tail = NULL;
Object* candidate = function;
while (!candidate->IsUndefined()) {
// Check whether to keep the candidate in the list.
JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
Object* retain = retainer->RetainAs(candidate);
if (retain != NULL) {
if (head->IsUndefined()) {
// First element in the list.
head = candidate_function;
} else {
// Subsequent elements in the list.
ASSERT(tail != NULL);
tail->set_next_function_link(candidate_function);
}
// Retained function is new tail.
tail = candidate_function;
}
// Move to next element in the list.
candidate = candidate_function->next_function_link();
}
// Terminate the list if there is one or more elements.
if (tail != NULL) {
tail->set_next_function_link(Heap::undefined_value());
}
return head;
}
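The traversal above is the classic prune-a-singly-linked-list-in-place pattern: walk the chain, relink retained nodes through the current tail, and terminate the list at the end. A standalone sketch with a plain struct standing in for JSFunction and its next_function_link field:
#include <cstdio>
struct NodeSketch { int id; NodeSketch* next; };
// Keep only the nodes the predicate retains, relinking survivors in place
// and terminating the new list, mirroring ProcessFunctionWeakReferences.
static NodeSketch* FilterList(NodeSketch* list, bool (*retain)(const NodeSketch*)) {
  NodeSketch* head = NULL;
  NodeSketch* tail = NULL;
  for (NodeSketch* candidate = list; candidate != NULL; candidate = candidate->next) {
    if (retain(candidate)) {
      if (head == NULL) {
        head = candidate;        // first retained element
      } else {
        tail->next = candidate;  // subsequent retained elements
      }
      tail = candidate;
    }
  }
  if (tail != NULL) tail->next = NULL;  // terminate the pruned list
  return head;
}
static bool KeepOddIds(const NodeSketch* n) { return (n->id & 1) != 0; }
int main() {
  NodeSketch c = { 3, NULL };
  NodeSketch b = { 2, &c };
  NodeSketch a = { 1, &b };
  for (NodeSketch* n = FilterList(&a, KeepOddIds); n != NULL; n = n->next) {
    std::printf("%d\n", n->id);  // prints 1 and 3; node 2 is dropped
  }
  return 0;
}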
void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
Object* head = undefined_value();
Context* tail = NULL;
@ -1137,6 +1183,15 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
}
// Retained context is new tail.
tail = candidate_context;
// Process the weak list of optimized functions for the context.
Object* function_list_head =
ProcessFunctionWeakReferences(
candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
retainer);
candidate_context->set_unchecked(Context::OPTIMIZED_FUNCTIONS_LIST,
function_list_head,
UPDATE_WRITE_BARRIER);
}
// Move to next element in the list.
candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
@ -1651,6 +1706,11 @@ bool Heap::CreateInitialMaps() {
}
set_byte_array_map(Map::cast(obj));
{ MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_empty_byte_array(ByteArray::cast(obj));
{ MaybeObject* maybe_obj =
AllocateMap(PIXEL_ARRAY_TYPE, PixelArray::kAlignedSize);
if (!maybe_obj->ToObject(&obj)) return false;
@@ -2245,9 +2305,11 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_debug_info(undefined_value());
share->set_inferred_name(empty_string());
share->set_compiler_hints(0);
share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));
share->set_initial_map(undefined_value());
share->set_this_property_assignments_count(0);
share->set_this_property_assignments(undefined_value());
share->set_opt_count(0);
share->set_num_literals(0);
share->set_end_position(0);
share->set_function_token_position(0);
@@ -2666,6 +2728,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
code->set_instruction_size(desc.instr_size);
code->set_relocation_info(ByteArray::cast(reloc_info));
code->set_flags(flags);
code->set_deoptimization_data(empty_fixed_array());
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
if (!self_reference.is_null()) {
@@ -2794,6 +2857,7 @@ MaybeObject* Heap::InitializeFunction(JSFunction* function,
function->set_prototype_or_initial_map(prototype);
function->set_context(undefined_value());
function->set_literals(empty_fixed_array());
function->set_next_function_link(undefined_value());
return function;
}


@@ -62,6 +62,7 @@ namespace internal {
V(Object, termination_exception, TerminationException) \
V(Map, hash_table_map, HashTableMap) \
V(FixedArray, empty_fixed_array, EmptyFixedArray) \
V(ByteArray, empty_byte_array, EmptyByteArray) \
V(Map, string_map, StringMap) \
V(Map, ascii_string_map, AsciiStringMap) \
V(Map, symbol_map, SymbolMap) \
@@ -173,6 +174,8 @@ namespace internal {
V(value_of_symbol, "valueOf") \
V(InitializeVarGlobal_symbol, "InitializeVarGlobal") \
V(InitializeConstGlobal_symbol, "InitializeConstGlobal") \
V(KeyedLoadSpecialized_symbol, "KeyedLoadSpecialized") \
V(KeyedStoreSpecialized_symbol, "KeyedStoreSpecialized") \
V(stack_overflow_symbol, "kStackOverflowBoilerplate") \
V(illegal_access_symbol, "illegal access") \
V(out_of_memory_symbol, "out-of-memory") \

File diffs suppressed because they are too large:
1482  src/hydrogen-instructions.cc  (Normal file)
2885  src/hydrogen-instructions.h  (Normal file)
5540  src/hydrogen.cc  (Normal file)
1061  src/hydrogen.h  (Normal file)


@@ -120,6 +120,30 @@ Address* RelocInfo::target_reference_address() {
}
Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = Memory::Address_at(pc_);
return Handle<JSGlobalPropertyCell>(
reinterpret_cast<JSGlobalPropertyCell**>(address));
}
JSGlobalPropertyCell* RelocInfo::target_cell() {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = Memory::Address_at(pc_);
Object* object = HeapObject::FromAddress(
address - JSGlobalPropertyCell::kValueOffset);
return reinterpret_cast<JSGlobalPropertyCell*>(object);
}
void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
}
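The GLOBAL_PROPERTY_CELL accessors above store the address of a cell's value field in the instruction stream and recover the owning cell by subtracting kValueOffset (V8 additionally applies its heap-object tag via HeapObject::FromAddress). A standalone sketch of that interior-pointer round trip on an illustrative struct, ignoring tagging:

// Illustrative sketch (not part of the commit): recover an object from the
// stored address of one of its fields by subtracting the field's offset.
#include <cassert>
#include <cstddef>
#include <stdint.h>

struct Cell {
  void* map;
  void* value;  // stands in for JSGlobalPropertyCell's value field
};

static const size_t kValueOffset = offsetof(Cell, value);

// What set_target_cell records: the address of the value field.
uintptr_t ValueFieldAddress(Cell* cell) {
  return reinterpret_cast<uintptr_t>(cell) + kValueOffset;
}

// What target_cell does: subtract the offset to get back to the cell.
Cell* CellFromValueFieldAddress(uintptr_t address) {
  return reinterpret_cast<Cell*>(address - kValueOffset);
}

int main() {
  Cell cell = { NULL, NULL };
  uintptr_t stored = ValueFieldAddress(&cell);
  assert(CellFromValueFieldAddress(stored) == &cell);
  return 0;
}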
Address RelocInfo::call_address() {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
@@ -167,6 +191,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
visitor->VisitPointer(target_object_address());
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
visitor->VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(target_reference_address());
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -190,6 +216,8 @@ void RelocInfo::Visit() {
StaticVisitor::VisitPointer(target_object_address());
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
StaticVisitor::VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(target_reference_address());
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -246,6 +274,12 @@ Immediate::Immediate(Smi* value) {
}
Immediate::Immediate(Address addr) {
x_ = reinterpret_cast<int32_t>(addr);
rmode_ = RelocInfo::NONE;
}
void Assembler::emit(uint32_t x) {
*reinterpret_cast<uint32_t*>(pc_) = x;
pc_ += sizeof(uint32_t);


@@ -32,7 +32,7 @@
// The original source code covered by the above license above has been modified
// significantly by Google Inc.
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
#include "v8.h"
@@ -56,10 +56,10 @@ uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
// The Probe method needs executable memory, so it uses Heap::CreateCode.
// Allocation failure is silent and leads to safe default.
void CpuFeatures::Probe() {
void CpuFeatures::Probe(bool portable) {
ASSERT(Heap::HasBeenSetup());
ASSERT(supported_ == 0);
if (Serializer::enabled()) {
if (portable && Serializer::enabled()) {
supported_ |= OS::CpuFeaturesImpliedByPlatform();
return; // No features if we might serialize.
}
@@ -137,7 +137,7 @@ void CpuFeatures::Probe() {
found_by_runtime_probing_ = supported_;
uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
supported_ |= os_guarantees;
found_by_runtime_probing_ &= ~os_guarantees;
found_by_runtime_probing_ &= portable ? ~os_guarantees : 0;
}
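Probe now takes a portable flag, and the mask arithmetic at the end is compact: supported features are the union of what probing finds and what the OS guarantees, while found_by_runtime_probing_ keeps only the probed-but-not-guaranteed bits for a portable probe and is cleared entirely for a non-portable one. A standalone sketch with made-up feature bits:

// Illustrative sketch (not part of the commit): the feature-mask
// bookkeeping in CpuFeatures::Probe, with made-up feature bits.
#include <cassert>
#include <stdint.h>

const uint64_t kSSE2 = 1 << 0;
const uint64_t kSSE3 = 1 << 1;
const uint64_t kCMOV = 1 << 2;

struct ProbeResult {
  uint64_t supported;
  uint64_t found_by_runtime_probing;
};

ProbeResult Probe(bool portable, uint64_t probed, uint64_t os_guarantees) {
  ProbeResult result;
  result.supported = probed | os_guarantees;
  // Portable probe: remember which features were only found by probing.
  // Non-portable probe: trust everything, track nothing.
  result.found_by_runtime_probing = probed & (portable ? ~os_guarantees : 0);
  return result;
}

int main() {
  // The OS guarantees SSE2; runtime probing also finds SSE3 and CMOV.
  ProbeResult portable_result = Probe(true, kSSE2 | kSSE3 | kCMOV, kSSE2);
  assert(portable_result.supported == (kSSE2 | kSSE3 | kCMOV));
  assert(portable_result.found_by_runtime_probing == (kSSE3 | kCMOV));

  ProbeResult nonportable_result = Probe(false, kSSE2 | kSSE3 | kCMOV, kSSE2);
  assert(nonportable_result.found_by_runtime_probing == 0);
  return 0;
}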
@@ -435,6 +435,13 @@ void Assembler::push(const Immediate& x) {
}
void Assembler::push_imm32(int32_t imm32) {
EnsureSpace ensure_space(this);
EMIT(0x68);
emit(imm32);
}
void Assembler::push(Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1542,7 +1549,9 @@ void Assembler::bind(NearLabel* L) {
L->bind_to(pc_offset());
}
void Assembler::call(Label* L) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
last_pc_ = pc_;
if (L->is_bound()) {
@@ -1561,6 +1570,7 @@ void Assembler::call(Label* L) {
void Assembler::call(byte* entry, RelocInfo::Mode rmode) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
last_pc_ = pc_;
ASSERT(!RelocInfo::IsCodeTarget(rmode));
@@ -1570,6 +1580,7 @@ void Assembler::call(byte* entry, RelocInfo::Mode rmode) {
void Assembler::call(const Operand& adr) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xFF);
@@ -2420,6 +2431,17 @@ void Assembler::movd(XMMRegister dst, const Operand& src) {
}
void Assembler::pand(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0xDB);
emit_sse_operand(dst, src);
}
void Assembler::pxor(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2491,7 +2513,7 @@ void Assembler::RecordDebugBreakSlot() {
void Assembler::RecordComment(const char* msg) {
if (FLAG_debug_code) {
if (FLAG_code_comments) {
EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
}
@@ -2623,9 +2645,15 @@ void Assembler::emit_farith(int b1, int b2, int i) {
}
void Assembler::dd(uint32_t data, RelocInfo::Mode reloc_info) {
void Assembler::db(uint8_t data) {
EnsureSpace ensure_space(this);
emit(data, reloc_info);
EMIT(data);
}
void Assembler::dd(uint32_t data) {
EnsureSpace ensure_space(this);
emit(data);
}


@@ -30,7 +30,7 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// A light-weight IA32 Assembler.
@@ -64,7 +64,36 @@ namespace internal {
// and best performance in optimized code.
//
struct Register {
bool is_valid() const { return 0 <= code_ && code_ < 8; }
static const int kNumAllocatableRegisters = 5;
static const int kNumRegisters = 8;
static int ToAllocationIndex(Register reg) {
ASSERT(reg.code() < 4 || reg.code() == 7);
return (reg.code() == 7) ? 4 : reg.code();
}
static Register FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
return (index == 4) ? from_code(7) : from_code(index);
}
static const char* AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
const char* const names[] = {
"eax",
"ecx",
"edx",
"ebx",
"edi"
};
return names[index];
}
static Register from_code(int code) {
Register r = { code };
return r;
}
bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
bool is(Register reg) const { return code_ == reg.code_; }
// eax, ebx, ecx and edx are byte registers, the rest are not.
bool is_byte_register() const { return code_ <= 3; }
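The Register struct above now carries a mapping between machine register codes and linear allocation indices, presumably for the new optimizing backend's register allocator: eax, ecx, edx and ebx keep their codes 0-3, esp/ebp/esi are not allocatable, and edi (code 7) is folded into index 4. A standalone round-trip check of that mapping, using plain ints instead of the Register struct:

// Illustrative sketch (not part of the commit): the ia32 allocation-index
// mapping, re-stated on raw register codes.
#include <cassert>

static const int kNumAllocatableRegisters = 5;

int ToAllocationIndex(int reg_code) {
  assert(reg_code < 4 || reg_code == 7);   // eax..ebx or edi only
  return (reg_code == 7) ? 4 : reg_code;
}

int FromAllocationIndex(int index) {
  assert(index >= 0 && index < kNumAllocatableRegisters);
  return (index == 4) ? 7 : index;
}

int main() {
  // Round trip every allocatable register through the mapping.
  const int codes[] = {0, 1, 2, 3, 7};  // eax, ecx, edx, ebx, edi
  for (int i = 0; i < kNumAllocatableRegisters; i++) {
    assert(ToAllocationIndex(codes[i]) == i);
    assert(FromAllocationIndex(ToAllocationIndex(codes[i])) == codes[i]);
  }
  return 0;
}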
@@ -93,7 +122,40 @@ const Register no_reg = { -1 };
struct XMMRegister {
bool is_valid() const { return 0 <= code_ && code_ < 8; }
static const int kNumAllocatableRegisters = 7;
static const int kNumRegisters = 8;
static int ToAllocationIndex(XMMRegister reg) {
ASSERT(reg.code() != 0);
return reg.code() - 1;
}
static XMMRegister FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
return from_code(index + 1);
}
static const char* AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
const char* const names[] = {
"xmm1",
"xmm2",
"xmm3",
"xmm4",
"xmm5",
"xmm6",
"xmm7"
};
return names[index];
}
static XMMRegister from_code(int code) {
XMMRegister r = { code };
return r;
}
bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
bool is(XMMRegister reg) const { return code_ == reg.code_; }
int code() const {
ASSERT(is_valid());
return code_;
@@ -102,6 +164,7 @@ struct XMMRegister {
int code_;
};
const XMMRegister xmm0 = { 0 };
const XMMRegister xmm1 = { 1 };
const XMMRegister xmm2 = { 2 };
@@ -111,6 +174,17 @@ const XMMRegister xmm5 = { 5 };
const XMMRegister xmm6 = { 6 };
const XMMRegister xmm7 = { 7 };
typedef XMMRegister DoubleRegister;
// Index of register used in pusha/popa.
// Order of pushed registers: EAX, ECX, EDX, EBX, ESP, EBP, ESI, and EDI
inline int EspIndexForPushAll(Register reg) {
return Register::kNumRegisters - 1 - reg.code();
}
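EspIndexForPushAll maps a register code to its slot relative to esp after a pusha: the registers are pushed in the order EAX, ECX, EDX, EBX, ESP, EBP, ESI, EDI, so the one pushed last (edi, code 7) ends up at [esp] and eax at the deepest slot. A quick standalone check of that arithmetic:

// Illustrative sketch (not part of the commit): slot math after pusha.
#include <cassert>

static const int kNumRegisters = 8;

int EspIndexForPushAll(int reg_code) {
  return kNumRegisters - 1 - reg_code;
}

int main() {
  assert(EspIndexForPushAll(7) == 0);  // edi is pushed last, on top of the stack
  assert(EspIndexForPushAll(0) == 7);  // eax is pushed first, deepest slot
  // Byte offset from esp of ebx (code 3) after pusha, with 4-byte slots:
  assert(EspIndexForPushAll(3) * 4 == 16);
  return 0;
}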
enum Condition {
// any value < 0 is considered no_condition
no_condition = -1,
@@ -202,6 +276,7 @@ class Immediate BASE_EMBEDDED {
inline explicit Immediate(const ExternalReference& ext);
inline explicit Immediate(Handle<Object> handle);
inline explicit Immediate(Smi* value);
inline explicit Immediate(Address addr);
static Immediate CodeRelativeOffset(Label* label) {
return Immediate(label);
@@ -281,6 +356,11 @@ class Operand BASE_EMBEDDED {
RelocInfo::EXTERNAL_REFERENCE);
}
static Operand Cell(Handle<JSGlobalPropertyCell> cell) {
return Operand(reinterpret_cast<int32_t>(cell.location()),
RelocInfo::GLOBAL_PROPERTY_CELL);
}
// Returns true if this Operand is a wrapper for the specified register.
bool is_reg(Register reg) const;
@@ -369,9 +449,12 @@ class Displacement BASE_EMBEDDED {
// }
class CpuFeatures : public AllStatic {
public:
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
static void Probe();
// Detect features of the target CPU. If the portable flag is set,
// the method sets safe defaults if the serializer is enabled
// (snapshots must be portable).
static void Probe(bool portable);
static void Clear() { supported_ = 0; }
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
if (f == SSE2 && !FLAG_enable_sse2) return false;
@@ -484,6 +567,11 @@ class Assembler : public Malloced {
// The debug break slot must be able to contain a call instruction.
static const int kDebugBreakSlotLength = kCallInstructionLength;
// One byte opcode for test eax,0xXXXXXXXX.
static const byte kTestEaxByte = 0xA9;
// One byte opcode for test al, 0xXX.
static const byte kTestAlByte = 0xA8;
// ---------------------------------------------------------------------------
// Code generation
//
@@ -519,6 +607,7 @@ class Assembler : public Malloced {
void popfd();
void push(const Immediate& x);
void push_imm32(int32_t imm32);
void push(Register src);
void push(const Operand& src);
@@ -818,6 +907,7 @@ class Assembler : public Malloced {
void movd(XMMRegister dst, const Operand& src);
void movsd(XMMRegister dst, XMMRegister src);
void pand(XMMRegister dst, XMMRegister src);
void pxor(XMMRegister dst, XMMRegister src);
void ptest(XMMRegister dst, XMMRegister src);
@@ -845,12 +935,13 @@ class Assembler : public Malloced {
void RecordDebugBreakSlot();
// Record a comment relocation entry that can be used by a disassembler.
// Use --debug_code to enable.
// Use --code-comments to enable.
void RecordComment(const char* msg);
// Writes a single word of data in the code stream.
// Used for inline tables, e.g., jump-tables.
void dd(uint32_t data, RelocInfo::Mode reloc_info);
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
void db(uint8_t data);
void dd(uint32_t data);
int pc_offset() const { return pc_ - buffer_; }
@@ -878,8 +969,8 @@ class Assembler : public Malloced {
void emit_sse_operand(XMMRegister dst, XMMRegister src);
void emit_sse_operand(Register dst, XMMRegister src);
private:
byte* addr_at(int pos) { return buffer_ + pos; }
private:
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
uint32_t long_at(int pos) {


@@ -1,4 +1,4 @@
// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -31,6 +31,8 @@
#include "code-stubs.h"
#include "codegen-inl.h"
#include "deoptimizer.h"
#include "full-codegen.h"
namespace v8 {
namespace internal {
@@ -480,6 +482,85 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
}
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
// Enter an internal frame.
__ EnterInternalFrame();
// Push a copy of the function onto the stack.
__ push(edi);
__ push(edi); // Function is also the parameter to the runtime call.
__ CallRuntime(Runtime::kLazyRecompile, 1);
// Restore function and tear down temporary frame.
__ pop(edi);
__ LeaveInternalFrame();
// Do a tail-call of the compiled function.
__ lea(ecx, FieldOperand(eax, Code::kHeaderSize));
__ jmp(Operand(ecx));
}
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
// Enter an internal frame.
__ EnterInternalFrame();
// Pass the function and deoptimization type to the runtime system.
__ push(Immediate(Smi::FromInt(static_cast<int>(type))));
__ CallRuntime(Runtime::kNotifyDeoptimized, 1);
// Tear down temporary frame.
__ LeaveInternalFrame();
// Get the full codegen state from the stack and untag it.
__ mov(ecx, Operand(esp, 1 * kPointerSize));
__ SmiUntag(ecx);
// Switch on the state.
NearLabel not_no_registers, not_tos_eax;
__ cmp(ecx, FullCodeGenerator::NO_REGISTERS);
__ j(not_equal, &not_no_registers);
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
__ mov(eax, Operand(esp, 2 * kPointerSize));
__ cmp(ecx, FullCodeGenerator::TOS_REG);
__ j(not_equal, &not_tos_eax);
__ ret(2 * kPointerSize); // Remove state, eax.
__ bind(&not_tos_eax);
__ Abort("no cases left");
}
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
// TODO(kasperl): Do we need to save/restore the XMM registers too?
// For now, we are relying on the fact that Runtime::NotifyOSR
// doesn't do any garbage collection which allows us to save/restore
// the registers without worrying about which of them contain
// pointers. This seems a bit fragile.
__ pushad();
__ EnterInternalFrame();
__ CallRuntime(Runtime::kNotifyOSR, 0);
__ LeaveInternalFrame();
__ popad();
__ ret(0);
}
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
{ Label done;
@@ -1418,6 +1499,76 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// We shouldn't be performing on-stack replacement in the first
// place if the CPU features we need for the optimized Crankshaft
// code aren't supported.
CpuFeatures::Probe(false);
if (!CpuFeatures::IsSupported(SSE2)) {
__ Abort("Unreachable code: Cannot optimize without SSE2 support.");
return;
}
// Get the loop depth of the stack guard check. This is recorded in
// a test(eax, depth) instruction right after the call.
Label stack_check;
__ mov(ebx, Operand(esp, 0)); // return address
if (FLAG_debug_code) {
__ cmpb(Operand(ebx, 0), Assembler::kTestAlByte);
__ Assert(equal, "test eax instruction not found after loop stack check");
}
__ movzx_b(ebx, Operand(ebx, 1)); // depth
// Get the loop nesting level at which we allow OSR from the
// unoptimized code and check if we want to do OSR yet. If not we
// should perform a stack guard check so we can get interrupts while
// waiting for on-stack replacement.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
__ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
__ cmpb(ebx, FieldOperand(ecx, Code::kAllowOSRAtLoopNestingLevelOffset));
__ j(greater, &stack_check);
// Pass the function to optimize as the argument to the on-stack
// replacement runtime function.
__ EnterInternalFrame();
__ push(eax);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
__ LeaveInternalFrame();
// If the result was -1 it means that we couldn't optimize the
// function. Just return and continue in the unoptimized version.
NearLabel skip;
__ cmp(Operand(eax), Immediate(Smi::FromInt(-1)));
__ j(not_equal, &skip);
__ ret(0);
// If we decide not to perform on-stack replacement we perform a
// stack guard check to enable interrupts.
__ bind(&stack_check);
NearLabel ok;
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit();
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok, taken);
StackCheckStub stub;
__ TailCallStub(&stub);
__ Abort("Unreachable code: returned from tail call.");
__ bind(&ok);
__ ret(0);
__ bind(&skip);
// Untag the AST id and push it on the stack.
__ SmiUntag(eax);
__ push(eax);
// Generate the code for doing the frame-to-frame translation using
// the deoptimizer infrastructure.
Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
generator.Generate();
}
#undef __
} } // namespace v8::internal

File diff suppressed because it is too large.


@@ -83,7 +83,7 @@ class GenericBinaryOpStub: public CodeStub {
args_in_registers_(false),
args_reversed_(false),
static_operands_type_(operands_type),
runtime_operands_type_(BinaryOpIC::DEFAULT),
runtime_operands_type_(BinaryOpIC::UNINIT_OR_SMI),
name_(NULL) {
if (static_operands_type_.IsSmi()) {
mode_ = NO_OVERWRITE;
@@ -117,6 +117,11 @@
|| op_ == Token::MUL || op_ == Token::DIV;
}
void SetArgsInRegisters() {
ASSERT(ArgsInRegistersSupported());
args_in_registers_ = true;
}
private:
Token::Value op_;
OverwriteMode mode_;
@@ -157,7 +162,7 @@ class GenericBinaryOpStub: public CodeStub {
class ArgsReversedBits: public BitField<bool, 11, 1> {};
class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
class StaticTypeInfoBits: public BitField<int, 13, 3> {};
class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 2> {};
class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 3> {};
Major MajorKey() { return GenericBinaryOp; }
int MinorKey() {
@@ -185,7 +190,6 @@ class GenericBinaryOpStub: public CodeStub {
return (op_ == Token::ADD) || (op_ == Token::MUL);
}
void SetArgsInRegisters() { args_in_registers_ = true; }
void SetArgsReversed() { args_reversed_ = true; }
bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
bool HasArgsInRegisters() { return args_in_registers_; }
@@ -207,6 +211,123 @@ class GenericBinaryOpStub: public CodeStub {
return BinaryOpIC::ToState(runtime_operands_type_);
}
virtual void FinishCode(Code* code) {
code->set_binary_op_type(runtime_operands_type_);
}
friend class CodeGenerator;
};
class TypeRecordingBinaryOpStub: public CodeStub {
public:
TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
: op_(op),
mode_(mode),
operands_type_(TRBinaryOpIC::UNINITIALIZED),
result_type_(TRBinaryOpIC::UNINITIALIZED),
name_(NULL) {
use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
TypeRecordingBinaryOpStub(int key,
TRBinaryOpIC::TypeInfo operands_type,
TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
use_sse3_(SSE3Bits::decode(key)),
operands_type_(operands_type),
result_type_(result_type),
name_(NULL) {
}
// Generate code to call the stub with the supplied arguments. This will add
// code at the call site to prepare arguments either in registers or on the
// stack together with the actual call.
void GenerateCall(MacroAssembler* masm, Register left, Register right);
void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
private:
enum SmiCodeGenerateHeapNumberResults {
ALLOW_HEAPNUMBER_RESULTS,
NO_HEAPNUMBER_RESULTS
};
Token::Value op_;
OverwriteMode mode_;
bool use_sse3_;
// Operand type information determined at runtime.
TRBinaryOpIC::TypeInfo operands_type_;
TRBinaryOpIC::TypeInfo result_type_;
char* name_;
const char* GetName();
#ifdef DEBUG
void Print() {
PrintF("TypeRecordingBinaryOpStub %d (op %s), "
"(mode %d, runtime_type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
TRBinaryOpIC::GetName(operands_type_));
}
#endif
// Minor key encoding in 16 bits RRRTTTSOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 7> {};
class SSE3Bits: public BitField<bool, 9, 1> {};
class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};
Major MajorKey() { return TypeRecordingBinaryOp; }
int MinorKey() {
return OpBits::encode(op_)
| ModeBits::encode(mode_)
| SSE3Bits::encode(use_sse3_)
| OperandTypeInfoBits::encode(operands_type_)
| ResultTypeInfoBits::encode(result_type_);
}
void Generate(MacroAssembler* masm);
void GenerateGeneric(MacroAssembler* masm);
void GenerateSmiCode(MacroAssembler* masm,
Label* slow,
SmiCodeGenerateHeapNumberResults heapnumber_results);
void GenerateLoadArguments(MacroAssembler* masm);
void GenerateReturn(MacroAssembler* masm);
void GenerateUninitializedStub(MacroAssembler* masm);
void GenerateSmiStub(MacroAssembler* masm);
void GenerateInt32Stub(MacroAssembler* masm);
void GenerateHeapNumberStub(MacroAssembler* masm);
void GenerateStringStub(MacroAssembler* masm);
void GenerateGenericStub(MacroAssembler* masm);
void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
void GenerateRegisterArgsPush(MacroAssembler* masm);
void GenerateTypeTransition(MacroAssembler* masm);
void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
bool IsOperationCommutative() {
return (op_ == Token::ADD) || (op_ == Token::MUL);
}
virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
virtual InlineCacheState GetICState() {
return TRBinaryOpIC::ToState(operands_type_);
}
virtual void FinishCode(Code* code) {
code->set_type_recording_binary_op_type(operands_type_);
code->set_type_recording_binary_op_result_type(result_type_);
}
friend class CodeGenerator;
};
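TypeRecordingBinaryOpStub packs its parameters into the code object's minor key with BitFields, per the "RRRTTTSOOOOOOOMM" comment above: 2 mode bits, 7 op bits, one SSE3 bit, and 3 bits each for the operand-type and result-type info. A standalone sketch with a minimal BitField template (modeled on V8's, not the real class) showing the encode/decode round trip:

// Illustrative sketch (not part of the commit): 16-bit minor-key packing.
#include <cassert>

template <class T, int shift, int size>
struct BitField {
  static const int kMask = ((1 << size) - 1) << shift;
  static int encode(T value) { return static_cast<int>(value) << shift; }
  static T decode(int key) { return static_cast<T>((key & kMask) >> shift); }
};

typedef BitField<int, 0, 2>  ModeBits;         // MM
typedef BitField<int, 2, 7>  OpBits;           // OOOOOOO
typedef BitField<bool, 9, 1> SSE3Bits;         // S
typedef BitField<int, 10, 3> OperandTypeBits;  // TTT
typedef BitField<int, 13, 3> ResultTypeBits;   // RRR

int main() {
  int key = OpBits::encode(42)
          | ModeBits::encode(3)
          | SSE3Bits::encode(true)
          | OperandTypeBits::encode(5)
          | ResultTypeBits::encode(2);
  assert(key < (1 << 16));              // everything fits in 16 bits
  assert(ModeBits::decode(key) == 3);
  assert(OpBits::decode(key) == 42);
  assert(SSE3Bits::decode(key) == true);
  assert(OperandTypeBits::decode(key) == 5);
  assert(ResultTypeBits::decode(key) == 2);
  return 0;
}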


@@ -104,12 +104,12 @@ void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
}
void ICRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterInternalFrame();
}
void ICRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
masm->LeaveInternalFrame();
}
@@ -7398,6 +7398,7 @@ void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
Load(args->at(1));
Load(args->at(2));
Load(args->at(3));
RegExpExecStub stub;
Result result = frame_->CallStub(&stub, 4);
frame_->Push(&result);
@@ -7405,91 +7406,15 @@ void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
// No stub. This code only occurs a few times in regexp.js.
const int kMaxInlineLength = 100;
ASSERT_EQ(3, args->length());
Load(args->at(0)); // Size of array, smi.
Load(args->at(1)); // "index" property value.
Load(args->at(2)); // "input" property value.
{
VirtualFrame::SpilledScope spilled_scope;
Label slowcase;
Label done;
__ mov(ebx, Operand(esp, kPointerSize * 2));
__ test(ebx, Immediate(kSmiTagMask));
__ j(not_zero, &slowcase);
__ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength)));
__ j(above, &slowcase);
// Smi-tagging is equivalent to multiplying by 2.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
// Allocate RegExpResult followed by FixedArray with size in ebx.
// JSArray: [Map][empty properties][Elements][Length-smi][index][input]
// Elements: [Map][Length][..elements..]
__ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
times_half_pointer_size,
ebx, // In: Number of elements (times 2, being a smi)
eax, // Out: Start of allocation (tagged).
ecx, // Out: End of allocation.
edx, // Scratch register
&slowcase,
TAG_OBJECT);
// eax: Start of allocated area, object-tagged.
// Set JSArray map to global.regexp_result_map().
// Set empty properties FixedArray.
// Set elements to point to FixedArray allocated right after the JSArray.
// Interleave operations for better latency.
__ mov(edx, ContextOperand(esi, Context::GLOBAL_INDEX));
__ mov(ecx, Immediate(Factory::empty_fixed_array()));
__ lea(ebx, Operand(eax, JSRegExpResult::kSize));
__ mov(edx, FieldOperand(edx, GlobalObject::kGlobalContextOffset));
__ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
__ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
__ mov(FieldOperand(eax, HeapObject::kMapOffset), edx);
// Set input, index and length fields from arguments.
__ pop(FieldOperand(eax, JSRegExpResult::kInputOffset));
__ pop(FieldOperand(eax, JSRegExpResult::kIndexOffset));
__ pop(ecx);
__ mov(FieldOperand(eax, JSArray::kLengthOffset), ecx);
// Fill out the elements FixedArray.
// eax: JSArray.
// ebx: FixedArray.
// ecx: Number of elements in array, as smi.
// Set map.
__ mov(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(Factory::fixed_array_map()));
// Set length.
__ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
// Fill contents of fixed-array with the-hole.
__ SmiUntag(ecx);
__ mov(edx, Immediate(Factory::the_hole_value()));
__ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
// Fill fixed array elements with hole.
// eax: JSArray.
// ecx: Number of elements to fill.
// ebx: Start of elements in FixedArray.
// edx: the hole.
Label loop;
__ test(ecx, Operand(ecx));
__ bind(&loop);
__ j(less_equal, &done); // Jump if ecx is negative or zero.
__ sub(Operand(ecx), Immediate(1));
__ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
__ jmp(&loop);
__ bind(&slowcase);
__ CallRuntime(Runtime::kRegExpConstructResult, 3);
__ bind(&done);
}
frame_->Forget(3);
frame_->Push(eax);
RegExpConstructResultStub stub;
Result result = frame_->CallStub(&stub, 3);
frame_->Push(&result);
}
@@ -10082,14 +10007,15 @@ void Reference::SetValue(InitState init_state) {
#define __ masm.
static void MemCopyWrapper(void* dest, const void* src, size_t size) {
memcpy(dest, src, size);
}
MemCopyFunction CreateMemCopyFunction() {
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
&actual_size,
true));
CHECK(buffer);
HandleScope handles;
MacroAssembler masm(buffer, static_cast<int>(actual_size));
HandleScope scope;
MacroAssembler masm(NULL, 1 * KB);
// Generated code is put into a fixed, unmovable, buffer, and not into
// the V8 heap. We can't, and don't, refer to any relocatable addresses
@@ -10183,6 +10109,7 @@ MemCopyFunction CreateMemCopyFunction() {
__ movdqu(xmm0, Operand(src, count, times_1, -0x10));
__ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
__ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
__ pop(esi);
__ pop(edi);
__ ret(0);
@@ -10229,6 +10156,7 @@ MemCopyFunction CreateMemCopyFunction() {
__ movdqu(xmm0, Operand(src, count, times_1, -0x10));
__ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
__ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
__ pop(esi);
__ pop(edi);
__ ret(0);
@@ -10272,6 +10200,7 @@ MemCopyFunction CreateMemCopyFunction() {
__ mov(eax, Operand(src, count, times_1, -4));
__ mov(Operand(dst, count, times_1, -4), eax);
__ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
__ pop(esi);
__ pop(edi);
__ ret(0);
@@ -10279,8 +10208,15 @@ MemCopyFunction CreateMemCopyFunction() {
CodeDesc desc;
masm.GetCode(&desc);
// Call the function from C++.
return FUNCTION_CAST<MemCopyFunction>(buffer);
ASSERT(desc.reloc_size == 0);
// Copy the generated code into an executable chunk and return a pointer
// to the first instruction in it as a C++ function pointer.
LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
if (chunk == NULL) return &MemCopyWrapper;
memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
return FUNCTION_CAST<MemCopyFunction>(chunk->GetStartAddress());
}
#undef __
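CreateMemCopyFunction above now copies the generated code into an executable LargeObjectChunk and, if that allocation fails, simply returns the plain C++ MemCopyWrapper. Stripped of the code generation itself, the selection pattern looks roughly like the sketch below; CanUseFastPath and FastCopy are hypothetical stand-ins for the generated-code path.

// Illustrative sketch (not part of the commit): resolve a copy routine once,
// preferring a specialized implementation and falling back to a portable
// wrapper when it is unavailable.
#include <cassert>
#include <cstddef>
#include <cstring>

typedef void (*MemCopyFunction)(void* dest, const void* src, size_t size);

static void MemCopyWrapper(void* dest, const void* src, size_t size) {
  std::memcpy(dest, src, size);
}

// Hypothetical probe, e.g. "did the executable chunk allocation succeed?".
static bool CanUseFastPath() { return false; }

static void FastCopy(void* dest, const void* src, size_t size) {
  std::memcpy(dest, src, size);  // placeholder for the runtime-generated code
}

static MemCopyFunction CreateMemCopy() {
  if (!CanUseFastPath()) return &MemCopyWrapper;  // the fallback in the diff
  return &FastCopy;
}

int main() {
  MemCopyFunction copy = CreateMemCopy();
  char source[] = "hello";
  char destination[sizeof(source)];
  copy(destination, source, sizeof(source));
  assert(std::memcmp(destination, source, sizeof(source)) == 0);
  return 0;
}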


@@ -43,9 +43,6 @@ class RegisterAllocator;
class RegisterFile;
class RuntimeCallHelper;
enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// -------------------------------------------------------------------------
// Reference support
@@ -310,6 +307,9 @@ class CodeGenerator: public AstVisitor {
Code::Flags flags,
CompilationInfo* info);
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
#ifdef ENABLE_LOGGING_AND_PROFILING
static bool ShouldGenerateLog(Expression* type);
#endif
@@ -398,8 +398,9 @@ class CodeGenerator: public AstVisitor {
// Node visitors.
void VisitStatements(ZoneList<Statement*>* statements);
virtual void VisitSlot(Slot* node);
#define DEF_VISIT(type) \
void Visit##type(type* node);
virtual void Visit##type(type* node);
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
@@ -783,6 +784,7 @@ class CodeGenerator: public AstVisitor {
friend class FastCodeGenerator;
friend class FullCodeGenerator;
friend class FullCodeGenSyntaxChecker;
friend class LCodeGen;
friend class CodeGeneratorPatcher; // Used in test-log-stack-tracer.cc


@@ -42,7 +42,11 @@ namespace v8 {
namespace internal {
void CPU::Setup() {
CpuFeatures::Probe();
CpuFeatures::Clear();
CpuFeatures::Probe(true);
if (!CpuFeatures::IsSupported(SSE2) || Serializer::enabled()) {
V8::DisableCrankshaft();
}
}

Some files were not shown because too many files have changed in this diff.