Changed all text files to have native svn:eol-style.

Added a few samples and support for building them. The samples include a simple shell that can be used to benchmark and test V8.

Changed V8::GetVersion to return the version as a string.
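For embedders, the new API reads like this minimal sketch (the printed
format mirrors the shell sample added by this commit):

  #include <v8.h>
  #include <cstdio>

  int main() {
    // GetVersion() now returns a version string such as "0.2.0 (129146)"
    // instead of filling in the removed VersionInfo struct.
    printf("V8 version %s\n", v8::V8::GetVersion());
    return 0;
  }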

Added source for lazily loaded scripts to snapshots and made serialization non-destructive.

Improved ARM support by fixing the write barrier code to use aligned loads and stores and by removing a premature locals optimization that relied on broken support for callee-saved registers (that callee-saved register support has also been removed).

Refactored the code for marking live objects during garbage collection and the code for allocating objects in paged spaces. Introduced an abstraction for the map word of a heap-allocated object and changed the memory allocator to allocate executable memory only for spaces that may contain code objects.
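The map word can be pictured with the rough sketch below; the idea (a map
pointer or a forwarding address sharing one word) follows the description
above, but the tagging scheme and member names are illustrative
assumptions, not code from this commit:

  #include <stdint.h>

  // Illustrative only: the first word of a heap object either points to
  // the object's map or, while the collector is moving objects, holds a
  // forwarding address; a low tag bit tells the two apart (heap pointers
  // are assumed to be at least 2-byte aligned).
  class MapWord {
   public:
    static MapWord FromMap(void* map) {
      return MapWord(reinterpret_cast<uintptr_t>(map));
    }
    static MapWord FromForwardingAddress(void* target) {
      return MapWord(reinterpret_cast<uintptr_t>(target) | 1);
    }
    bool IsForwardingAddress() const { return (value_ & 1) != 0; }
    void* ToMap() const { return reinterpret_cast<void*>(value_); }
    void* ToForwardingAddress() const {
      return reinterpret_cast<void*>(value_ & ~static_cast<uintptr_t>(1));
    }
   private:
    explicit MapWord(uintptr_t value) : value_(value) {}
    uintptr_t value_;
  };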

Moved StringBuilder to utils.h and ScopedLock to platform.h, where they can be used by debugging and logging modules. Added thread-safe message queues for dealing with debugger events.
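The ScopedLock in question is a plain RAII lock; a minimal sketch,
assuming a Mutex with Lock/Unlock methods (the actual platform.h
interface may differ):

  // Stand-in mutex with the assumed Lock/Unlock interface; a real
  // implementation would wrap a pthread mutex or a Win32 critical section.
  class Mutex {
   public:
    void Lock() { /* acquire the underlying OS lock */ }
    void Unlock() { /* release the underlying OS lock */ }
  };

  // Hold the mutex for the lifetime of the ScopedLock; the destructor
  // releases it even on early returns, which is what makes it convenient
  // around the debugger message queues.
  class ScopedLock {
   public:
    explicit ScopedLock(Mutex* mutex) : mutex_(mutex) { mutex_->Lock(); }
    ~ScopedLock() { mutex_->Unlock(); }
   private:
    Mutex* mutex_;
  };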

Fixed the source code reported by toString for certain builtin empty functions and made sure that the prototype property of a function is enumerable.

Improved performance of converting values to condition flags in generated code.

Merged disassembler-{arch} files.


git-svn-id: http://v8.googlecode.com/svn/trunk@8 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
kasper.lund 2008-07-30 08:49:36 +00:00
parent af4734f10f
commit 7276f14ca7
81 changed files with 4072 additions and 3066 deletions

View File

@@ -1,3 +1,40 @@
2008-07-30: Version 0.2.0 (129146)
Changed all text files to have native svn:eol-style.
Added a few samples and support for building them. The samples
include a simple shell that can be used to benchmark and test V8.
Changed V8::GetVersion to return the version as a string.
Added source for lazily loaded scripts to snapshots and made
serialization non-destructive.
Improved ARM support by fixing the write barrier code to use
aligned loads and stores and by removing a premature locals
optimization that relied on broken support for callee-saved
registers (that register support has also been removed).
Refactored the code for marking live objects during garbage
collection and the code for allocating objects in paged
spaces. Introduced an abstraction for the map word of a heap-
allocated object and changed the memory allocator to allocate
executable memory only for spaces that may contain code objects.
Moved StringBuilder to utils.h and ScopedLock to platform.h, where
they can be used by debugging and logging modules. Added
thread-safe message queues for dealing with debugger events.
Fixed the source code reported by toString for certain builtin
empty functions and made sure that the prototype property of a
function is enumerable.
Improved performance of converting values to condition flags in
generated code.
Merged disassembler-{arch} files.
2008-07-28: Version 0.1.4 (128918)
Added support for storing JavaScript stack traces in a stack

View File

@@ -64,10 +64,7 @@ def GuessProcessor():
def GuessToolchain(os):
tools = Environment()['TOOLS']
if 'gcc' in tools:
if os == 'macos' and 'Kernel Version 8' in platform.version():
return 'gcc-darwin'
else:
return 'gcc'
return 'gcc'
elif 'msvc' in tools:
return 'msvc'
else:
@@ -79,19 +76,20 @@ def GetOptions():
os_guess = GuessOS()
toolchain_guess = GuessToolchain(os_guess)
processor_guess = GuessProcessor()
result.Add('mode', 'debug or release', 'release')
result.Add('toolchain', 'the toolchain to use (gcc, gcc-darwin or msvc)', toolchain_guess)
result.Add('os', 'the os to build for (linux, macos or win32)', os_guess)
result.Add('processor', 'the processor to build for (arm or ia32)', processor_guess)
result.Add('mode', 'compilation mode (debug, release)', 'release')
result.Add('toolchain', 'the toolchain to use (gcc, msvc)', toolchain_guess)
result.Add('os', 'the os to build for (linux, macos, win32)', os_guess)
result.Add('processor', 'the processor to build for (arm, ia32)', processor_guess)
result.Add('snapshot', 'build using snapshots for faster start-up (on, off)', 'off')
result.Add('library', 'which type of library to produce (static, shared, default)', 'default')
result.Add('sample', 'build sample (process, shell)', '')
return result
def VerifyOptions(env):
if not env['mode'] in ['debug', 'release']:
Abort("Unknown build mode '%s'." % env['mode'])
if not env['toolchain'] in ['gcc', 'gcc-darwin', 'msvc']:
if not env['toolchain'] in ['gcc', 'msvc']:
Abort("Unknown toolchain '%s'." % env['toolchain'])
if not env['os'] in ['linux', 'macos', 'win32']:
Abort("Unknown os '%s'." % env['os'])
@@ -101,9 +99,11 @@ def VerifyOptions(env):
Abort("Illegal value for option snapshot: '%s'." % env['snapshot'])
if not env['library'] in ['static', 'shared', 'default']:
Abort("Illegal value for option library: '%s'." % env['library'])
if not env['sample'] in ['', 'process', 'shell']:
Abort("Illegal value for option sample: '%s'." % env['sample'])
def Start():
def Build():
opts = GetOptions()
env = Environment(options=opts)
Help(opts.GenerateHelpText(env))
@@ -116,12 +116,38 @@ def Start():
use_snapshot = (env['snapshot'] == 'on')
library_type = env['library']
env.SConscript(
# Build the object files by invoking SCons recursively.
object_files = env.SConscript(
join('src', 'SConscript'),
build_dir=mode,
build_dir='build',
exports='toolchain arch os mode use_snapshot library_type',
duplicate=False
)
# Link the object files into a library.
if library_type == 'static':
library = env.StaticLibrary('v8', object_files)
elif library_type == 'shared':
# There seems to be a glitch in the way scons decides where to put
# PDB files when compiling using MSVC so we specify it manually.
# This should not affect any other platforms.
library = env.SharedLibrary('v8', object_files, PDB='v8.dll.pdb')
else:
library = env.Library('v8', object_files)
Start()
# Bail out if we're not building any sample.
sample = env['sample']
if not sample: return
# Build the sample.
env.Replace(CPPPATH='public')
object_path = join('build', 'samples', sample)
source_path = join('samples', sample + '.cc')
object = env.Object(object_path, source_path)
if toolchain == 'gcc':
env.Program(sample, [object, library], LIBS='pthread')
else:
env.Program(sample, [object, library], LIBS='WS2_32')
Build()
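Putting the new options together, building and then running a sample
could look like this (standard SCons key=value option syntax; the exact
invocation is illustrative):

  scons mode=release sample=shell
  ./shell --shell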

View File

@@ -41,14 +41,13 @@
*/
namespace v8 {
// Debug events which can occour in the V8 JavaScript engine.
// Debug events which can occur in the V8 JavaScript engine.
enum DebugEvent {
Break = 1,
Exception = 2,
NewFunction = 3,
BeforeCompile = 4,
AfterCompile = 5,
PendingRequestProcessed = 6
AfterCompile = 5
};
@@ -72,6 +71,8 @@ typedef void (*DebugEventCallback)(DebugEvent event,
*
* \param message the debug message
* \param length length of the message
* A DebugMessageHandler does not take possession of the message string,
* and must not rely on the message data persisting after the handler returns.
*/
typedef void (*DebugMessageHandler)(const uint16_t* message, int length,
void* data);
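Concretely, a handler that needs the message after returning should copy
it first; a minimal sketch matching the typedef above (the queueing step
is an assumed detail):

  #include <stdint.h>
  #include <vector>

  // Copy the UTF-16 message data, since the buffer is only guaranteed to
  // be valid for the duration of the call.
  void MyMessageHandler(const uint16_t* message, int length, void* data) {
    std::vector<uint16_t> copy(message, message + length);
    // ... hand 'copy' off to another thread or queue it for later ...
  }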

View File

@@ -747,7 +747,7 @@ class String : public Primitive {
* be careful to supply the length parameter.
* If it is not given, the function calls
* 'strlen' to determine the buffer length, it might be
* wrong if '\0' character is in the 'data'.
* wrong if 'data' contains a null character.
*/
static Local<String> New(const char* data, int length = -1);
@@ -777,10 +777,10 @@ class String : public Primitive {
*/
static Local<String> NewExternal(ExternalAsciiStringResource* resource);
/** Creates an undetectable string from the supplied character.*/
/** Creates an undetectable string from the supplied ascii or utf-8 data.*/
static Local<String> NewUndetectable(const char* data, int length = -1);
/** Creates an undetectable string from the supplied unsigned integer.*/
/** Creates an undetectable string from the supplied utf-16 data.*/
static Local<String> NewUndetectable(const uint16_t* data, int length = -1);
/**
@@ -1562,13 +1562,6 @@ class Exception {
};
/**
* Ignore
*/
struct VersionInfo {
int major, minor, build_major, build_minor, revision;
};
// --- C o u n t e r s C a l l b a c k s
typedef int* (*CounterLookupCallback)(const wchar_t* name);
@@ -1633,8 +1626,8 @@ class V8 {
*/
static void SetFlagsFromString(const char* str, int length);
/** Sets the version fields in the given VersionInfo struct.*/
static void GetVersion(VersionInfo* info);
/** Get the version string. */
static const char* GetVersion();
/**
* Enables the host application to provide a mechanism for recording
@@ -1683,6 +1676,14 @@ class V8 {
*/
static bool Initialize();
/**
* Adjusts the amount of registered external memory.
* Returns the adjusted value.
* Used to trigger a global GC earlier than it would otherwise happen.
*/
static int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
private:
V8();
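For example, an embedder holding a large external buffer alive from a
JavaScript object might report it like this (the 1 MB figure and the
function names are illustrative):

  #include <v8.h>

  // Tell the GC about 1 MB of external memory held alive from JavaScript,
  // and balance the adjustment when that memory is released.
  void OnExternalBufferAllocated() {
    v8::V8::AdjustAmountOfExternalAllocatedMemory(1024 * 1024);
  }
  void OnExternalBufferFreed() {
    v8::V8::AdjustAmountOfExternalAllocatedMemory(-(1024 * 1024));
  }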

samples/count-hosts.js Normal file
View File

@@ -0,0 +1,42 @@
// Copyright 2008 Google Inc. All Rights Reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
function Initialize() { }
function Process(request) {
if (options.verbose) {
log("Processing " + request.host + request.path +
" from " + request.referrer + "@" + request.userAgent);
}
if (!output[request.host]) {
output[request.host] = 1;
} else {
output[request.host]++;
}
}
Initialize();

samples/process.cc Normal file
View File

@@ -0,0 +1,611 @@
// Copyright 2008 Google Inc. All Rights Reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <v8.h>
#include <string>
#include <map>
using namespace std;
using namespace v8;
// These interfaces represent an existing request processing interface.
// The idea is to imagine a real application that uses these interfaces
// and then add scripting capabilities that allow you to interact with
// the objects through JavaScript.
/**
* A simplified http request.
*/
class HttpRequest {
public:
virtual ~HttpRequest() { }
virtual const string& Path() = 0;
virtual const string& Referrer() = 0;
virtual const string& Host() = 0;
virtual const string& UserAgent() = 0;
};
/**
* The abstract superclass of http request processors.
*/
class HttpRequestProcessor {
public:
virtual ~HttpRequestProcessor() { }
// Initialize this processor. The map contains options that control
// how requests should be processed.
virtual bool Initialize(map<string, string>* options,
map<string, string>* output) = 0;
// Process a single request.
virtual bool Process(HttpRequest* req) = 0;
static void Log(const char* event);
};
/**
* An http request processor that is scriptable using JavaScript.
*/
class JsHttpRequestProcessor : public HttpRequestProcessor {
public:
// Creates a new processor that processes requests by invoking the
// Process function of the JavaScript script given as an argument.
JsHttpRequestProcessor(Handle<String> script) : script_(script) { }
virtual ~JsHttpRequestProcessor();
virtual bool Initialize(map<string, string>* opts,
map<string, string>* output);
virtual bool Process(HttpRequest* req);
private:
// Execute the script associated with this processor and extract the
// Process function. Returns true if this succeeded, otherwise false.
bool ExecuteScript(Handle<String> script);
// Wrap the options and output maps in JavaScript objects and
// install them in the global namespace as 'options' and 'output'.
bool InstallMaps(map<string, string>* opts, map<string, string>* output);
// Constructs the template that describes the JavaScript wrapper
// type for requests.
static Handle<ObjectTemplate> MakeRequestTemplate();
static Handle<ObjectTemplate> MakeMapTemplate();
// Callbacks that access the individual fields of request objects.
static Handle<Value> GetPath(Local<String> name, const AccessorInfo& info);
static Handle<Value> GetReferrer(Local<String> name, const AccessorInfo& info);
static Handle<Value> GetHost(Local<String> name, const AccessorInfo& info);
static Handle<Value> GetUserAgent(Local<String> name, const AccessorInfo& info);
// Callbacks that access maps
static Handle<Value> MapGet(Local<String> name, const AccessorInfo& info);
static Handle<Value> MapSet(Local<String> name, Local<Value> value,
const AccessorInfo& info);
// Utility methods for wrapping C++ objects as JavaScript objects,
// and going back again.
static Handle<Object> WrapMap(map<string, string>* obj);
static map<string, string>* UnwrapMap(Handle<Object> obj);
static Handle<Object> WrapRequest(HttpRequest* obj);
static HttpRequest* UnwrapRequest(Handle<Object> obj);
Handle<String> script_;
Persistent<Context> context_;
Persistent<Function> process_;
static Persistent<ObjectTemplate> request_template_;
static Persistent<ObjectTemplate> map_template_;
};
// -------------------------
// --- P r o c e s s o r ---
// -------------------------
static Handle<Value> LogCallback(const Arguments& args) {
if (args.Length() < 1) return v8::Undefined();
HandleScope scope;
Handle<Value> arg = args[0];
String::AsciiValue value(arg);
HttpRequestProcessor::Log(*value);
return v8::Undefined();
}
// Execute the script and fetch the Process method.
bool JsHttpRequestProcessor::Initialize(map<string, string>* opts,
map<string, string>* output) {
// Create a handle scope to hold the temporary references.
HandleScope handle_scope;
// Create a template for the global object where we set the
// built-in global functions.
Handle<ObjectTemplate> global = ObjectTemplate::New();
global->Set(String::New("log"), FunctionTemplate::New(LogCallback));
// Each processor gets its own context so different processors
// don't affect each other (ignore the first three lines).
Handle<Context> context = Context::New(NULL, global);
// Store the context in the processor object in a persistent handle,
// since we want the reference to remain after we return from this
// method.
context_ = Persistent<Context>::New(context);
// Enter the new context so all the following operations take place
// within it.
Context::Scope context_scope(context);
// Make the options mapping available within the context
if (!InstallMaps(opts, output))
return false;
// Compile and run the script
if (!ExecuteScript(script_))
return false;
// The script compiled and ran correctly. Now we fetch out the
// Process function from the global object.
Handle<String> process_name = String::New("Process");
Handle<Value> process_val = context->Global()->Get(process_name);
// If there is no Process function, or if it is not a function,
// bail out
if (!process_val->IsFunction()) return false;
// It is a function; cast it to a Function
Handle<Function> process_fun = Handle<Function>::Cast(process_val);
// Store the function in a Persistent handle, since we also want
// that to remain after this call returns
process_ = Persistent<Function>::New(process_fun);
// All done; all went well
return true;
}
bool JsHttpRequestProcessor::ExecuteScript(Handle<String> script) {
HandleScope handle_scope;
// We're just about to compile the script; set up an error handler to
// catch any exceptions the script might throw.
TryCatch try_catch;
// Compile the script and check for errors.
Handle<Script> compiled_script = Script::Compile(script);
if (compiled_script.IsEmpty()) {
String::AsciiValue error(try_catch.Exception());
Log(*error);
// The script failed to compile; bail out.
return false;
}
// Run the script!
Handle<Value> result = compiled_script->Run();
if (result.IsEmpty()) {
// The TryCatch above is still in effect and will have caught the error.
String::AsciiValue error(try_catch.Exception());
Log(*error);
// Running the script failed; bail out.
return false;
}
return true;
}
bool JsHttpRequestProcessor::InstallMaps(map<string, string>* opts,
map<string, string>* output) {
HandleScope handle_scope;
// Wrap the map object in a JavaScript wrapper
Handle<Object> opts_obj = WrapMap(opts);
// Set the options object as a property on the global object.
context_->Global()->Set(String::New("options"), opts_obj);
Handle<Object> output_obj = WrapMap(output);
context_->Global()->Set(String::New("output"), output_obj);
return true;
}
bool JsHttpRequestProcessor::Process(HttpRequest* request) {
// Create a handle scope to keep the temporary object references.
HandleScope handle_scope;
// Enter this processor's context so all the remaining operations
// take place there
Context::Scope context_scope(context_);
// Wrap the C++ request object in a JavaScript wrapper
Handle<Object> request_obj = WrapRequest(request);
// Set up an exception handler before calling the Process function
TryCatch try_catch;
// Invoke the process function, giving the global object as 'this'
// and one argument, the request.
const int argc = 1;
Handle<Value> argv[argc] = { request_obj };
Handle<Value> result = process_->Call(context_->Global(), argc, argv);
if (result.IsEmpty()) {
String::AsciiValue error(try_catch.Exception());
Log(*error);
return false;
} else {
return true;
}
}
JsHttpRequestProcessor::~JsHttpRequestProcessor() {
// Dispose the persistent handles. When no one else has any
// references to the objects stored in the handles, they will be
// automatically reclaimed.
context_.Dispose();
process_.Dispose();
}
Persistent<ObjectTemplate> JsHttpRequestProcessor::request_template_;
Persistent<ObjectTemplate> JsHttpRequestProcessor::map_template_;
// -----------------------------------
// --- A c c e s s i n g M a p s ---
// -----------------------------------
// Utility function that wraps a C++ http request object in a
// JavaScript object.
Handle<Object> JsHttpRequestProcessor::WrapMap(map<string, string>* obj) {
// Handle scope for temporary handles.
HandleScope handle_scope;
// Fetch the template for creating JavaScript map wrappers.
// It only has to be created once, which we do on demand.
if (request_template_.IsEmpty()) {
Handle<ObjectTemplate> raw_template = MakeMapTemplate();
map_template_ = Persistent<ObjectTemplate>::New(raw_template);
}
Handle<ObjectTemplate> templ = map_template_;
// Create an empty map wrapper.
Handle<Object> result = templ->NewInstance();
// Wrap the raw C++ pointer in an External so it can be referenced
// from within JavaScript.
Handle<External> map_ptr = External::New(obj);
// Store the map pointer in the JavaScript wrapper.
result->SetInternalField(0, map_ptr);
// Return the result through the current handle scope. Since each
// of these handles will go away when the handle scope is deleted
// we need to call Close to let one, the result, escape into the
// outer handle scope.
return handle_scope.Close(result);
}
// Utility function that extracts the C++ map pointer from a wrapper
// object.
map<string, string>* JsHttpRequestProcessor::UnwrapMap(Handle<Object> obj) {
Handle<External> field = Handle<External>::Cast(obj->GetInternalField(0));
void* ptr = field->Value();
return static_cast<map<string, string>*>(ptr);
}
// Convert a JavaScript string to a std::string. To not bother too
// much with string encodings we just use ascii.
string ObjectToString(Local<Value> value) {
String::AsciiValue ascii_value(value);
return string(*ascii_value);
}
Handle<Value> JsHttpRequestProcessor::MapGet(Local<String> name,
const AccessorInfo& info) {
// Fetch the map wrapped by this object.
map<string, string>* obj = UnwrapMap(info.Holder());
// Convert the JavaScript string to a std::string.
string key = ObjectToString(name);
// Look up the value, if it exists, using the standard STL idiom.
map<string, string>::iterator iter = obj->find(key);
// If the key is not present return an empty handle as signal
if (iter == obj->end()) return Handle<Value>();
// Otherwise fetch the value and wrap it in a JavaScript string
const string& value = (*iter).second;
return String::New(value.c_str(), value.length());
}
Handle<Value> JsHttpRequestProcessor::MapSet(Local<String> name,
Local<Value> value_obj, const AccessorInfo& info) {
// Fetch the map wrapped by this object.
map<string, string>* obj = UnwrapMap(info.Holder());
// Convert the key and value to std::strings.
string key = ObjectToString(name);
string value = ObjectToString(value_obj);
// Update the map.
(*obj)[key] = value;
// Return the value; any non-empty handle will work.
return value_obj;
}
Handle<ObjectTemplate> JsHttpRequestProcessor::MakeMapTemplate() {
HandleScope handle_scope;
Handle<ObjectTemplate> result = ObjectTemplate::New();
result->SetInternalFieldCount(1);
result->SetNamedPropertyHandler(MapGet, MapSet);
// Again, return the result through the current handle scope.
return handle_scope.Close(result);
}
// -------------------------------------------
// --- A c c e s s i n g R e q u e s t s ---
// -------------------------------------------
/**
* Utility function that wraps a C++ http request object in a
* JavaScript object.
*/
Handle<Object> JsHttpRequestProcessor::WrapRequest(HttpRequest* request) {
// Handle scope for temporary handles.
HandleScope handle_scope;
// Fetch the template for creating JavaScript http request wrappers.
// It only has to be created once, which we do on demand.
if (request_template_.IsEmpty()) {
Handle<ObjectTemplate> raw_template = MakeRequestTemplate();
request_template_ = Persistent<ObjectTemplate>::New(raw_template);
}
Handle<ObjectTemplate> templ = request_template_;
// Create an empty http request wrapper.
Handle<Object> result = templ->NewInstance();
// Wrap the raw C++ pointer in an External so it can be referenced
// from within JavaScript.
Handle<External> request_ptr = External::New(request);
// Store the request pointer in the JavaScript wrapper.
result->SetInternalField(0, request_ptr);
// Return the result through the current handle scope. Since each
// of these handles will go away when the handle scope is deleted
// we need to call Close to let one, the result, escape into the
// outer handle scope.
return handle_scope.Close(result);
}
/**
* Utility function that extracts the C++ http request object from a
* wrapper object.
*/
HttpRequest* JsHttpRequestProcessor::UnwrapRequest(Handle<Object> obj) {
Handle<External> field = Handle<External>::Cast(obj->GetInternalField(0));
void* ptr = field->Value();
return static_cast<HttpRequest*>(ptr);
}
Handle<Value> JsHttpRequestProcessor::GetPath(Local<String> name,
const AccessorInfo& info) {
// Extract the C++ request object from the JavaScript wrapper.
HttpRequest* request = UnwrapRequest(info.Holder());
// Fetch the path.
const string& path = request->Path();
// Wrap the result in a JavaScript string and return it.
return String::New(path.c_str(), path.length());
}
Handle<Value> JsHttpRequestProcessor::GetReferrer(Local<String> name,
const AccessorInfo& info) {
HttpRequest* request = UnwrapRequest(info.Holder());
const string& path = request->Referrer();
return String::New(path.c_str(), path.length());
}
Handle<Value> JsHttpRequestProcessor::GetHost(Local<String> name,
const AccessorInfo& info) {
HttpRequest* request = UnwrapRequest(info.Holder());
const string& path = request->Host();
return String::New(path.c_str(), path.length());
}
Handle<Value> JsHttpRequestProcessor::GetUserAgent(Local<String> name,
const AccessorInfo& info) {
HttpRequest* request = UnwrapRequest(info.Holder());
const string& path = request->UserAgent();
return String::New(path.c_str(), path.length());
}
Handle<ObjectTemplate> JsHttpRequestProcessor::MakeRequestTemplate() {
HandleScope handle_scope;
Handle<ObjectTemplate> result = ObjectTemplate::New();
result->SetInternalFieldCount(1);
// Add accessors for each of the fields of the request.
result->SetAccessor(String::NewSymbol("path"), GetPath);
result->SetAccessor(String::NewSymbol("referrer"), GetReferrer);
result->SetAccessor(String::NewSymbol("host"), GetHost);
result->SetAccessor(String::NewSymbol("userAgent"), GetUserAgent);
// Again, return the result through the current handle scope.
return handle_scope.Close(result);
}
// --- Test ---
void HttpRequestProcessor::Log(const char* event) {
printf("Logged: %s\n", event);
}
/**
* A simplified http request.
*/
class StringHttpRequest : public HttpRequest {
public:
StringHttpRequest(const string& path, const string& referrer,
const string& host, const string& user_agent);
virtual const string& Path() { return path_; }
virtual const string& Referrer() { return referrer_; }
virtual const string& Host() { return host_; }
virtual const string& UserAgent() { return user_agent_; }
private:
string path_;
string referrer_;
string host_;
string user_agent_;
};
StringHttpRequest::StringHttpRequest(const string& path,
const string& referrer, const string& host, const string& user_agent)
: path_(path),
referrer_(referrer),
host_(host),
user_agent_(user_agent) { }
void ParseOptions(int argc, char* argv[], map<string, string>& options,
string* file) {
for (int i = 1; i < argc; i++) {
string arg = argv[i];
int index = arg.find('=', 0);
if (index == string::npos) {
*file = arg;
} else {
string key = arg.substr(0, index);
string value = arg.substr(index+1);
options[key] = value;
}
}
}
// Reads a file into a v8 string.
Handle<String> ReadFile(const string& name) {
FILE* file = fopen(name.c_str(), "rb");
if (file == NULL) return Handle<String>();
fseek(file, 0, SEEK_END);
long size = ftell(file);
rewind(file);
char* chars = new char[size + 1];
chars[size] = '\0';
for (int i = 0; i < size; ) {
int read = fread(&chars[i], 1, size - i, file);
i += read;
}
fclose(file);
Handle<String> result = String::New(chars, size);
delete[] chars;
return result;
}
const int kSampleSize = 6;
StringHttpRequest kSampleRequests[kSampleSize] = {
StringHttpRequest("/process.cc", "localhost", "google.com", "firefox"),
StringHttpRequest("/", "localhost", "google.net", "firefox"),
StringHttpRequest("/", "localhost", "google.org", "safari"),
StringHttpRequest("/", "localhost", "yahoo.com", "ie"),
StringHttpRequest("/", "localhost", "yahoo.com", "safari"),
StringHttpRequest("/", "localhost", "yahoo.com", "firefox")
};
bool ProcessEntries(HttpRequestProcessor* processor, int count,
StringHttpRequest* reqs) {
for (int i = 0; i < count; i++) {
if (!processor->Process(&reqs[i]))
return false;
}
return true;
}
void PrintMap(map<string, string>& m) {
for (map<string, string>::iterator i = m.begin(); i != m.end(); i++) {
pair<string, string> entry = *i;
printf("%s: %s\n", entry.first.c_str(), entry.second.c_str());
}
}
int main(int argc, char* argv[]) {
map<string, string> options;
string file;
ParseOptions(argc, argv, options, &file);
if (file.empty()) {
fprintf(stderr, "No script was specified.\n");
return 1;
}
HandleScope scope;
Handle<String> source = ReadFile(file);
if (source.IsEmpty()) {
fprintf(stderr, "Error reading '%s'.\n", file.c_str());
return 1;
}
JsHttpRequestProcessor processor(source);
map<string, string> output;
if (!processor.Initialize(&options, &output)) {
fprintf(stderr, "Error initializing processor.\n");
return 1;
}
if (!ProcessEntries(&processor, kSampleSize, kSampleRequests))
return 1;
PrintMap(output);
}
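Once built, a run of this sample might look like the following (the
option value is illustrative): any key=value argument becomes an entry in
the options map, and the remaining argument names the script, so

  ./process verbose=true samples/count-hosts.js

runs count-hosts.js with options.verbose set.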

samples/shell.cc Normal file
View File

@@ -0,0 +1,152 @@
// Copyright 2008 Google Inc. All Rights Reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <v8.h>
#include <cstring>
#include <cstdio>
void RunShell(v8::Handle<v8::Context> context);
bool ExecuteString(v8::Handle<v8::String> source);
v8::Handle<v8::Value> Print(const v8::Arguments& args);
v8::Handle<v8::String> ReadFile(const char* name);
int main(int argc, char* argv[]) {
v8::HandleScope handle_scope;
// Create a template for the global object.
v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
// Bind the global 'print' function to the C++ Print callback.
global->Set(v8::String::New("print"), v8::FunctionTemplate::New(Print));
// Create a new execution environment containing the 'print' function.
v8::Handle<v8::Context> context = v8::Context::New(NULL, global);
// Enter the newly created execution environment.
v8::Context::Scope context_scope(context);
bool run_shell = (argc == 1);
for (int i = 1; i < argc; i++) {
const char* str = argv[i];
if (strcmp(str, "--shell") == 0) {
run_shell = true;
} else {
v8::HandleScope handle_scope;
v8::Handle<v8::String> source = ReadFile(str);
if (source.IsEmpty()) {
printf("Error reading '%s'\n", str);
return 1;
}
if (!ExecuteString(source))
return 1;
}
}
if (run_shell) RunShell(context);
return 0;
}
// The callback that is invoked by v8 whenever the JavaScript 'print'
// function is called. Prints its arguments on stdout separated by
// spaces and ending with a newline.
v8::Handle<v8::Value> Print(const v8::Arguments& args) {
bool first = true;
for (int i = 0; i < args.Length(); i++) {
v8::HandleScope handle_scope;
if (first) first = false;
else printf(" ");
v8::String::AsciiValue str(args[i]);
printf("%s", *str);
}
printf("\n");
return v8::Undefined();
}
// Reads a file into a v8 string.
v8::Handle<v8::String> ReadFile(const char* name) {
FILE* file = fopen(name, "rb");
if (file == NULL) return v8::Handle<v8::String>();
fseek(file, 0, SEEK_END);
long size = ftell(file);
rewind(file);
char* chars = new char[size + 1];
chars[size] = '\0';
for (int i = 0; i < size; ) {
int read = fread(&chars[i], 1, size - i, file);
i += read;
}
fclose(file);
v8::Handle<v8::String> result = v8::String::New(chars, size);
delete[] chars;
return result;
}
// The read-eval-execute loop of the shell.
void RunShell(v8::Handle<v8::Context> context) {
printf("V8 version %s\n", v8::V8::GetVersion());
static const int kBufferSize = 256;
while (true) {
char buffer[kBufferSize];
printf("> ");
char* str = fgets(buffer, kBufferSize, stdin);
if (str == NULL) break;
v8::HandleScope handle_scope;
ExecuteString(v8::String::New(str));
}
printf("\n");
}
// Executes a string within the current v8 context.
bool ExecuteString(v8::Handle<v8::String> source) {
v8::HandleScope handle_scope;
v8::TryCatch try_catch;
v8::Handle<v8::Script> script = v8::Script::Compile(source);
if (script.IsEmpty()) {
// Print errors that happened during compilation.
v8::String::AsciiValue error(try_catch.Exception());
printf("%s\n", *error);
return false;
} else {
v8::Handle<v8::Value> result = script->Run();
if (result.IsEmpty()) {
// Print errors that happened during execution.
v8::String::AsciiValue error(try_catch.Exception());
printf("%s\n", *error);
return false;
} else {
if (!result->IsUndefined()) {
// If all went well and the result wasn't undefined then print
// the returned value.
v8::String::AsciiValue str(result);
printf("%s\n", *str);
}
return true;
}
}
}

View File

@@ -88,60 +88,6 @@ BUILD_OPTIONS_MAP = {
}
}
},
'gcc-darwin': {
'debug': {
'default': {
'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -g -O0',
'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING', 'ENABLE_DISASSEMBLER', 'DEBUG'],
'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
'DIALECTFLAGS': '-ansi',
'LIBS': 'pthread',
'WARNINGFLAGS': '-pedantic -Wall -W -Wno-unused-parameter -Werror'
},
'dtoa': {
'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -g -O0',
'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING', 'ENABLE_DISASSEMBLER', 'DEBUG'],
'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
'DIALECTFLAGS': '-ansi',
'LIBS': 'pthread',
'WARNINGFLAGS': '-Werror'
},
'jscre': {
'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -g -O0',
'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING', 'ENABLE_DISASSEMBLER', 'DEBUG', 'SUPPORT_UTF8', 'NO_RECURSE', 'SUPPORT_UCP'],
'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
'DIALECTFLAGS': '-ansi',
'LIBS': 'pthread',
'WARNINGFLAGS': '-w'
}
},
'release': {
'default': {
'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -O2',
'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING'],
'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
'DIALECTFLAGS': '-ansi',
'LIBS': 'pthread',
'WARNINGFLAGS': '-pedantic -Wall -W -Wno-unused-parameter -Werror'
},
'dtoa': {
'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -O2',
'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING'],
'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
'DIALECTFLAGS': '-ansi',
'LIBS': 'pthread',
'WARNINGFLAGS': '-Werror'
},
'jscre': {
'CCFLAGS': '$DIALECTFLAGS $WARNINGFLAGS -O2',
'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING', 'SUPPORT_UTF8', 'NO_RECURSE', 'SUPPORT_UCP'],
'CXXFLAGS': '$CCFLAGS -fno-rtti -fno-exceptions',
'DIALECTFLAGS': '-ansi',
'LIBS': 'pthread',
'WARNINGFLAGS': '-w'
}
}
},
'msvc': {
'debug': {
'default': {
@@ -240,6 +186,7 @@ conversions.cc
counters.cc
dateparser.cc
debug.cc
disassembler.cc
execution.cc
factory.cc
flags.cc
@@ -282,8 +229,8 @@ zone.cc
PLATFORM_DEPENDENT_SOURCES = {
'arch:arm': ['assembler-arm.cc', 'builtins-arm.cc', 'codegen-arm.cc', 'cpu-arm.cc', 'disasm-arm.cc', 'disassembler-arm.cc', 'frames-arm.cc', 'ic-arm.cc', 'macro-assembler-arm.cc', 'simulator-arm.cc', 'stub-cache-arm.cc'],
'arch:ia32': ['assembler-ia32.cc', 'builtins-ia32.cc', 'codegen-ia32.cc', 'cpu-ia32.cc', 'disasm-ia32.cc', 'disassembler-ia32.cc', 'frames-ia32.cc', 'ic-ia32.cc', 'macro-assembler-ia32.cc', 'simulator-ia32.cc', 'stub-cache-ia32.cc'],
'arch:arm': ['assembler-arm.cc', 'builtins-arm.cc', 'codegen-arm.cc', 'cpu-arm.cc', 'disasm-arm.cc', 'frames-arm.cc', 'ic-arm.cc', 'macro-assembler-arm.cc', 'simulator-arm.cc', 'stub-cache-arm.cc'],
'arch:ia32': ['assembler-ia32.cc', 'builtins-ia32.cc', 'codegen-ia32.cc', 'cpu-ia32.cc', 'disasm-ia32.cc', 'frames-ia32.cc', 'ic-ia32.cc', 'macro-assembler-ia32.cc', 'simulator-ia32.cc', 'stub-cache-ia32.cc'],
'os:linux': ['platform-linux.cc'],
'os:macos': ['platform-macos.cc'],
'os:win32': ['platform-win32.cc']
@@ -320,7 +267,7 @@ def Abort(message):
sys.exit(1)
def BuildObject(env, input, **kw):
def ConfigureObject(env, input, **kw):
if library_type == 'static':
return env.StaticObject(input, **kw)
elif library_type == 'shared':
@@ -329,7 +276,7 @@ def BuildObject(env, input, **kw):
return env.Object(input, **kw)
def ConfigureBuild():
def ConfigureObjectFiles():
env = Environment()
options = BUILD_OPTIONS_MAP[toolchain][mode]['default']
env.Replace(**options)
@@ -342,51 +289,44 @@ def ConfigureBuild():
source_files += PLATFORM_DEPENDENT_SOURCES["os:%s" % os]
full_source_files = [s for s in source_files]
# Combine the javascript library files into a single C++ file and
# Combine the JavaScript library files into a single C++ file and
# compile it.
library_files = [s for s in LIBRARY_FILES]
library_files.append('macros.py')
libraries_src, libraries_empty_src = env.JS2C(['libraries.cc', 'libraries-empty.cc'], library_files)
libraries_obj = BuildObject(env, libraries_src, CPPPATH=['.'])
libraries_obj = ConfigureObject(env, libraries_src, CPPPATH=['.'])
# Build JSCRE.
jscre_env = env.Copy()
jscre_options = BUILD_OPTIONS_MAP[toolchain][mode]['jscre']
jscre_env.Replace(**jscre_options)
jscre_files = [join('third_party', 'jscre', s) for s in JSCRE_FILES]
jscre_obj = BuildObject(jscre_env, jscre_files)
jscre_obj = ConfigureObject(jscre_env, jscre_files)
# Build dtoa.
dtoa_env = env.Copy()
dtoa_options = BUILD_OPTIONS_MAP[toolchain][mode]['dtoa']
dtoa_env.Replace(**dtoa_options)
dtoa_files = ['dtoa-config.c']
dtoa_obj = BuildObject(dtoa_env, dtoa_files)
dtoa_obj = ConfigureObject(dtoa_env, dtoa_files)
full_source_objs = BuildObject(env, full_source_files)
full_source_objs = ConfigureObject(env, full_source_files)
non_snapshot_files = [jscre_obj, dtoa_obj, full_source_objs]
# Create snapshot if necessary.
empty_snapshot_obj = BuildObject(env, 'snapshot-empty.cc')
empty_snapshot_obj = ConfigureObject(env, 'snapshot-empty.cc')
if use_snapshot:
mksnapshot_src = 'mksnapshot.cc'
mksnapshot = env.Program('mksnapshot', [mksnapshot_src, libraries_obj, non_snapshot_files, empty_snapshot_obj], PDB='mksnapshot.exe.pdb')
snapshot_cc = env.Snapshot('snapshot.cc', mksnapshot, LOGFILE=File('snapshot.log').abspath)
snapshot_obj = BuildObject(env, snapshot_cc, CPPPATH=['.'])
libraries_obj = BuildObject(env, libraries_empty_src, CPPPATH=['.'])
snapshot_obj = ConfigureObject(env, snapshot_cc, CPPPATH=['.'])
libraries_obj = ConfigureObject(env, libraries_empty_src, CPPPATH=['.'])
else:
snapshot_obj = empty_snapshot_obj
all_files = [non_snapshot_files, libraries_obj, snapshot_obj]
if library_type == 'static':
env.StaticLibrary('v8', all_files)
elif library_type == 'shared':
# There seems to be a glitch in the way scons decides where to put
# .pdb files when compiling using msvc so we specify it manually.
# This should not affect any other platforms.
env.SharedLibrary('v8', all_files, PDB='v8.dll.pdb')
else:
env.Library('v8', all_files)
# Return all the object files needed to link the library.
return [non_snapshot_files, libraries_obj, snapshot_obj]
ConfigureBuild()
object_files = ConfigureObjectFiles()
Return('object_files')

View File

@@ -2090,6 +2090,11 @@ bool v8::V8::Initialize() {
}
const char* v8::V8::GetVersion() {
return "0.2.0 (129146)";
}
Persistent<Context> v8::Context::New(v8::ExtensionConfiguration* extensions,
v8::Handle<ObjectTemplate> global_template,
v8::Handle<Value> global_object) {
@@ -2473,6 +2478,12 @@ void V8::AddObjectToGroup(void* group_id, Persistent<Object> obj) {
}
int V8::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
if (IsDeadCheck("v8::V8::AdjustAmountOfExternalAllocatedMemory()")) return 0;
return i::Heap::AdjustAmountOfExternalAllocatedMemory(change_in_bytes);
}
void V8::SetGlobalGCPrologueCallback(GCCallback callback) {
if (IsDeadCheck("v8::V8::SetGlobalGCPrologueCallback()")) return;
i::Heap::SetGlobalGCPrologueCallback(callback);

View File

@@ -1084,7 +1084,14 @@ void Assembler::swpb(Register dst,
// Exception-generating instructions and debugging support
void Assembler::stop(const char* msg) {
#if !defined(__arm__)
// The simulator handles these special instructions and stops execution.
emit(15 << 28 | ((intptr_t) msg));
#else
// Just issue a simple break instruction for now. Alternatively we could use
// the swi(0x9f0001) instruction on Linux.
bkpt(0);
#endif
}

View File

@@ -503,16 +503,9 @@ class Assembler : public Malloced {
// Exception-generating instructions and debugging support
void stop(const char* msg);
void untested(const char* msg);
void unimplemented(const char* msg);
void unreachable(const char* msg);
void bkpt(uint32_t imm16); // v5 and above
void swi(uint32_t imm24, Condition cond = al);
// To generate a breakpoint on ARM Linux you can use swi(0x9f0001).
// For some reason stepi or cont will not work in gdb until you have done:
// set $pc = $pc + 4
inline void int3() { swi(0x9f0001); }
// Coprocessor instructions

View File

@@ -204,12 +204,31 @@ Address Assembler::target_address_at(Address pc) {
return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc);
}
void Assembler::set_target_address_at(Address pc, Address target) {
int32_t* p = reinterpret_cast<int32_t*>(pc);
*p = target - (pc + sizeof(int32_t));
CPU::FlushICache(p, sizeof(int32_t));
}
Displacement Assembler::disp_at(Label* L) {
return Displacement(long_at(L->pos()));
}
void Assembler::disp_at_put(Label* L, Displacement disp) {
long_at_put(L->pos(), disp.data());
}
void Assembler::emit_disp(Label* L, Displacement::Type type) {
Displacement disp(L, type);
L->link_to(pc_offset());
emit(static_cast<int>(disp.data()));
}
void Operand::set_modrm(int mod, // reg == 0
Register rm) {
ASSERT((mod & -4) == 0);

View File

@@ -135,85 +135,19 @@ void CpuFeatures::Probe() {
// -----------------------------------------------------------------------------
// A Displacement describes the 32bit immediate field of an instruction which
// may be used together with a Label in order to refer to a yet unknown code
// position. Displacements stored in the instruction stream are used to describe
// the instruction and to chain a list of instructions using the same Label.
// A Displacement contains 3 different fields:
//
// next field: position of next displacement in the chain (0 = end of list)
// type field: instruction type
//
// A next value of null (0) indicates the end of a chain (note that there can
// be no displacement at position zero, because there is always at least one
// instruction byte before the displacement).
//
// Displacement _data field layout
//
// |31.....1|.......0|
// [ next | type |
// Implementation of Displacement
class Displacement BASE_EMBEDDED {
private:
enum Type {
UNCONDITIONAL_JUMP,
OTHER
};
int data_;
class TypeField: public BitField<Type, 0, 1> {};
class NextField: public BitField<int, 1, 32-1> {};
void init(Label* L, Type type) {
ASSERT(!L->is_bound());
int next = 0;
if (L->is_linked()) {
next = L->pos();
ASSERT(next > 0); // Displacements must be at positions > 0
}
// Ensure that we _never_ overflow the next field.
ASSERT(NextField::is_valid(Assembler::kMaximalBufferSize));
data_ = NextField::encode(next) | TypeField::encode(type);
}
int data() const { return data_; }
Type type() const { return TypeField::decode(data_); }
void next(Label* L) const {
int n = NextField::decode(data_);
n > 0 ? L->link_to(n) : L->Unuse();
}
void link_to(Label* L) { init(L, type()); }
explicit Displacement(int data) { data_ = data; }
Displacement(Label* L, Type type) { init(L, type); }
void print() {
PrintF("%s (%x) ", (type() == UNCONDITIONAL_JUMP ? "jmp" : "[other]"),
NextField::decode(data_));
}
friend class Assembler;
friend class MacroAssembler;
};
// TODO(1236137): Stop using macros here. The reason for using them is
// to avoid declaring the Displacement class in the .h file and have
// functions on the assembler that return them. Maybe that's not a
// big issue?
#define disp_at(L) \
Displacement(long_at((L)->pos()))
#define disp_at_put(L, disp) \
long_at_put((L)->pos(), (disp).data())
#define emit_disp(L, type) { \
Displacement disp((L), (type)); \
(L)->link_to(pc_offset()); \
emit(static_cast<int>(disp.data())); \
void Displacement::init(Label* L, Type type) {
ASSERT(!L->is_bound());
int next = 0;
if (L->is_linked()) {
next = L->pos();
ASSERT(next > 0); // Displacements must be at positions > 0
}
// Ensure that we _never_ overflow the next field.
ASSERT(NextField::is_valid(Assembler::kMaximalBufferSize));
data_ = NextField::encode(next) | TypeField::encode(type);
}
// -----------------------------------------------------------------------------

View File

@@ -264,6 +264,59 @@ class Operand BASE_EMBEDDED {
};
// -----------------------------------------------------------------------------
// A Displacement describes the 32bit immediate field of an instruction which
// may be used together with a Label in order to refer to a yet unknown code
// position. Displacements stored in the instruction stream are used to describe
// the instruction and to chain a list of instructions using the same Label.
// A Displacement contains 2 different fields:
//
// next field: position of next displacement in the chain (0 = end of list)
// type field: instruction type
//
// A next value of null (0) indicates the end of a chain (note that there can
// be no displacement at position zero, because there is always at least one
// instruction byte before the displacement).
//
// Displacement _data field layout
//
// |31.....1|.......0|
// [ next | type |
class Displacement BASE_EMBEDDED {
public:
enum Type {
UNCONDITIONAL_JUMP,
OTHER
};
int data() const { return data_; }
Type type() const { return TypeField::decode(data_); }
void next(Label* L) const {
int n = NextField::decode(data_);
n > 0 ? L->link_to(n) : L->Unuse();
}
void link_to(Label* L) { init(L, type()); }
explicit Displacement(int data) { data_ = data; }
Displacement(Label* L, Type type) { init(L, type); }
void print() {
PrintF("%s (%x) ", (type() == UNCONDITIONAL_JUMP ? "jmp" : "[other]"),
NextField::decode(data_));
}
private:
int data_;
class TypeField: public BitField<Type, 0, 1> {};
class NextField: public BitField<int, 1, 32-1> {};
void init(Label* L, Type type);
};
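As a worked example of the layout above (values made up): a displacement
whose next link is at position 5 with type OTHER (encoded as 1) packs to
(5 << 1) | 1 = 11, and decoding reads the type back from bit 0 and the
next position from bits 1..31.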
// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a Scope before use.
// Example:
@@ -709,6 +762,11 @@ class Assembler : public Malloced {
void bind_to(Label* L, int pos);
void link_to(Label* L, Label* appendix);
// displacements
inline Displacement disp_at(Label* L);
inline void disp_at_put(Label* L, Displacement disp);
inline void emit_disp(Label* L, Displacement::Type type);
// record reloc info for current pc_
void RecordRelocInfo(RelocMode rmode, intptr_t data = 0);

View File

@@ -143,7 +143,7 @@ static const int kNoPosition = -1;
enum RelocMode {
// Please note the order is important (see is_code_target).
// Please note the order is important (see is_code_target, is_gc_reloc_mode).
js_construct_call, // code target that is an exit JavaScript frame stub.
exit_js_frame, // code target that is an exit JavaScript frame stub.
code_target_context, // code target used for contextual loads.
@@ -163,7 +163,8 @@ enum RelocMode {
// Pseudo-types
reloc_mode_count,
last_code_enum = code_target
last_code_enum = code_target,
last_gced_enum = embedded_string
};
@@ -187,6 +188,12 @@ inline bool is_code_target(RelocMode mode) {
}
// Is the relocation mode affected by GC?
inline bool is_gc_reloc_mode(RelocMode mode) {
return mode <= last_gced_enum;
}
inline bool is_js_return(RelocMode mode) {
return mode == js_return;
}

View File

@@ -491,8 +491,12 @@ void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
{ // --- E m p t y ---
Handle<Code> call_code =
Handle<Code>(Builtins::builtin(Builtins::EmptyFunction));
Handle<String> source = Factory::NewStringFromAscii(CStrVector("() {}"));
empty_function->set_code(*call_code);
empty_function->shared()->set_script(*Factory::NewScript(source));
empty_function->shared()->set_start_position(0);
empty_function->shared()->set_end_position(source->length());
global_context()->function_map()->set_prototype(*empty_function);
global_context()->function_instance_map()->set_prototype(*empty_function);
@@ -1209,11 +1213,11 @@ void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
void Genesis::MakeFunctionInstancePrototypeWritable() {
// Make a new function map so all future functions
// will have settable prototype properties.
// will have settable and enumerable prototype properties.
HandleScope scope;
Handle<DescriptorArray> function_map_descriptors =
ComputeFunctionInstanceDescriptor(false);
ComputeFunctionInstanceDescriptor(false, true);
Handle<Map> fm = Factory::CopyMap(Top::function_map());
fm->set_instance_descriptors(*function_map_descriptors);
Top::context()->global_context()->set_function_map(*fm);

View File

@@ -47,7 +47,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// r0: number of arguments
__ EnterJSFrame(0, 0);
__ EnterJSFrame(0);
// Allocate the new receiver object.
__ push(r0);
@@ -119,7 +119,7 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// Remove receiver from the stack, remove caller arguments, and
// return.
__ bind(&exit);
__ ExitJSFrame(RETURN, 0);
__ ExitJSFrame(RETURN);
// Compute the offset from the beginning of the JSConstructCall
// builtin code object to the return address after the call.
@@ -221,13 +221,13 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// TODO(1233523): Implement. Unused for now.
__ int3();
__ stop("Builtins::Generate_FunctionApply");
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// TODO(1233523): Implement. Unused for now.
__ int3();
__ stop("Builtins::Generate_ArgumentsAdaptorTrampoline");
Label return_site;
__ bind(&return_site);
@@ -269,9 +269,9 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ mov(r2, Operand(cp)); // context to be saved
// push in reverse order: context (r2), args_len (r3), caller_pp, caller_fp,
// sp_on_exit (ip == pp), return address, prolog_pc
// sp_on_exit (ip == pp), return address
__ stm(db_w, sp, r2.bit() | r3.bit() | pp.bit() | fp.bit() |
ip.bit() | lr.bit() | pc.bit());
ip.bit() | lr.bit());
// Setup new frame pointer.
__ add(fp, sp, Operand(-StandardFrameConstants::kContextOffset));
__ mov(pp, Operand(ip)); // setup new parameter pointer

View File

@@ -50,33 +50,21 @@ Handle<Code> CodeStub::GetCode() {
// Generate the new code.
MacroAssembler masm(NULL, 256);
bool needs_check_for_stub_calls = !AllowsStubCalls();
if (needs_check_for_stub_calls) {
// Nested stubs are not allowed for leafs.
ASSERT(!masm.generating_stub());
masm.set_generating_stub(true);
}
// Nested stubs are not allowed for leafs.
masm.set_allow_stub_calls(AllowsStubCalls());
// Generate the code for the stub.
masm.set_generating_stub(true);
Generate(&masm);
if (needs_check_for_stub_calls) masm.set_generating_stub(false);
// Create the code object.
CodeDesc desc;
masm.GetCode(&desc);
// Copy the generated code into a heap object.
// TODO(1238541): Simplify this somewhat complicated encoding.
CodeStub::Major major = MajorKey();
// Lower three bits in state field.
InlineCacheState state = static_cast<InlineCacheState>(major & 0x07);
// Upper two bits in type field.
PropertyType type = static_cast<PropertyType>((major >> 3) & 0x03);
// Compute flags with state and type used to hold the major key.
Code::Flags flags = Code::ComputeFlags(Code::STUB, state, type);
// Copy the generated code into a heap object, and store the major key.
Code::Flags flags = Code::ComputeFlags(Code::STUB);
Handle<Code> code = Factory::NewCode(desc, NULL, flags);
code->set_major_key(MajorKey());
// Add unresolved entries in the code to the fixup list.
Bootstrapper::AddFixup(*code, &masm);
@@ -110,22 +98,22 @@ const char* CodeStub::MajorName(CodeStub::Major major_key) {
switch (major_key) {
case CallFunction:
return "CallFunction";
case InlinedGenericOp:
return "InlinedGenericOp";
case GenericBinaryOp:
return "GenericBinaryOp";
case SmiOp:
return "SmiOp";
case Compare:
return "Compare";
case RecordWrite:
return "RecordWrite";
case GenericOp:
return "GenericOp";
case StackCheck:
return "StackCheck";
case UnarySub:
return "UnarySub";
case RevertToNumber:
return "RevertToNumber";
case ToBoolean:
return "ToBoolean";
case CounterOp:
return "CounterOp";
case ArgumentsAccess:

View File

@@ -36,14 +36,14 @@ class CodeStub BASE_EMBEDDED {
public:
enum Major {
CallFunction,
InlinedGenericOp,
GenericBinaryOp,
SmiOp,
Compare,
RecordWrite, // Last stub that allows stub calls inside.
GenericOp,
StackCheck,
UnarySub,
RevertToNumber,
ToBoolean,
CounterOp,
ArgumentsAccess,
Runtime,

View File

@@ -37,12 +37,9 @@
namespace v8 { namespace internal {
DEFINE_bool(optimize_locals, true,
"optimize locals by allocating them in registers");
DEFINE_bool(trace, false, "trace function calls");
DECLARE_bool(debug_info);
DECLARE_bool(debug_code);
DECLARE_bool(optimize_locals);
#ifdef DEBUG
DECLARE_bool(gc_greedy);
@@ -163,8 +160,6 @@ class ArmCodeGenerator: public CodeGenerator {
Scope* scope_;
Condition cc_reg_;
CodeGenState* state_;
RegList reg_locals_; // the list of registers used to hold locals
int num_reg_locals_; // the number of registers holding locals
int break_stack_height_;
// Labels
@@ -213,7 +208,6 @@ class ArmCodeGenerator: public CodeGenerator {
MemOperand FunctionOperand() const { return ParameterOperand(-2); }
Register SlotRegister(int slot_index);
MemOperand SlotOperand(Slot* slot, Register tmp);
void LoadCondition(Expression* x, CodeGenState::AccessType access,
@@ -246,7 +240,7 @@ class ArmCodeGenerator: public CodeGenerator {
void AccessReferenceProperty(Expression* key,
CodeGenState::AccessType access);
void GenericOperation(Token::Value op);
void GenericBinaryOperation(Token::Value op);
void Comparison(Condition cc, bool strict = false);
void SmiOperation(Token::Value op, Handle<Object> value, bool reversed);
@@ -274,9 +268,8 @@ class ArmCodeGenerator: public CodeGenerator {
void RecordStatementPosition(Node* node);
// Activation frames
void EnterJSFrame(int argc, RegList callee_saved); // preserves r1
void ExitJSFrame(RegList callee_saved,
ExitJSFlag flag = RETURN); // preserves r0-r2
void EnterJSFrame(int argc); // preserves r1
void ExitJSFrame(ExitJSFlag flag = RETURN); // preserves r0-r2
virtual void GenerateShiftDownAndTailCall(ZoneList<Expression*>* args);
virtual void GenerateSetThisFunction(ZoneList<Expression*>* args);
@@ -296,6 +289,8 @@ class ArmCodeGenerator: public CodeGenerator {
virtual void GenerateValueOf(ZoneList<Expression*>* args);
virtual void GenerateSetValueOf(ZoneList<Expression*>* args);
virtual void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
};
@@ -417,15 +412,6 @@ void ArmCodeGenerator::GenCode(FunctionLiteral* fun) {
state_ = &state;
scope_ = scope;
cc_reg_ = al;
if (FLAG_optimize_locals) {
num_reg_locals_ = scope->num_stack_slots() < kNumJSCalleeSaved
? scope->num_stack_slots()
: kNumJSCalleeSaved;
reg_locals_ = JSCalleeSavedList(num_reg_locals_);
} else {
num_reg_locals_ = 0;
reg_locals_ = 0;
}
// Entry
// stack: function, receiver, arguments, return address
@@ -436,46 +422,43 @@ void ArmCodeGenerator::GenCode(FunctionLiteral* fun) {
// cp: callee's context
{ Comment cmnt(masm_, "[ enter JS frame");
EnterJSFrame(scope->num_parameters(), reg_locals_);
EnterJSFrame(scope->num_parameters());
}
// tos: code slot
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
__ bkpt(0); // not supported before v5, but illegal instruction works too
__ stop("stop-at");
}
#endif
// Allocate space for locals and initialize them.
if (scope->num_stack_slots() > num_reg_locals_) {
if (scope->num_stack_slots() > 0) {
Comment cmnt(masm_, "[ allocate space for locals");
// Pushing the first local materializes the code slot on the stack
// (formerly stored in tos register r0).
__ Push(Operand(Factory::undefined_value()));
// The remaining locals are pushed using the fact that r0 (tos)
// already contains the undefined value.
for (int i = scope->num_stack_slots(); i-- > num_reg_locals_ + 1;) {
for (int i = 1; i < scope->num_stack_slots(); i++) {
__ push(r0);
}
}
// Initialize locals allocated in registers
if (num_reg_locals_ > 0) {
if (scope->num_stack_slots() > num_reg_locals_) {
// r0 contains 'undefined'
__ mov(SlotRegister(0), Operand(r0));
} else {
__ mov(SlotRegister(0), Operand(Factory::undefined_value()));
}
for (int i = num_reg_locals_ - 1; i > 0; i--) {
__ mov(SlotRegister(i), Operand(SlotRegister(0)));
}
}
if (scope->num_heap_slots() > 0) {
// Allocate local context.
// Get outer context and create a new context based on it.
__ Push(FunctionOperand());
__ CallRuntime(Runtime::kNewContext, 2);
__ CallRuntime(Runtime::kNewContext, 1); // r0 holds the result
if (kDebug) {
Label verified_true;
__ cmp(r0, Operand(cp));
__ b(eq, &verified_true);
__ stop("NewContext: r0 is expected to be the same as cp");
__ bind(&verified_true);
}
__ pop(r0); // restore TOS
// Update context local.
__ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@ -580,7 +563,7 @@ void ArmCodeGenerator::GenCode(FunctionLiteral* fun) {
__ Push(Operand(Factory::undefined_value()));
__ bind(&function_return_);
if (FLAG_trace) __ CallRuntime(Runtime::kTraceExit, 1);
ExitJSFrame(reg_locals_);
ExitJSFrame();
// Code generation state must be reset.
scope_ = NULL;
@ -589,13 +572,6 @@ void ArmCodeGenerator::GenCode(FunctionLiteral* fun) {
}
Register ArmCodeGenerator::SlotRegister(int slot_index) {
Register reg;
reg.code_ = JSCalleeSavedCode(slot_index);
return reg;
}
MemOperand ArmCodeGenerator::SlotOperand(Slot* slot, Register tmp) {
// Currently, this assertion will fail if we try to assign to
// a constant variable that is constant because it is read-only
@ -614,9 +590,9 @@ MemOperand ArmCodeGenerator::SlotOperand(Slot* slot, Register tmp) {
case Slot::LOCAL: {
ASSERT(0 <= index &&
index < scope_->num_stack_slots() &&
index >= num_reg_locals_);
index >= 0);
int local_offset = JavaScriptFrameConstants::kLocal0Offset -
(index - num_reg_locals_) * kPointerSize;
index * kPointerSize;
return MemOperand(fp, local_offset);
}
@ -1063,7 +1039,34 @@ void SetPropertyStub::Generate(MacroAssembler* masm) {
}
void GenericOpStub::Generate(MacroAssembler* masm) {
class GenericBinaryOpStub : public CodeStub {
public:
explicit GenericBinaryOpStub(Token::Value op) : op_(op) { }
private:
Token::Value op_;
Major MajorKey() { return GenericBinaryOp; }
int MinorKey() { return static_cast<int>(op_); }
void Generate(MacroAssembler* masm);
const char* GetName() {
switch (op_) {
case Token::ADD: return "GenericBinaryOpStub_ADD";
case Token::SUB: return "GenericBinaryOpStub_SUB";
case Token::MUL: return "GenericBinaryOpStub_MUL";
case Token::DIV: return "GenericBinaryOpStub_DIV";
default: return "GenericBinaryOpStub";
}
}
#ifdef DEBUG
void Print() { PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_)); }
#endif
};
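For context, a CodeStub is identified in the stub cache by its (MajorKey, MinorKey) pair; for this stub the minor key is just the Token::Value of the operation. The sketch below shows one plausible way such a pair could be packed into a single cache key. The function name and bit width are illustrative assumptions, not the actual packing used by the stub-cache machinery.
// Hypothetical sketch: packing a stub's (major, minor) pair into one key.
// kAssumedMajorBits and HypotheticalStubKey are illustrative names; the
// real key layout lives elsewhere in the code-stub machinery.
static const int kAssumedMajorBits = 6;  // assumed wide enough for all Major values
static uint32_t HypotheticalStubKey(CodeStub::Major major, int minor) {
  return (static_cast<uint32_t>(minor) << kAssumedMajorBits) |
         static_cast<uint32_t>(major);
}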
void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
switch (op_) {
case Token::ADD: {
Label slow, exit;
@ -1303,34 +1306,27 @@ class JSExitStub : public CodeStub {
public:
enum Kind { Inc, Dec, ToNumber };
JSExitStub(int num_callee_saved, RegList callee_saved, ExitJSFlag flag)
: num_callee_saved_(num_callee_saved),
callee_saved_(callee_saved),
flag_(flag) { }
explicit JSExitStub(ExitJSFlag flag) : flag_(flag) { }
private:
int num_callee_saved_;
RegList callee_saved_;
ExitJSFlag flag_;
Major MajorKey() { return JSExit; }
int MinorKey() { return (num_callee_saved_ << 3) | static_cast<int>(flag_); }
int MinorKey() { return static_cast<int>(flag_); }
void Generate(MacroAssembler* masm);
const char* GetName() { return "JSExitStub"; }
#ifdef DEBUG
void Print() {
PrintF("JSExitStub (num_callee_saved %d, flag %d)\n",
num_callee_saved_,
static_cast<int>(flag_));
PrintF("JSExitStub flag %d)\n", static_cast<int>(flag_));
}
#endif
};
void JSExitStub::Generate(MacroAssembler* masm) {
__ ExitJSFrame(flag_, callee_saved_);
__ ExitJSFrame(flag_);
masm->StubReturn(1);
}
@ -1339,21 +1335,6 @@ void JSExitStub::Generate(MacroAssembler* masm) {
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// r0 holds exception
ASSERT(StackHandlerConstants::kSize == 6 * kPointerSize); // adjust this code
if (FLAG_optimize_locals) {
// Locals are allocated in callee-saved registers, so we need to restore
// saved callee-saved registers by unwinding the stack
static JSCalleeSavedBuffer regs;
intptr_t arg0 = reinterpret_cast<intptr_t>(&regs);
__ push(r0);
__ mov(r0, Operand(arg0)); // exception in r0 (TOS) is pushed, r0 == arg0
// Do not push a second C entry frame, but call directly
__ Call(FUNCTION_ADDR(StackFrameIterator::RestoreCalleeSavedForTopHandler),
runtime_entry); // passing r0
// Frame::RestoreJSCalleeSaved returns arg0 (TOS)
__ mov(r1, Operand(r0));
__ pop(r0); // r1 holds arg0, r0 holds exception
__ ldm(ia, r1, kJSCalleeSaved); // restore callee-saved registers
}
__ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
__ ldr(sp, MemOperand(r3));
__ pop(r2); // pop next in chain
@ -1495,7 +1476,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ mov(r3, Operand(Top::context_address()));
__ ldr(cp, MemOperand(r3));
__ mov(sp, Operand(fp)); // respect ABI stack constraint
__ ldm(ia, sp, kJSCalleeSaved | pp.bit() | fp.bit() | sp.bit() | pc.bit());
__ ldm(ia, sp, pp.bit() | fp.bit() | sp.bit() | pc.bit());
// check if we should retry or throw exception
Label retry;
@ -1552,7 +1533,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
// all JS callee-saved are saved and traversed by GC; push in reverse order:
// JS callee-saved, caller_pp, caller_fp, sp_on_exit (ip==pp), caller_pc
__ stm(db_w, sp, kJSCalleeSaved | pp.bit() | fp.bit() | ip.bit() | lr.bit());
__ stm(db_w, sp, pp.bit() | fp.bit() | ip.bit() | lr.bit());
__ mov(fp, Operand(sp)); // setup new frame pointer
// Store the current context in top.
@ -1844,13 +1825,13 @@ void ArmCodeGenerator::AccessReferenceProperty(
}
void ArmCodeGenerator::GenericOperation(Token::Value op) {
void ArmCodeGenerator::GenericBinaryOperation(Token::Value op) {
// Stub is entered with a call: 'return address' is in lr.
switch (op) {
case Token::ADD: // fall through.
case Token::SUB: // fall through.
case Token::MUL: {
GenericOpStub stub(op);
GenericBinaryOpStub stub(op);
__ CallStub(&stub);
break;
}
@ -2039,7 +2020,7 @@ void ArmCodeGenerator::SmiOperation(Token::Value op,
__ mov(ip, Operand(value));
__ push(ip);
}
GenericOperation(op);
GenericBinaryOperation(op);
break;
}
@ -2096,48 +2077,74 @@ void ArmCodeGenerator::Comparison(Condition cc, bool strict) {
}
class CallFunctionStub: public CodeStub {
public:
explicit CallFunctionStub(int argc) : argc_(argc) {}
void Generate(MacroAssembler* masm);
private:
int argc_;
const char* GetName() { return "CallFunctionStub"; }
#if defined(DEBUG)
void Print() { PrintF("CallFunctionStub (argc %d)\n", argc_); }
#endif // defined(DEBUG)
Major MajorKey() { return CallFunction; }
int MinorKey() { return argc_; }
};
void CallFunctionStub::Generate(MacroAssembler* masm) {
Label slow;
// Push the number of arguments.
masm->Push(Operand(argc_));
// Get the function to call from the stack.
// function, receiver [, arguments], argc_
masm->ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
// Check that the function is really a JavaScript function.
masm->tst(r1, Operand(kSmiTagMask));
masm->b(eq, &slow);
// Get the map of the function object.
masm->ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
masm->ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
masm->cmp(r2, Operand(JS_FUNCTION_TYPE));
masm->b(ne, &slow);
// Fast-case: Invoke the function now.
masm->ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
masm->ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
masm->ldr(r1,
MemOperand(r1, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
masm->add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
masm->Jump(r1); // Callee will return to the original call site directly.
// Slow-case: Non-function called.
masm->bind(&slow);
masm->InvokeBuiltin("CALL_NON_FUNCTION", 0, JUMP_JS);
}
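A note on the fast-path check above, assuming V8's usual smi tagging where small integers carry a zero low bit and kSmiTagMask is 1:
// tst r1, #kSmiTagMask   ; sets Z iff the low bit of r1 is 0
// b eq, &slow            ; Z set => r1 is a smi, not a heap object, so it
//                        ; cannot be a function: take the slow path.
// Only after ruling out smis is it safe to load r1's map and compare its
// instance type against JS_FUNCTION_TYPE.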
// Call the function just below TOS on the stack with the given
// arguments. The receiver is the TOS.
void ArmCodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
int position) {
Label fast, slow, exit;
// Push the arguments ("left-to-right") on the stack.
for (int i = 0; i < args->length(); i++) Load(args->at(i));
// Push the number of arguments.
__ Push(Operand(args->length()));
// Get the function to call from the stack.
// +1 ~ receiver.
__ ldr(r1, MemOperand(sp, (args->length() + 1) * kPointerSize));
// Check that the function really is a JavaScript function.
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &slow);
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); // get the map
__ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
__ cmp(r2, Operand(JS_FUNCTION_TYPE));
__ b(eq, &fast);
// Record the position for debugging purposes.
__ RecordPosition(position);
// Slow-case: Non-function called.
__ bind(&slow);
__ InvokeBuiltin("CALL_NON_FUNCTION", 0, CALL_JS);
__ b(&exit);
// Fast-case: Get the code from the function, call the first
// instruction in it, and pop function.
__ bind(&fast);
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
__ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r1, MemOperand(r1, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
__ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(r1);
// Use the shared code stub to call the function.
CallFunctionStub call_function(args->length());
__ CallStub(&call_function);
// Restore context and pop function from the stack.
__ bind(&exit);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ add(sp, sp, Operand(kPointerSize)); // discard
}
@ -2352,7 +2359,15 @@ void ArmCodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
Comment cmnt(masm_, "[ WithEnterStatement");
if (FLAG_debug_info) RecordStatementPosition(node);
Load(node->expression());
__ CallRuntime(Runtime::kPushContext, 2);
__ CallRuntime(Runtime::kPushContext, 1);
if (kDebug) {
Label verified_true;
__ cmp(r0, Operand(cp));
__ b(eq, &verified_true);
__ stop("PushContext: r0 is expected to be the same as cp");
__ bind(&verified_true);
}
__ pop(r0); // restore TOS
// Update context local.
__ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@ -2999,12 +3014,7 @@ void ArmCodeGenerator::VisitSlot(Slot* node) {
case CodeGenState::LOAD: // fall through
case CodeGenState::LOAD_TYPEOF_EXPR:
// Special handling for locals allocated in registers.
if (FLAG_optimize_locals && node->type() == Slot::LOCAL &&
node->index() < num_reg_locals_) {
__ Push(Operand(SlotRegister(node->index())));
} else {
__ Push(SlotOperand(node, r2));
}
__ Push(SlotOperand(node, r2));
if (node->var()->mode() == Variable::CONST) {
// Const slots may contain 'the hole' value (the constant hasn't
// been initialized yet) which needs to be converted into the
@ -3022,22 +3032,12 @@ void ArmCodeGenerator::VisitSlot(Slot* node) {
// the code is identical to a normal store (see below).
{ Comment cmnt(masm_, "[ Init const");
Label L;
if (FLAG_optimize_locals && node->type() == Slot::LOCAL &&
node->index() < num_reg_locals_) {
__ mov(r2, Operand(SlotRegister(node->index())));
} else {
__ ldr(r2, SlotOperand(node, r2));
}
__ ldr(r2, SlotOperand(node, r2));
__ cmp(r2, Operand(Factory::the_hole_value()));
__ b(ne, &L);
// We must execute the store.
if (FLAG_optimize_locals && node->type() == Slot::LOCAL &&
node->index() < num_reg_locals_) {
__ mov(SlotRegister(node->index()), Operand(r0));
} else {
// r2 may be loaded with context; used below in RecordWrite.
__ str(r0, SlotOperand(node, r2));
}
// r2 may be loaded with context; used below in RecordWrite.
__ str(r0, SlotOperand(node, r2));
if (node->type() == Slot::CONTEXT) {
// Skip write barrier if the written value is a smi.
Label exit;
@ -3063,13 +3063,8 @@ void ArmCodeGenerator::VisitSlot(Slot* node) {
// Variable::CONST because of const declarations which will
// initialize consts to 'the hole' value and by doing so, end
// up calling this code.
if (FLAG_optimize_locals && node->type() == Slot::LOCAL &&
node->index() < num_reg_locals_) {
__ mov(SlotRegister(node->index()), Operand(r0));
} else {
// r2 may be loaded with context; used below in RecordWrite.
__ str(r0, SlotOperand(node, r2));
}
// r2 may be loaded with context; used below in RecordWrite.
__ str(r0, SlotOperand(node, r2));
if (node->type() == Slot::CONTEXT) {
// Skip write barrier if the written value is a smi.
Label exit;
@ -3306,7 +3301,7 @@ void ArmCodeGenerator::VisitAssignment(Assignment* node) {
SmiOperation(node->binary_op(), literal->handle(), false);
} else {
Load(node->value());
GenericOperation(node->binary_op());
GenericBinaryOperation(node->binary_op());
}
}
@ -3602,14 +3597,13 @@ void ArmCodeGenerator::GenerateTailCallWithArguments(
__ ldr(r1, MemOperand(pp, JavaScriptFrameConstants::kFunctionOffset));
// Reset parameter pointer and frame pointer to previous frame
ExitJSFrame(reg_locals_, DO_NOT_RETURN);
ExitJSFrame(DO_NOT_RETURN);
// Jump (tail-call) to the function in register r1.
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
__ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCodeOffset));
__ add(pc, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
return;
}
@ -3726,10 +3720,21 @@ void ArmCodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
}
// This should generate code that performs a charCodeAt() call or returns
// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
// It is not yet implemented on ARM, so it always goes to the slow case.
void ArmCodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
__ push(r0);
__ mov(r0, Operand(Factory::undefined_value()));
}
// This is used in the implementation of apply on ia32 but it is not
// used on ARM yet.
void ArmCodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
__ int3();
__ stop("ArmCodeGenerator::GenerateIsArray");
cc_reg_ = eq;
}
@ -3772,7 +3777,7 @@ void ArmCodeGenerator::GenerateShiftDownAndTailCall(
// Get the 'this' function and exit the frame without returning.
__ ldr(r1, MemOperand(pp, JavaScriptFrameConstants::kFunctionOffset));
ExitJSFrame(reg_locals_, DO_NOT_RETURN);
ExitJSFrame(DO_NOT_RETURN);
// return address in lr
// Move arguments one element down the stack.
@ -4132,7 +4137,7 @@ void ArmCodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
} else {
Load(node->left());
Load(node->right());
GenericOperation(node->op());
GenericBinaryOperation(node->op());
}
}
}
@ -4346,21 +4351,13 @@ void ArmCodeGenerator::RecordStatementPosition(Node* node) {
}
void ArmCodeGenerator::EnterJSFrame(int argc, RegList callee_saved) {
__ EnterJSFrame(argc, callee_saved);
void ArmCodeGenerator::EnterJSFrame(int argc) {
__ EnterJSFrame(argc);
}
void ArmCodeGenerator::ExitJSFrame(RegList callee_saved, ExitJSFlag flag) {
// The JavaScript debugger expects ExitJSFrame to be implemented as a stub,
// so that a breakpoint can be inserted at the end of a function.
int num_callee_saved = NumRegs(callee_saved);
// We support a fixed number of register variable configurations
ASSERT(num_callee_saved <= 5 &&
JSCalleeSavedList(num_callee_saved) == callee_saved);
JSExitStub stub(num_callee_saved, callee_saved, flag);
void ArmCodeGenerator::ExitJSFrame(ExitJSFlag flag) {
JSExitStub stub(flag);
__ CallJSExitStub(&stub);
}

File diff suppressed because it is too large

View File

@ -251,7 +251,9 @@ bool CodeGenerator::CheckForInlineRuntimeCall(CallRuntime* node) {
{&v8::internal::CodeGenerator::GenerateValueOf,
"_ValueOf"},
{&v8::internal::CodeGenerator::GenerateSetValueOf,
"_SetValueOf"}
"_SetValueOf"},
{&v8::internal::CodeGenerator::GenerateFastCharCodeAt,
"_FastCharCodeAt"}
};
if (node->name()->length() > 0 && node->name()->Get(0) == '_') {
for (unsigned i = 0;
@ -278,15 +280,4 @@ void RuntimeStub::Generate(MacroAssembler* masm) {
}
const char* GenericOpStub::GetName() {
switch (op_) {
case Token::ADD: return "GenericOpStub_ADD";
case Token::SUB: return "GenericOpStub_SUB";
case Token::MUL: return "GenericOpStub_MUL";
case Token::DIV: return "GenericOpStub_DIV";
default: return "GenericOpStub";
}
}
} } // namespace v8::internal

View File

@ -169,6 +169,9 @@ class CodeGenerator: public Visitor {
virtual void GenerateValueOf(ZoneList<Expression*>* args) = 0;
virtual void GenerateSetValueOf(ZoneList<Expression*>* args) = 0;
// Fast support for charCodeAt(n).
virtual void GenerateFastCharCodeAt(ZoneList<Expression*>* args) = 0;
private:
bool is_eval_; // Tells whether code is generated for eval.
Handle<Script> script_;
@ -199,25 +202,6 @@ class RuntimeStub : public CodeStub {
};
class GenericOpStub : public CodeStub {
public:
explicit GenericOpStub(Token::Value op) : op_(op) { }
private:
Token::Value op_;
Major MajorKey() { return GenericOp; }
int MinorKey() { return static_cast<int>(op_); }
void Generate(MacroAssembler* masm);
const char* GetName();
#ifdef DEBUG
void Print() { PrintF("GenericOpStub (token %s)\n", Token::String(op_)); }
#endif
};
class StackCheckStub : public CodeStub {
public:
StackCheckStub() { }

View File

@ -35,126 +35,6 @@
namespace v8 { namespace internal {
// Helper class for building result strings in a character buffer. The
// purpose of the class is to use safe operations that check the
// buffer bounds on all operations in debug mode.
class StringBuilder {
public:
// Create a string builder with a buffer of the given size. The
// buffer is allocated through NewArray<char> and must be
// deallocated by the caller of Finalize().
explicit StringBuilder(int size);
StringBuilder(char* buffer, int size)
: buffer_(buffer), size_(size), position_(0) { }
~StringBuilder() { if (!is_finalized()) Finalize(); }
// Get the current position in the builder.
inline int position() const;
// Add a single character to the builder. It is not allowed to add
// 0-characters; use the Finalize() method to terminate the string
// instead.
inline void AddCharacter(char c);
// Add an entire string to the builder. Uses strlen() internally to
// compute the length of the input string.
void AddString(const char* s);
// Add the first 'n' characters of the given string 's' to the
// builder. The input string must have enough characters.
void AddSubstring(const char* s, int n);
// Add formatted contents to the builder just like printf().
void AddFormatted(const char* format, ...);
// Add character padding to the builder. If count is non-positive,
// nothing is added to the builder.
void AddPadding(char c, int count);
// Finalize the string by 0-terminating it and returning the buffer.
char* Finalize();
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
char* buffer_;
int size_;
int position_;
bool is_finalized() const { return position_ < 0; }
};
StringBuilder::StringBuilder(int size) {
buffer_ = NewArray<char>(size);
size_ = size;
position_ = 0;
}
inline int StringBuilder::position() const {
ASSERT(!is_finalized());
return position_;
}
inline void StringBuilder::AddCharacter(char c) {
ASSERT(c != '\0');
ASSERT(!is_finalized() && position_ < size_);
buffer_[position_++] = c;
}
void StringBuilder::AddString(const char* s) {
AddSubstring(s, strlen(s));
}
void StringBuilder::AddSubstring(const char* s, int n) {
ASSERT(!is_finalized() && position_ + n < size_);
ASSERT(static_cast<size_t>(n) <= strlen(s));
memcpy(&buffer_[position_], s, n * kCharSize);
position_ += n;
}
void StringBuilder::AddFormatted(const char* format, ...) {
ASSERT(!is_finalized() && position_ < size_);
va_list args;
va_start(args, format);
int remaining = size_ - position_;
int n = OS::VSNPrintF(&buffer_[position_], remaining, format, args);
va_end(args);
if (n < 0 || n >= remaining) {
position_ = size_;
} else {
position_ += n;
}
}
void StringBuilder::AddPadding(char c, int count) {
for (int i = 0; i < count; i++) {
AddCharacter(c);
}
}
char* StringBuilder::Finalize() {
ASSERT(!is_finalized() && position_ < size_);
buffer_[position_] = '\0';
// Make sure nobody managed to add a 0-character to the
// buffer while building the string.
ASSERT(strlen(buffer_) == static_cast<size_t>(position_));
position_ = -1;
ASSERT(is_finalized());
return buffer_;
}
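Since StringBuilder moves to utils.h in this change, a minimal usage sketch based on the API above may be helpful; the DeleteArray call assumes the NewArray<char> allocation mentioned in the constructor comment:
// StringBuilder builder(64);
// builder.AddString("result: ");
// builder.AddFormatted("%d of %d", 3, 8);
// char* text = builder.Finalize();  // 0-terminates and hands over the buffer
// ... use text ...
// DeleteArray(text);                // caller owns the NewArray<char> buffer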
int HexValue(uc32 c) {
if ('0' <= c && c <= '9')
return c - '0';

View File

@ -26,8 +26,9 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// CPU specific code for arm independent of OS goes here.
#if defined(__arm__)
#include <sys/syscall.h> // for cache flushing.
#endif
#include "v8.h"

View File

@ -37,6 +37,7 @@
#include "global-handles.h"
#include "natives.h"
#include "stub-cache.h"
#include "log.h"
namespace v8 { namespace internal {
@ -44,6 +45,7 @@ DEFINE_bool(remote_debugging, false, "enable remote debugging");
DEFINE_int(debug_port, 5858, "port for remote debugging");
DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response");
DECLARE_bool(allow_natives_syntax);
DECLARE_bool(log_debugger);
static void PrintLn(v8::Local<v8::Value> value) {
@ -59,35 +61,6 @@ static void PrintLn(v8::Local<v8::Value> value) {
}
PendingRequest::PendingRequest(const uint16_t* json_request, int length)
: json_request_(Vector<uint16_t>::empty()),
next_(NULL) {
// Copy the request.
json_request_ =
Vector<uint16_t>(const_cast<uint16_t *>(json_request), length).Clone();
}
PendingRequest::~PendingRequest() {
// Deallocate what was allocated.
if (!json_request_.is_empty()) {
json_request_.Dispose();
}
}
Handle<String> PendingRequest::request() {
// Create a string in the heap from the pending request.
if (!json_request_.is_empty()) {
return Factory::NewStringFromTwoByte(
Vector<const uint16_t>(
reinterpret_cast<const uint16_t*>(json_request_.start()),
json_request_.length()));
} else {
return Handle<String>();
}
}
static Handle<Code> ComputeCallDebugBreak(int argc) {
CALL_HEAP_FUNCTION(StubCache::ComputeCallDebugBreak(argc), Code);
}
@ -1016,7 +989,7 @@ bool Debug::StepNextContinue(BreakLocationIterator* break_location_iterator,
// object.
bool Debug::IsDebugBreak(Address addr) {
Code* code = GetCodeTarget(addr);
return code->state() == DEBUG_BREAK;
return code->ic_state() == DEBUG_BREAK;
}
@ -1326,80 +1299,6 @@ DebugMessageThread* Debugger::message_thread_ = NULL;
v8::DebugMessageHandler Debugger::debug_message_handler_ = NULL;
void* Debugger::debug_message_handler_data_ = NULL;
Mutex* Debugger::pending_requests_access_ = OS::CreateMutex();
PendingRequest* Debugger::pending_requests_head_ = NULL;
PendingRequest* Debugger::pending_requests_tail_ = NULL;
void Debugger::DebugRequest(const uint16_t* json_request, int length) {
// Create a pending request.
PendingRequest* pending_request = new PendingRequest(json_request, length);
// Add the pending request to list.
Guard with(pending_requests_access_);
if (pending_requests_head_ == NULL) {
ASSERT(pending_requests_tail_ == NULL);
pending_requests_head_ = pending_request;
pending_requests_tail_ = pending_request;
} else {
ASSERT(pending_requests_tail_ != NULL);
pending_requests_tail_->set_next(pending_request);
pending_requests_tail_ = pending_request;
}
// Set the pending request flag to force the VM to stop soon.
v8::Debug::DebugBreak();
}
bool Debugger::ProcessPendingRequests() {
HandleScope scope;
// Lock access to pending requests list while processing them. Typically
// there will be either zero or one pending request.
Guard with(pending_requests_access_);
EnterDebuggerContext enter;
// Get the current execution state.
bool caught_exception;
Handle<Object> exec_state = MakeExecutionState(&caught_exception);
if (caught_exception) {
return false;
}
// Process the list of pending requests.
bool plain_break = false;
PendingRequest* pending_request = pending_requests_head_;
if (pending_request == NULL) {
// If there are no pending commands, a plain break was issued some other
// way (e.g. a debugger statement).
plain_break = true;
}
while (pending_request != NULL) {
Handle<String> response = ProcessRequest(exec_state,
pending_request->request(),
false);
OnPendingRequestProcessed(response);
// Check whether one of the commands is a plain break request.
if (!plain_break) {
plain_break = IsPlainBreakRequest(pending_request->request());
}
// Move to the next item in the list.
PendingRequest* next = pending_request->next();
delete pending_request;
pending_request = next;
}
// List processed.
pending_requests_head_ = NULL;
pending_requests_tail_ = NULL;
return plain_break;
}
Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
int argc, Object*** argv,
@ -1536,7 +1435,6 @@ bool Debugger::IsPlainBreakRequest(Handle<Object> request) {
if (caught_exception) {
return false;
}
return *result == Heap::true_value();
}
@ -1684,7 +1582,6 @@ void Debugger::OnAfterCompile(Handle<Script> script, Handle<JSFunction> fun) {
if (caught_exception) {
return;
}
// Bail out based on state or if there is no listener for this event
if (Debug::InDebugger()) return;
if (!Debugger::EventActive(v8::AfterCompile)) return;
@ -1697,7 +1594,6 @@ void Debugger::OnAfterCompile(Handle<Script> script, Handle<JSFunction> fun) {
if (caught_exception) {
return;
}
// Process debug event
ProcessDebugEvent(v8::AfterCompile, event_data);
}
@ -1724,18 +1620,11 @@ void Debugger::OnNewFunction(Handle<JSFunction> function) {
if (caught_exception) {
return;
}
// Process debug event.
ProcessDebugEvent(v8::NewFunction, event_data);
}
void Debugger::OnPendingRequestProcessed(Handle<Object> event_data) {
// Process debug event.
ProcessDebugEvent(v8::PendingRequestProcessed, event_data);
}
void Debugger::ProcessDebugEvent(v8::DebugEvent event,
Handle<Object> event_data) {
// Create the execution state.
@ -1744,12 +1633,10 @@ void Debugger::ProcessDebugEvent(v8::DebugEvent event,
if (caught_exception) {
return;
}
// First notify the builtin debugger.
if (message_thread_ != NULL) {
message_thread_->DebugEvent(event, exec_state, event_data);
}
// Notify registered debug event listeners. The list can contain both C and
// JavaScript functions.
v8::NeanderArray listeners(Factory::debug_event_listeners());
@ -1799,6 +1686,10 @@ void Debugger::SetMessageHandler(v8::DebugMessageHandler handler, void* data) {
}
// Posts an output message from the debugger to the debug_message_handler
// callback. This callback is part of the public API. Messages are
// kept internally as Vector<uint16_t> strings, which are allocated in various
// places and deallocated by the calling function sometime after this call.
void Debugger::SendMessage(Vector< uint16_t> message) {
if (debug_message_handler_ != NULL) {
debug_message_handler_(message.start(), message.length(),
@ -1823,7 +1714,6 @@ void Debugger::UpdateActiveDebugger() {
for (int i = 0; i < length && !active_listener; i++) {
active_listener = !listeners.get(i)->IsUndefined();
}
set_debugger_active((Debugger::message_thread_ != NULL &&
Debugger::debug_message_handler_ != NULL) ||
active_listener);
@ -1834,22 +1724,29 @@ void Debugger::UpdateActiveDebugger() {
DebugMessageThread::DebugMessageThread()
: host_running_(true),
event_json_(Vector<uint16_t>::empty()),
command_(Vector<uint16_t>::empty()),
result_(Vector<uint16_t>::empty()) {
command_queue_(kQueueInitialSize),
message_queue_(kQueueInitialSize) {
command_received_ = OS::CreateSemaphore(0);
debug_event_ = OS::CreateSemaphore(0);
debug_command_ = OS::CreateSemaphore(0);
debug_result_ = OS::CreateSemaphore(0);
message_received_ = OS::CreateSemaphore(0);
}
// Does not free resources held by DebugMessageThread
// because this cannot be done thread-safely.
DebugMessageThread::~DebugMessageThread() {
}
void DebugMessageThread::SetEventJSON(Vector<uint16_t> event_json) {
SetVector(&event_json_, event_json);
// Puts an event coming from V8 on the queue. Creates a copy of the
// JSON-formatted event string managed by V8.
// Called by the V8 thread.
// The new copy of the event string is destroyed in Run().
void DebugMessageThread::SendMessage(Vector<uint16_t> message) {
Vector<uint16_t> message_copy = message.Clone();
if (FLAG_log_debugger) {
Logger::StringEvent("Put message on event message_queue.", "");
}
message_queue_.Put(message_copy);
message_received_->Signal();
}
@ -1862,60 +1759,24 @@ void DebugMessageThread::SetEventJSONFromEvent(Handle<Object> event_data) {
v8::Local<v8::Function> fun =
v8::Function::Cast(*api_event_data->Get(fun_name));
v8::TryCatch try_catch;
v8::Local<v8::Value> json_result = *fun->Call(api_event_data, 0, NULL);
v8::Local<v8::String> json_result_string;
v8::Local<v8::Value> json_event = *fun->Call(api_event_data, 0, NULL);
v8::Local<v8::String> json_event_string;
if (!try_catch.HasCaught()) {
if (!json_result->IsUndefined()) {
json_result_string = json_result->ToString();
if (!json_event->IsUndefined()) {
json_event_string = json_event->ToString();
if (FLAG_trace_debug_json) {
PrintLn(json_result_string);
PrintLn(json_event_string);
}
v8::String::Value val(json_result_string);
v8::String::Value val(json_event_string);
Vector<uint16_t> str(reinterpret_cast<uint16_t*>(*val),
json_result_string->Length());
SetEventJSON(str);
json_event_string->Length());
SendMessage(str);
} else {
SetEventJSON(Vector<uint16_t>::empty());
SendMessage(Vector<uint16_t>::empty());
}
} else {
PrintLn(try_catch.Exception());
SetEventJSON(Vector<uint16_t>::empty());
}
}
void DebugMessageThread::SetCommand(Vector<uint16_t> command) {
SetVector(&command_, command);
}
void DebugMessageThread::SetResult(const char* result) {
int len = strlen(result);
uint16_t* tmp = NewArray<uint16_t>(len);
for (int i = 0; i < len; i++) {
tmp[i] = result[i];
}
SetResult(Vector<uint16_t>(tmp, len));
DeleteArray(tmp);
}
void DebugMessageThread::SetResult(Vector<uint16_t> result) {
SetVector(&result_, result);
}
void DebugMessageThread::SetVector(Vector<uint16_t>* vector,
Vector<uint16_t> value) {
// Deallocate current result.
if (!vector->is_empty()) {
vector->Dispose();
*vector = Vector<uint16_t>::empty();
}
// Allocate a copy of the new result.
if (!value.is_empty()) {
*vector = value.Clone();
SendMessage(Vector<uint16_t>::empty());
}
}
@ -1935,31 +1796,17 @@ bool DebugMessageThread::TwoByteEqualsAscii(Vector<uint16_t> two_byte,
}
void DebugMessageThread::CommandResult(Vector<uint16_t> result) {
SetResult(result);
debug_result_->Signal();
}
void DebugMessageThread::Run() {
// Process commands and debug events.
// Sends debug events to an installed debugger message callback.
while (true) {
// Set the current command prompt
Semaphore* sems[2];
sems[0] = command_received_;
sems[1] = debug_event_;
int signal = Select(2, sems).WaitSingle();
if (signal == 0) {
if (command_.length() > 0) {
HandleCommand();
if (result_.length() > 0) {
Debugger::SendMessage(result_);
SetResult(Vector<uint16_t>::empty());
}
}
} else {
// Send the current event as JSON to the debugger.
Debugger::SendMessage(event_json_);
// Wait and Get are paired so that semaphore count equals queue length.
message_received_->Wait();
if (FLAG_log_debugger) {
Logger::StringEvent("Get message from event message_queue.", "");
}
Vector<uint16_t> message = message_queue_.Get();
if (message.length() > 0) {
Debugger::SendMessage(message);
}
}
}
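The Run() loop above relies on the invariant stated in its comment: every Put is paired with a Signal and every Get with a Wait, so the semaphore count always equals the queue length. Schematically:
// Producer (V8 thread):             Consumer (message thread):
//   message_queue_.Put(msg);          message_received_->Wait();  // blocks
//   message_received_->Signal();      msg = message_queue_.Get(); // safe;
//                                     // Wait guaranteed a queued message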
@ -1987,18 +1834,6 @@ void DebugMessageThread::DebugEvent(v8::DebugEvent event,
break;
case v8::NewFunction:
break;
case v8::PendingRequestProcessed: {
// For a processed pending request the event_data is the JSON response
// string.
v8::Handle<v8::String> str =
v8::Handle<v8::String>(
Utils::ToLocal(Handle<String>::cast(event_data)));
v8::String::Value val(str);
SetEventJSON(Vector<uint16_t>(reinterpret_cast<uint16_t*>(*val),
str->Length()));
debug_event_->Signal();
break;
}
default:
UNREACHABLE();
}
@ -2021,15 +1856,77 @@ void DebugMessageThread::DebugEvent(v8::DebugEvent event,
return;
}
// Notify the debug session thread that a debug event has occurred.
host_running_ = false;
event_ = event;
SetEventJSONFromEvent(event_data);
debug_event_->Signal();
// First process all pending commands in the queue. During this processing
// each message is checked to see if it is a plain break command. If there is
// a plain break request in the queue, or if the queue is empty, a break event
// is sent to the debugger.
bool plain_break = false;
if (command_queue_.IsEmpty()) {
plain_break = true;
} else {
// Drain queue.
while (!command_queue_.IsEmpty()) {
command_received_->Wait();
if (FLAG_log_debugger) {
Logger::StringEvent(
"Get command from command_queue, in drain queue loop.",
"");
}
Vector<uint16_t> command = command_queue_.Get();
// Support for sending a break command as just "break" instead of an
// actual JSON break command.
// If break is made into a separate API call, function
// TwoByteEqualsAscii can be removed.
if (TwoByteEqualsAscii(command, "break")) {
plain_break = true;
continue;
}
// Wait for commands from the debug session.
// Get the command as a string object.
Handle<String> command_string;
if (!command.is_empty()) {
command_string = Factory::NewStringFromTwoByte(
Vector<const uint16_t>(
reinterpret_cast<const uint16_t*>(
command.start()),
command.length()));
} else {
command_string = Handle<String>();
}
// Process the request.
Handle<String> message_string = Debugger::ProcessRequest(exec_state,
command_string,
false);
// Convert text result to UTF-16 string and send it.
v8::String::Value val(Utils::ToLocal(message_string));
Vector<uint16_t> message(reinterpret_cast<uint16_t*>(*val),
message_string->length());
SendMessage(message);
// Check whether one of the commands is a plain break request.
if (!plain_break) {
plain_break = Debugger::IsPlainBreakRequest(message_string);
}
}
}
// If this break event is not to go to the debugger just return.
if (!plain_break) return;
// Notify the debugger that a debug event has occurred.
host_running_ = false;
SetEventJSONFromEvent(event_data);
// Wait for commands from the debugger.
while (true) {
debug_command_->Wait();
command_received_->Wait();
if (FLAG_log_debugger) {
Logger::StringEvent(
"Get command from command queue, in interactive loop.",
"");
}
Vector<uint16_t> command = command_queue_.Get();
ASSERT(!host_running_);
if (!Debugger::debugger_active()) {
host_running_ = true;
@ -2037,7 +1934,7 @@ void DebugMessageThread::DebugEvent(v8::DebugEvent event,
}
// Invoke the JavaScript to convert the debug command line to a JSON
// request, invoke the JSON request and convert the JSON respose to a text
// request, invoke the JSON request and convert the JSON response to a text
// representation.
v8::Local<v8::String> fun_name;
v8::Local<v8::Function> fun;
@ -2045,8 +1942,8 @@ void DebugMessageThread::DebugEvent(v8::DebugEvent event,
v8::TryCatch try_catch;
fun_name = v8::String::New("processDebugCommand");
fun = v8::Function::Cast(*cmd_processor->Get(fun_name));
args[0] = v8::String::New(reinterpret_cast<uint16_t*>(command_.start()),
command_.length());
args[0] = v8::String::New(reinterpret_cast<uint16_t*>(command.start()),
command.length());
v8::Local<v8::Value> result_val = fun->Call(cmd_processor, 1, args);
// Get the result of the command.
@ -2083,13 +1980,11 @@ void DebugMessageThread::DebugEvent(v8::DebugEvent event,
Vector<uint16_t> str(reinterpret_cast<uint16_t*>(*val),
result_string->Length());
// Change the prompt if VM is running after this command.
if (running) {
host_running_ = true;
}
// Set host_running_ correctly for nested debugger evaluations.
host_running_ = running;
// Return the result.
CommandResult(str);
SendMessage(str);
// Return from debug event processing if the VM should be running.
if (running) {
@ -2099,34 +1994,132 @@ void DebugMessageThread::DebugEvent(v8::DebugEvent event,
}
void DebugMessageThread::HandleCommand() {
// Handle the command.
if (TwoByteEqualsAscii(command_, "b") ||
TwoByteEqualsAscii(command_, "break")) {
v8::Debug::DebugBreak();
SetResult("request queued");
} else if (host_running_) {
// Send the JSON command to the running VM.
Debugger::DebugRequest(command_.start(), command_.length());
SetResult("request queued");
} else {
debug_command_->Signal();
debug_result_->Wait();
}
}
// Puts a command coming from the public API on the queue. Creates
// a copy of the command string managed by the debugger. Up to this
// point, the command data was managed by the API client. Called
// by the API client thread. This is where the API client hands off
// processing of the command to the DebugMessageThread thread.
// The new copy of the command is destroyed in HandleCommand().
void DebugMessageThread::ProcessCommand(Vector<uint16_t> command) {
SetCommand(command);
Vector<uint16_t> command_copy = command.Clone();
if (FLAG_log_debugger) {
Logger::StringEvent("Put command on command_queue.", "");
}
command_queue_.Put(command_copy);
// If not in a break schedule a break and send the "request queued" response.
if (host_running_) {
v8::Debug::DebugBreak();
uint16_t buffer[14] = {'r', 'e', 'q', 'u', 'e', 's', 't', ' ',
'q', 'u', 'e', 'u', 'e', 'd'};
SendMessage(Vector<uint16_t>(buffer, 14));
}
command_received_->Signal();
}
void DebugMessageThread::OnDebuggerInactive() {
// If stopped in a break, send an empty command to make JavaScript run
// again when the debugger is closed.
if (!host_running_) {
debug_command_->Signal();
SetResult("");
ProcessCommand(Vector<uint16_t>::empty());
}
}
MessageQueue::MessageQueue(int size) : start_(0), end_(0), size_(size) {
messages_ = NewArray<Vector<uint16_t> >(size);
}
MessageQueue::~MessageQueue() {
DeleteArray(messages_);
}
Vector<uint16_t> MessageQueue::Get() {
ASSERT(!IsEmpty());
int result = start_;
start_ = (start_ + 1) % size_;
return messages_[result];
}
void MessageQueue::Put(const Vector<uint16_t>& message) {
if ((end_ + 1) % size_ == start_) {
Expand();
}
messages_[end_] = message;
end_ = (end_ + 1) % size_;
}
void MessageQueue::Expand() {
MessageQueue new_queue(size_ * 2);
while (!IsEmpty()) {
new_queue.Put(Get());
}
Vector<uint16_t>* array_to_free = messages_;
*this = new_queue;
new_queue.messages_ = array_to_free;
// Automatic destructor called on new_queue, freeing array_to_free.
}
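To make the circular-buffer arithmetic concrete, here is a short trace assuming size_ == 4, so the queue holds at most size_ - 1 == 3 messages before Put triggers Expand:
// start_ == end_ == 0                      => IsEmpty()
// Put(a): messages_[0] = a, end_ = 1
// Put(b): messages_[1] = b, end_ = 2
// Put(c): messages_[2] = c, end_ = 3
// Put(d): (3 + 1) % 4 == 0 == start_       => Expand() to size_ == 8,
//         then messages_[3] = d, end_ = 4
// Get():  returns messages_[0] (a), start_ = 1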
LockingMessageQueue::LockingMessageQueue(int size) : queue_(size) {
lock_ = OS::CreateMutex();
}
LockingMessageQueue::~LockingMessageQueue() {
delete lock_;
}
bool LockingMessageQueue::IsEmpty() const {
ScopedLock sl(lock_);
return queue_.IsEmpty();
}
Vector<uint16_t> LockingMessageQueue::Get() {
ScopedLock sl(lock_);
Vector<uint16_t> result = queue_.Get();
// Logging code for debugging debugger.
if (FLAG_log_debugger) {
LogQueueOperation("Get", result);
}
return result;
}
void LockingMessageQueue::Put(const Vector<uint16_t>& message) {
ScopedLock sl(lock_);
queue_.Put(message);
// Logging code for debugging debugger.
if (FLAG_log_debugger) {
LogQueueOperation("Put", message);
}
}
void LockingMessageQueue::Clear() {
ScopedLock sl(lock_);
queue_.Clear();
}
void LockingMessageQueue::LogQueueOperation(const char* operation_name,
Vector<uint16_t> parameter) {
StringBuilder s(23 + parameter.length() + strlen(operation_name) + 1);
s.AddFormatted("Time: %15.3f %s ", OS::TimeCurrentMillis(), operation_name);
for (int i = 0; i < parameter.length(); ++i) {
s.AddCharacter(static_cast<char>(parameter[i]));
}
char* result_string = s.Finalize();
Logger::StringEvent(result_string, "");
DeleteArray(result_string);
}
} } // namespace v8::internal

View File

@ -234,7 +234,7 @@ class Debug {
return reinterpret_cast<Address *>(&registers_[r]);
}
// Addres of the debug break return entry code.
// Address of the debug break return entry code.
static Code* debug_break_return_entry() { return debug_break_return_entry_; }
// Support for getting the address of the debug break on return code.
@ -321,14 +321,11 @@ class Debug {
};
class PendingRequest;
class DebugMessageThread;
class Debugger {
public:
static void DebugRequest(const uint16_t* json_request, int length);
static bool ProcessPendingRequests();
static Handle<Object> MakeJSObject(Vector<const char> constructor_name,
int argc, Object*** argv,
@ -357,7 +354,6 @@ class Debugger {
static void OnAfterCompile(Handle<Script> script,
Handle<JSFunction> fun);
static void OnNewFunction(Handle<JSFunction> fun);
static void OnPendingRequestProcessed(Handle<Object> event_data);
static void ProcessDebugEvent(v8::DebugEvent event,
Handle<Object> event_data);
static void SetMessageHandler(v8::DebugMessageHandler handler, void* data);
@ -384,68 +380,91 @@ class Debugger {
static DebugMessageThread* message_thread_;
static v8::DebugMessageHandler debug_message_handler_;
static void* debug_message_handler_data_;
// Head and tail of linked list of pending commands. The list is protected
// by a mutex as it can be updated/read from different threads.
static Mutex* pending_requests_access_;
static PendingRequest* pending_requests_head_;
static PendingRequest* pending_requests_tail_;
};
// Linked list of pending requests issued by debugger while V8 was running.
class PendingRequest {
// A Queue of Vector<uint16_t> objects. A thread-safe version is
// LockingMessageQueue, based on this class.
class MessageQueue BASE_EMBEDDED {
public:
PendingRequest(const uint16_t* json_request, int length);
~PendingRequest();
PendingRequest* next() { return next_; }
void set_next(PendingRequest* next) { next_ = next; }
Handle<String> request();
explicit MessageQueue(int size);
~MessageQueue();
bool IsEmpty() const { return start_ == end_; }
Vector<uint16_t> Get();
void Put(const Vector<uint16_t>& message);
void Clear() { start_ = end_ = 0; } // Queue is empty after Clear().
private:
Vector<uint16_t> json_request_; // Request string.
PendingRequest* next_; // Next pointer for linked list.
// Doubles the size of the message queue, and copies the messages.
void Expand();
Vector<uint16_t>* messages_;
int start_;
int end_;
int size_; // The size of the queue buffer. Queue can hold size-1 messages.
};
// LockingMessageQueue is a thread-safe circular buffer of Vector<uint16_t>
// messages. The message data is not managed by LockingMessageQueue.
// Pointers to the data are passed in and out. Implemented by adding a
// Mutex to MessageQueue.
class LockingMessageQueue BASE_EMBEDDED {
public:
explicit LockingMessageQueue(int size);
~LockingMessageQueue();
bool IsEmpty() const;
Vector<uint16_t> Get();
void Put(const Vector<uint16_t>& message);
void Clear();
private:
// Logs a timestamp, operation name, and operation argument
void LogQueueOperation(const char* operation_name,
Vector<uint16_t> parameter);
MessageQueue queue_;
Mutex* lock_;
DISALLOW_EVIL_CONSTRUCTORS(LockingMessageQueue);
};
/* This class is the data for a running thread that serializes
* event messages and command processing for the debugger.
* Unless noted otherwise, methods are called only from this message thread.
*/
class DebugMessageThread: public Thread {
public:
DebugMessageThread();
virtual ~DebugMessageThread();
DebugMessageThread(); // Called from API thread.
virtual ~DebugMessageThread(); // Never called.
// Called by V8 thread. Reports events from V8 VM.
// Also handles command processing in stopped state of V8,
// when host_running_ is false.
void DebugEvent(v8::DebugEvent,
Handle<Object> exec_state,
Handle<Object> event_data);
void SetEventJSON(Vector<uint16_t> event_json);
// Puts event on the output queue. Called by V8.
// This is where V8 hands off
// processing of the event to the DebugMessageThread thread,
// which forwards it to the debug_message_handler set by the API.
void SendMessage(Vector<uint16_t> event_json);
// Formats an event into JSON, and calls SendMessage.
void SetEventJSONFromEvent(Handle<Object> event_data);
void SetCommand(Vector<uint16_t> command);
void SetResult(const char* result);
void SetResult(Vector<uint16_t> result);
void CommandResult(Vector<uint16_t> result);
// Puts a command coming from the public API on the queue. Called
// by the API client thread. This is where the API client hands off
// processing of the command to the DebugMessageThread thread.
void ProcessCommand(Vector<uint16_t> command);
void OnDebuggerInactive();
protected:
// Main function of DebugMessageThread thread.
void Run();
void HandleCommand();
bool host_running_; // Is the debugging host running or stopped
v8::DebugEvent event_; // Active event
Semaphore* command_received_; // Signal from the telnet connection
Semaphore* debug_event_; // Signal from the V8 thread
Semaphore* debug_command_; // Signal to the V8 thread
Semaphore* debug_result_; // Signal from the V8 thread
bool host_running_; // Is the debugging host running or stopped?
Semaphore* command_received_; // Non-zero when command queue is non-empty.
Semaphore* message_received_; // Exactly equal to message queue length.
private:
void SetVector(Vector<uint16_t>* vector, Vector<uint16_t> value);
bool TwoByteEqualsAscii(Vector<uint16_t> two_byte, const char* ascii);
Vector<uint16_t> event_json_; // Active event JSON.
Vector<uint16_t> command_; // Current command.
Vector<uint16_t> result_; // Result of processing command.
static const int kQueueInitialSize = 4;
LockingMessageQueue command_queue_;
LockingMessageQueue message_queue_;
DISALLOW_EVIL_CONSTRUCTORS(DebugMessageThread);
};

View File

@ -145,27 +145,30 @@ void Decoder::PrintShiftRm(Instr* instr) {
int rm = instr->RmField();
PrintRegister(rm);
if ((shift != LSL) || (shift_amount != 0)) {
if (instr->RegShiftField() == 0) {
// by immediate
if ((shift == ROR) && (shift_amount == 0)) {
Print(", RRX");
return;
} else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
shift_amount = 32;
}
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_size_ - out_buffer_pos_,
", %s #%d",
shift_names[shift], shift_amount);
} else {
// by register
int rs = instr->RsField();
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_size_ - out_buffer_pos_,
", %s ", shift_names[shift]);
PrintRegister(rs);
if ((instr->RegShiftField() == 0) && (shift == LSL) && (shift_amount == 0)) {
// Special case for using rm only.
return;
}
if (instr->RegShiftField() == 0) {
// by immediate
if ((shift == ROR) && (shift_amount == 0)) {
Print(", RRX");
return;
} else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
shift_amount = 32;
}
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_size_ - out_buffer_pos_,
", %s #%d",
shift_names[shift], shift_amount);
} else {
// by register
int rs = instr->RsField();
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_size_ - out_buffer_pos_,
", %s ", shift_names[shift]);
PrintRegister(rs);
}
}
@ -799,6 +802,11 @@ void Decoder::DecodeType7(Instr* instr) {
// Disassemble the instruction at *instr_ptr into the output buffer.
int Decoder::InstructionDecode(byte* instr_ptr) {
Instr* instr = Instr::At(instr_ptr);
// Print raw instruction bytes.
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_size_ - out_buffer_pos_,
"%08x ",
instr->InstructionBits());
if (instr->ConditionField() == special_condition) {
Format(instr, "break 'msg");
return Instr::kInstrSize;
@ -921,6 +929,16 @@ int Disassembler::InstructionDecode(char* buffer, const int buffer_size,
}
int Disassembler::ConstantPoolSizeAt(byte* instruction) {
int instruction_bits = *(reinterpret_cast<int*>(instruction));
if ((instruction_bits & 0xfff00000) == 0x03000000) {
return instruction_bits & 0x0000ffff;
} else {
return -1;
}
}
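A worked example of the marker encoding tested above: the top twelve bits 0x030 flag a constant-pool marker and the low sixteen bits carry the entry count.
// instruction_bits == 0x03000010:
//   0x03000010 & 0xfff00000 == 0x03000000  => constant pool marker
//   0x03000010 & 0x0000ffff == 0x10        => pool holds 16 entries
// An ordinary word such as 0xe1a00000 (mov r0, r0) fails the mask test,
// so ConstantPoolSizeAt returns -1.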
void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
Disassembler d;
for (byte* pc = begin; pc < end;) {
@ -928,16 +946,8 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
buffer[0] = '\0';
byte* prev_pc = pc;
pc += d.InstructionDecode(buffer, sizeof buffer, pc);
fprintf(f, "%p", prev_pc);
fprintf(f, " ");
for (byte* bp = prev_pc; bp < pc; bp++) {
fprintf(f, "%02x", *bp);
}
for (int i = 6 - (pc - prev_pc); i >= 0; i--) {
fprintf(f, " ");
}
fprintf(f, " %s\n", buffer);
fprintf(f, "%p %08x %s\n",
prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
}
}

View File

@ -28,28 +28,12 @@
#include <assert.h>
#include <stdio.h>
#include <stdarg.h>
#ifndef WIN32
#include <stdint.h>
#endif
#include "v8.h"
#include "disasm.h"
namespace disasm {
// Windows is missing the stdint.h header file
#ifdef WIN32
typedef signed char int8_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef int int32_t;
#endif
#define UNIMPLEMENTED() \
assert(false)
#define UNREACHABLE() \
assert(false)
enum OperandOrder {
UNSET_OP_ORDER = 0,
REG_OPER_OP_ORDER,
@ -653,6 +637,7 @@ int DisassemblerIA32::FPUInstruction(byte* data) {
const char* mnem = "?";
switch (regop) {
case eax: mnem = "fild_s"; break;
case edx: mnem = "fist_s"; break;
case ebx: mnem = "fistp_s"; break;
default: UnimplementedInstruction();
}
@ -691,6 +676,10 @@ int DisassemblerIA32::FPUInstruction(byte* data) {
}
AppendToBuffer("%s%s st%d", mnem, is_pop ? "p" : "", b2 & 0x7);
return 2;
} else if (b1 == 0xDA && b2 == 0xE9) {
const char* mnem = "fucompp";
AppendToBuffer("%s", mnem);
return 2;
}
AppendToBuffer("Unknown FP instruction");
return 2;
@ -965,6 +954,7 @@ int DisassemblerIA32::InstructionDecode(char* out_buffer,
break;
case 0xD9: // fall through
case 0xDA: // fall through
case 0xDB: // fall through
case 0xDC: // fall through
case 0xDD: // fall through
@ -1044,12 +1034,26 @@ int DisassemblerIA32::InstructionDecode(char* out_buffer,
}
int instr_len = data - instr;
if (instr_len == 0) instr_len = 1; // parse at least a byte
#ifdef WIN32
_snprintf(out_buffer, out_buffer_size, "%s", tmp_buffer_);
#else
snprintf(out_buffer, out_buffer_size, "%s", tmp_buffer_);
#endif
ASSERT(instr_len > 0); // Ensure progress.
int outp = 0;
// Instruction bytes.
for (byte* bp = instr; bp < data; bp++) {
outp += v8::internal::OS::SNPrintF(out_buffer + outp,
out_buffer_size - outp,
"%02x",
*bp);
}
for (int i = 6 - instr_len; i >= 0; i--) {
outp += v8::internal::OS::SNPrintF(out_buffer + outp,
out_buffer_size - outp,
" ");
}
outp += v8::internal::OS::SNPrintF(out_buffer + outp,
out_buffer_size - outp,
" %s",
tmp_buffer_);
return instr_len;
}
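The formatting above yields a fixed-width column of raw instruction bytes followed by the mnemonic. As an illustration (the exact mnemonic text is an assumption about the surrounding decoder, not taken from it):
// The three bytes 8b 45 08 decode as mov eax,[ebp+0x8], so the output
// line would look roughly like:
//   8b4508         mov eax,[ebp+0x8]
// with the hex column space-padded so mnemonics align across lines.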
@ -1124,6 +1128,10 @@ int Disassembler::InstructionDecode(char* buffer,
}
// The IA-32 assembler does not currently use constant pools.
int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
/*static*/ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
Disassembler d;
for (byte* pc = begin; pc < end;) {

View File

@ -61,6 +61,10 @@ class Disassembler {
// Returns the length of the disassembled machine instruction in bytes.
int InstructionDecode(char* buffer, const int buffer_size, byte* instruction);
// Returns -1 if instruction does not mark the beginning of a constant pool,
// or the number of entries in the constant pool beginning here.
int ConstantPoolSizeAt(byte* instruction);
// Write disassembly into specified file 'f' using specified NameConverter
// (see constructor).
static void Disassemble(FILE* f, byte* begin, byte* end);

View File

@ -1,280 +0,0 @@
// Copyright 2006-2008 Google Inc. All Rights Reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "debug.h"
#include "disasm.h"
#include "disassembler.h"
#include "macro-assembler.h"
#include "serialize.h"
#include "string-stream.h"
namespace v8 { namespace internal {
#ifdef ENABLE_DISASSEMBLER
void Disassembler::Dump(FILE* f, byte* begin, byte* end) {
for (byte* pc = begin; pc < end; pc++) {
if (f == NULL) {
PrintF("%p %4d %02x\n", pc, pc - begin, *pc);
} else {
fprintf(f, "%p %4d %02x\n", pc, pc - begin, *pc);
}
}
}
class V8NameConverter: public disasm::NameConverter {
public:
explicit V8NameConverter(Code* code) : code_(code) {}
virtual const char* NameOfAddress(byte* pc) const;
virtual const char* NameInCode(byte* addr) const;
Code* code() const { return code_; }
private:
Code* code_;
};
const char* V8NameConverter::NameOfAddress(byte* pc) const {
static char buffer[128];
const char* name = Builtins::Lookup(pc);
if (name != NULL) {
OS::SNPrintF(buffer, sizeof buffer, "%s (%p)", name, pc);
return buffer;
}
if (code_ != NULL) {
int offs = pc - code_->instruction_start();
// print as code offset, if it seems reasonable
if (0 <= offs && offs < code_->instruction_size()) {
OS::SNPrintF(buffer, sizeof buffer, "%d (%p)", offs, pc);
return buffer;
}
}
return disasm::NameConverter::NameOfAddress(pc);
}
const char* V8NameConverter::NameInCode(byte* addr) const {
// The V8NameConverter is used for well known code, so we can "safely"
// dereference pointers in generated code.
return (code_ != NULL) ? reinterpret_cast<const char*>(addr) : "";
}
static void DumpBuffer(FILE* f, char* buff) {
if (f == NULL) {
PrintF("%s", buff);
} else {
fprintf(f, "%s", buff);
}
}
static const int kOutBufferSize = 1024;
static const int kRelocInfoPosition = 57;
static int DecodeIt(FILE* f,
const V8NameConverter& converter,
byte* begin,
byte* end) {
ExternalReferenceEncoder ref_encoder;
char decode_buffer[128];
char out_buffer[kOutBufferSize];
const int sob = sizeof out_buffer;
byte* pc = begin;
disasm::Disassembler d(converter);
RelocIterator* it = NULL;
if (converter.code() != NULL) {
it = new RelocIterator(converter.code());
} else {
// No relocation information when printing code stubs.
}
int constants = -1; // no constants being decoded at the start
while (pc < end) {
// First decode instruction so that we know its length.
byte* prev_pc = pc;
if (constants > 0) {
OS::SNPrintF(decode_buffer, sizeof(decode_buffer), "%s", "constant");
constants--;
pc += 4;
} else {
int instruction_bits = *(reinterpret_cast<int*>(pc));
if ((instruction_bits & 0xfff00000) == 0x03000000) {
OS::SNPrintF(decode_buffer, sizeof(decode_buffer),
"%s", "constant pool begin");
constants = instruction_bits & 0x0000ffff;
pc += 4;
} else {
decode_buffer[0] = '\0';
pc += d.InstructionDecode(decode_buffer, sizeof decode_buffer, pc);
}
}
// Collect RelocInfo for this instruction (prev_pc .. pc-1)
List<const char*> comments(4);
List<byte*> pcs(1);
List<RelocMode> rmodes(1);
List<intptr_t> datas(1);
if (it != NULL) {
while (!it->done() && it->rinfo()->pc() < pc) {
if (is_comment(it->rinfo()->rmode())) {
// For comments just collect the text.
comments.Add(reinterpret_cast<const char*>(it->rinfo()->data()));
} else {
// For other reloc info collect all data.
pcs.Add(it->rinfo()->pc());
rmodes.Add(it->rinfo()->rmode());
datas.Add(it->rinfo()->data());
}
it->next();
}
}
int outp = 0; // pointer into out_buffer, implements append operation.
// Comments.
for (int i = 0; i < comments.length(); i++) {
outp += OS::SNPrintF(out_buffer + outp, sob - outp,
" %s\n", comments[i]);
}
// Write out comments and reset outp so that we can format the next line.
if (outp > 0) {
DumpBuffer(f, out_buffer);
outp = 0;
}
// Instruction address and instruction offset.
outp += OS::SNPrintF(out_buffer + outp, sob - outp,
"%p %4d ", prev_pc, prev_pc - begin);
// Instruction bytes.
ASSERT(pc - prev_pc == 4);
outp += OS::SNPrintF(out_buffer + outp,
sob - outp,
"%08x",
*reinterpret_cast<intptr_t*>(prev_pc));
for (int i = 6 - (pc - prev_pc); i >= 0; i--) {
outp += OS::SNPrintF(out_buffer + outp, sob - outp, " ");
}
outp += OS::SNPrintF(out_buffer + outp, sob - outp, " %s", decode_buffer);
// Print all the non-comment reloc info for this instruction.
for (int i = 0; i < pcs.length(); i++) {
// Put together the reloc info
RelocInfo relocinfo(pcs[i], rmodes[i], datas[i]);
// Indent the printing of the reloc info.
if (i == 0) {
// The first reloc info is printed after the disassembled instruction.
for (int p = outp; p < kRelocInfoPosition; p++) {
outp += OS::SNPrintF(out_buffer + outp, sob - outp, " ");
}
} else {
// Additional reloc infos are printed on separate lines.
outp += OS::SNPrintF(out_buffer + outp, sob - outp, "\n");
for (int p = 0; p < kRelocInfoPosition; p++) {
outp += OS::SNPrintF(out_buffer + outp, sob - outp, " ");
}
}
if (is_position(relocinfo.rmode())) {
outp += OS::SNPrintF(out_buffer + outp,
sob - outp,
" ;; debug: statement %d",
relocinfo.data());
} else if (relocinfo.rmode() == embedded_object) {
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
relocinfo.target_object()->ShortPrint(&accumulator);
SmartPointer<char> obj_name = accumulator.ToCString();
outp += OS::SNPrintF(out_buffer + outp, sob - outp,
" ;; object: %s",
*obj_name);
} else if (relocinfo.rmode() == external_reference) {
const char* reference_name =
ref_encoder.NameOfAddress(*relocinfo.target_reference_address());
outp += OS::SNPrintF(out_buffer + outp, sob - outp,
" ;; external reference (%s)",
reference_name);
} else if (relocinfo.rmode() == code_target) {
outp +=
OS::SNPrintF(out_buffer + outp, sob - outp,
" ;; code target (%s)",
converter.NameOfAddress(relocinfo.target_address()));
} else {
outp += OS::SNPrintF(out_buffer + outp, sob - outp,
" ;; %s%s",
#if defined(DEBUG)
RelocInfo::RelocModeName(relocinfo.rmode()),
#else
"reloc_info",
#endif
"");
}
}
outp += OS::SNPrintF(out_buffer + outp, sob - outp, "\n");
if (outp > 0) {
ASSERT(outp < kOutBufferSize);
DumpBuffer(f, out_buffer);
outp = 0;
}
}
delete it;
return pc - begin;
}
int Disassembler::Decode(FILE* f, byte* begin, byte* end) {
V8NameConverter defaultConverter(NULL);
return DecodeIt(f, defaultConverter, begin, end);
}
void Disassembler::Decode(FILE* f, Code* code) {
byte* begin = Code::cast(code)->instruction_start();
byte* end = begin + Code::cast(code)->instruction_size();
V8NameConverter v8NameConverter(code);
DecodeIt(f, v8NameConverter, begin, end);
}
#else // ENABLE_DISASSEMBLER
void Disassembler::Dump(FILE* f, byte* begin, byte* end) {}
int Disassembler::Decode(FILE* f, byte* begin, byte* end) { return 0; }
void Disassembler::Decode(FILE* f, Code* code) {}
#endif // ENABLE_DISASSEMBLER
} } // namespace v8::internal
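For orientation, a minimal sketch of how a caller might drive the interface above; the PrintCode wrapper and its use of a Code object obtained elsewhere are illustrative, not part of this change.

// Hypothetical driver for the Disassembler interface above. Passing a
// NULL file routes all output through PrintF (see DumpBuffer).
static void PrintCode(FILE* out, v8::internal::Code* code) {
  v8::internal::byte* begin = code->instruction_start();
  v8::internal::byte* end = begin + code->instruction_size();
  v8::internal::Disassembler::Dump(out, begin, end);  // raw hex dump
  v8::internal::Disassembler::Decode(out, code);      // full decode
}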

View File

@ -54,6 +54,7 @@ class V8NameConverter: public disasm::NameConverter {
public:
explicit V8NameConverter(Code* code) : code_(code) {}
virtual const char* NameOfAddress(byte* pc) const;
virtual const char* NameInCode(byte* addr) const;
Code* code() const { return code_; }
private:
Code* code_;
@ -82,12 +83,22 @@ const char* V8NameConverter::NameOfAddress(byte* pc) const {
}
static void DumpBuffer(FILE* f, char* buff) {
if (f == NULL) PrintF("%s", buff);
else fprintf(f, "%s", buff);
const char* V8NameConverter::NameInCode(byte* addr) const {
// The V8NameConverter is used for well known code, so we can "safely"
// dereference pointers in generated code.
return (code_ != NULL) ? reinterpret_cast<const char*>(addr) : "";
}
static const int kOutBufferSize = 1024;
static void DumpBuffer(FILE* f, char* buff) {
if (f == NULL) {
PrintF("%s", buff);
} else {
fprintf(f, "%s", buff);
}
}
static const int kOutBufferSize = 256 + String::kMaxShortPrintLength;
static const int kRelocInfoPosition = 57;
static int DecodeIt(FILE* f,
@ -100,7 +111,6 @@ static int DecodeIt(FILE* f,
char decode_buffer[128];
char out_buffer[kOutBufferSize];
const int sob = sizeof out_buffer;
byte* pc = begin;
disasm::Disassembler d(converter);
RelocIterator* it = NULL;
@ -109,12 +119,32 @@ static int DecodeIt(FILE* f,
} else {
// No relocation information when printing code stubs.
}
int constants = -1; // no constants being decoded at the start
while (pc < end) {
// First decode instruction so that we know its length.
byte* prev_pc = pc;
decode_buffer[0] = '\0';
pc += d.InstructionDecode(decode_buffer, sizeof decode_buffer, pc);
if (constants > 0) {
OS::SNPrintF(decode_buffer,
sizeof(decode_buffer),
"%08x constant",
*reinterpret_cast<int32_t*>(pc));
constants--;
pc += 4;
} else {
int num_const = d.ConstantPoolSizeAt(pc);
if (num_const >= 0) {
OS::SNPrintF(decode_buffer,
sizeof(decode_buffer),
"%08x constant pool begin",
*reinterpret_cast<int32_t*>(pc));
constants = num_const;
pc += 4;
} else {
decode_buffer[0] = '\0';
pc += d.InstructionDecode(decode_buffer, sizeof decode_buffer, pc);
}
}
// Collect RelocInfo for this instruction (prev_pc .. pc-1)
List<const char*> comments(4);
@ -136,32 +166,22 @@ static int DecodeIt(FILE* f,
}
}
int outp = 0; // pointer into out_buffer, implements append operation.
StringBuilder out(out_buffer, sizeof(out_buffer));
// Comments.
for (int i = 0; i < comments.length(); i++) {
outp += OS::SNPrintF(out_buffer + outp, sob - outp,
" %s\n", comments[i]);
out.AddFormatted(" %s\n", comments[i]);
}
// Write out the comments, then reset for formatting the next line.
if (outp > 0) {
DumpBuffer(f, out_buffer);
outp = 0;
}
DumpBuffer(f, out.Finalize());
out.Reset();
// Instruction address and instruction offset.
outp += OS::SNPrintF(out_buffer + outp, sob - outp,
"%p %4d ", prev_pc, prev_pc - begin);
out.AddFormatted("%p %4d ", prev_pc, prev_pc - begin);
// Instruction bytes.
for (byte* bp = prev_pc; bp < pc; bp++) {
outp += OS::SNPrintF(out_buffer + outp, sob - outp, "%02x", *bp);
}
for (int i = 6 - (pc - prev_pc); i >= 0; i--) {
outp += OS::SNPrintF(out_buffer + outp, sob - outp, " ");
}
outp += OS::SNPrintF(out_buffer + outp, sob - outp, " %s", decode_buffer);
// Instruction.
out.AddFormatted("%s", decode_buffer);
// Print all the non-comment reloc info for this instruction.
for (int i = 0; i < pcs.length(); i++) {
@ -171,40 +191,28 @@ static int DecodeIt(FILE* f,
// Indent the printing of the reloc info.
if (i == 0) {
// The first reloc info is printed after the disassembled instruction.
for (int p = outp; p < kRelocInfoPosition; p++) {
outp += OS::SNPrintF(out_buffer + outp, sob - outp, " ");
}
out.AddPadding(' ', kRelocInfoPosition - out.position());
} else {
// Additional reloc infos are printed on separate lines.
outp += OS::SNPrintF(out_buffer + outp, sob - outp, "\n");
for (int p = 0; p < kRelocInfoPosition; p++) {
outp += OS::SNPrintF(out_buffer + outp, sob - outp, " ");
}
out.AddFormatted("\n");
out.AddPadding(' ', kRelocInfoPosition);
}
if (is_position(relocinfo.rmode())) {
outp += OS::SNPrintF(out_buffer + outp,
sob - outp,
" ;; debug: statement %d",
relocinfo.data());
out.AddFormatted(" ;; debug: statement %d", relocinfo.data());
} else if (relocinfo.rmode() == embedded_object) {
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
relocinfo.target_object()->ShortPrint(&accumulator);
SmartPointer<char> obj_name = accumulator.ToCString();
outp += OS::SNPrintF(out_buffer + outp, sob - outp,
" ;; object: %s",
*obj_name);
out.AddFormatted(" ;; object: %s", *obj_name);
} else if (relocinfo.rmode() == external_reference) {
const char* reference_name =
ref_encoder.NameOfAddress(*relocinfo.target_reference_address());
outp += OS::SNPrintF(out_buffer + outp, sob - outp,
" ;; external reference (%s)",
reference_name);
out.AddFormatted(" ;; external reference (%s)", reference_name);
} else {
outp += OS::SNPrintF(out_buffer + outp, sob - outp,
" ;; %s",
RelocInfo::RelocModeName(relocinfo.rmode()));
out.AddFormatted(" ;; %s",
RelocInfo::RelocModeName(relocinfo.rmode()));
if (is_code_target(relocinfo.rmode())) {
Code* code = Debug::GetCodeTarget(relocinfo.target_address());
Code::Kind kind = code->kind();
@ -219,45 +227,32 @@ static int DecodeIt(FILE* f,
CodeStub::Major major_key = code->major_key();
uint32_t minor_key = CodeStub::MinorKeyFromKey(key);
ASSERT(major_key == CodeStub::MajorKeyFromKey(key));
outp += OS::SNPrintF(out_buffer + outp, sob - outp,
" (%s, %s, ",
Code::Kind2String(kind),
CodeStub::MajorName(code->major_key()));
out.AddFormatted(" (%s, %s, ",
Code::Kind2String(kind),
CodeStub::MajorName(code->major_key()));
switch (code->major_key()) {
case CodeStub::CallFunction:
outp += OS::SNPrintF(out_buffer + outp, sob - outp,
"argc = %d)",
minor_key);
out.AddFormatted("argc = %d)", minor_key);
break;
case CodeStub::Runtime: {
Runtime::FunctionId id =
static_cast<Runtime::FunctionId>(minor_key);
outp += OS::SNPrintF(out_buffer + outp, sob - outp,
"%s)",
Runtime::FunctionForId(id)->name);
out.AddFormatted("%s)", Runtime::FunctionForId(id)->name);
break;
}
default:
outp += OS::SNPrintF(out_buffer + outp, sob - outp,
"minor: %d)",
minor_key);
out.AddFormatted("minor: %d)", minor_key);
}
}
} else {
outp += OS::SNPrintF(out_buffer + outp, sob - outp,
" (%s)",
Code::Kind2String(kind));
out.AddFormatted(" (%s)", Code::Kind2String(kind));
}
}
}
}
outp += OS::SNPrintF(out_buffer + outp, sob - outp, "\n");
if (outp > 0) {
ASSERT(outp < kOutBufferSize);
DumpBuffer(f, out_buffer);
outp = 0;
}
out.AddString("\n");
DumpBuffer(f, out.Finalize());
out.Reset();
}
delete it;
@ -271,13 +266,14 @@ int Disassembler::Decode(FILE* f, byte* begin, byte* end) {
}
// Called by Code::CodePrint
// Called by Code::CodePrint.
void Disassembler::Decode(FILE* f, Code* code) {
byte* begin = Code::cast(code)->instruction_start();
byte* end = begin + Code::cast(code)->instruction_size();
V8NameConverter v8NameConverter(code);
DecodeIt(f, v8NameConverter, begin, end);
}
#else // ENABLE_DISASSEMBLER
void Disassembler::Dump(FILE* f, byte* begin, byte* end) {}

View File

@ -199,9 +199,12 @@ StackGuard::StackGuard() {
ASSERT(thread_local_.climit_ == kIllegalLimit);
thread_local_.initial_jslimit_ = thread_local_.jslimit_ =
GENERATED_CODE_STACK_LIMIT(kLimitSize);
GENERATED_CODE_STACK_LIMIT(kLimitSize);
// NOTE: The check for overflow is not safe as there is no guarantee that
// the running thread has its stack in all memory up to address 0x00000000.
thread_local_.initial_climit_ = thread_local_.climit_ =
reinterpret_cast<uintptr_t>(this) - kLimitSize;
reinterpret_cast<uintptr_t>(this) >= kLimitSize ?
reinterpret_cast<uintptr_t>(this) - kLimitSize : 0;
if (thread_local_.interrupt_flags_ != 0) {
set_limits(kInterruptLimit, access);
@ -271,9 +274,7 @@ bool StackGuard::IsInterrupted() {
void StackGuard::Interrupt() {
ExecutionAccess access;
thread_local_.interrupt_flags_ |= INTERRUPT;
if (!Top::is_break_no_lock()) {
set_limits(kInterruptLimit, access);
}
set_limits(kInterruptLimit, access);
}
@ -286,9 +287,7 @@ bool StackGuard::IsPreempted() {
void StackGuard::Preempt() {
ExecutionAccess access;
thread_local_.interrupt_flags_ |= PREEMPT;
if (!Top::is_break_no_lock()) {
set_limits(kInterruptLimit, access);
}
set_limits(kInterruptLimit, access);
}
@ -300,10 +299,8 @@ bool StackGuard::IsDebugBreak() {
void StackGuard::DebugBreak() {
ExecutionAccess access;
if (!Top::is_break_no_lock()) {
thread_local_.interrupt_flags_ |= DEBUGBREAK;
set_limits(kInterruptLimit, access);
}
thread_local_.interrupt_flags_ |= DEBUGBREAK;
set_limits(kInterruptLimit, access);
}
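The guarded computation above avoids unsigned wrap-around when the StackGuard object happens to sit lower in the address space than kLimitSize. A minimal sketch of the pattern, with illustrative names:

// Clamp rather than subtract blindly: if base < limit_size the unsigned
// subtraction would wrap around, so fall back to 0.
static uintptr_t ClampedLimit(uintptr_t base, uintptr_t limit_size) {
  return base >= limit_size ? base - limit_size : 0;
}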

View File

@ -188,7 +188,7 @@ class StackGuard BASE_EMBEDDED {
static void EnableInterrupts();
static void DisableInterrupts();
static const int kLimitSize = 512 * KB;
static const uintptr_t kLimitSize = 512 * KB;
static const uintptr_t kInterruptLimit = 0xfffffffe;
static const uintptr_t kIllegalLimit = 0xffffffff;

View File

@ -76,19 +76,7 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
void ExitFrame::Iterate(ObjectVisitor* v) const {
// Traverse pointers in the callee-saved registers.
const int offset = ExitFrameConstants::kSavedRegistersOffset;
Object** base = &Memory::Object_at(fp() + offset);
Object** limit = base + kNumJSCalleeSaved;
v->VisitPointers(base, limit);
}
void ExitFrame::RestoreCalleeSavedRegisters(Object* buffer[]) const {
// The callee-saved registers in an exit frame are pointed to by the
// frame pointer. See the implementations of C entry runtime stubs.
const int offset = ExitFrameConstants::kSavedRegistersOffset;
memcpy(buffer, fp() + offset, kNumJSCalleeSaved * kPointerSize);
// Do nothing
}
@ -119,58 +107,6 @@ Address InternalFrame::GetCallerStackPointer() const {
}
RegList JavaScriptFrame::FindCalleeSavedRegisters() const {
const unsigned kRegListTag = 1; // pc values have bit 0 cleared (no thumb)
const unsigned kRegListTagSize = 1;
const unsigned kRegListTagMask = (1 << kRegListTagSize) - 1;
// The prologue pc (or the cached register list) is available as a
// slot in the fixed part of the stack frame.
const int offset = +4 * kPointerSize;
// Once the register list has been calculated for a frame, it is
// cached in the prologue pc stack slot. Check the cache before
// doing the more expensive instruction decoding.
uint32_t cache = Memory::int_at(fp() + offset);
if ((cache & kRegListTagMask) == kRegListTag) {
return static_cast<RegList>(cache >> kRegListTagSize);
}
// If we can't find the register list in the instruction stream, we
// assume it's the empty list. [NOTE: Is this really a smart thing
// to do? Don't all JavaScript frames have the instruction?]
RegList result = 0;
// Compute the address of the stm (store multiple) instruction.
Address stm_address = AddressFrom<Address>(cache - PcStoreOffset());
ASSERT((Memory::int32_at(stm_address) & 0xffffcc00) == 0xe92dcc00);
// Fetch the instruction preceding the stm - if it is also a stm
// instruction we read the register list from there.
uint32_t instruction = Memory::int32_at(stm_address - 4);
if ((instruction & 0xfffffc00) == 0xe92d0000) {
// The register list shouldn't be empty and must consist only of JS
// callee-saved registers.
result = instruction & 0xffff;
ASSERT(result != 0 && (result & ~kJSCalleeSaved) == 0);
}
// Cache the result in the prologue pc stack slot before returning
// it. This way future access to the register list is a bit faster.
Memory::int_at(fp() + offset) = (result << kRegListTagSize) | kRegListTag;
return result;
}
void JavaScriptFrame::RestoreCalleeSavedRegisters(Object* buffer[]) const {
// The callee-saved registers in java script frames are in the fixed
// part of the frame below the frame pointer.
const int n = NumRegs(FindCalleeSavedRegisters());
const int offset = 5 * kPointerSize;
memcpy(buffer, fp() + offset, n * kPointerSize);
}
Code* JavaScriptFrame::FindCode() const {
const int offset = StandardFrameConstants::kCodeOffset;
Object* code = Memory::Object_at(fp() + offset);

View File

@ -58,27 +58,19 @@ typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
int JSCallerSavedCode(int n);
// Callee-saved registers available for variable allocation in JavaScript code
static const RegList kJSCalleeSaved =
1 << 4 | // r4 v1
1 << 5 | // r5 v2
1 << 6 | // r6 v3
1 << 7 | // r7 v4
kR9Available << 9 ; // r9 v6
static const int kNumJSCalleeSaved = 4 + kR9Available;
typedef Object* JSCalleeSavedBuffer[kNumJSCalleeSaved];
// Callee-saved registers preserved when switching from C to JavaScript
static const RegList kCalleeSaved = kJSCalleeSaved |
static const RegList kCalleeSaved =
1 << 4 | // r4 v1
1 << 5 | // r5 v2
1 << 6 | // r6 v3
1 << 7 | // r7 v4
1 << 8 | // r8 v5 (cp in JavaScript code)
kR9Available << 9 | // r9 v6
1 << 10 | // r10 v7 (pp in JavaScript code)
1 << 11 ; // r11 v8 (fp in JavaScript code)
static const int kNumCalleeSaved = kNumJSCalleeSaved + 3;
static const int kNumCalleeSaved = 7 + kR9Available;
// ----------------------------------------------------
@ -117,14 +109,13 @@ class ExitFrameConstants : public AllStatic {
static const int kSavedRegistersOffset = 0 * kPointerSize;
// Let the parameters pointer for exit frames point just below the
// frame structure on the stack (includes callee saved registers).
static const int kPPDisplacement = (4 + kNumJSCalleeSaved) * kPointerSize;
// frame structure on the stack.
static const int kPPDisplacement = 4 * kPointerSize;
// The frame pointer for exit frames points to the JavaScript callee
// saved registers. The caller fields are below those on the stack.
static const int kCallerPPOffset = (0 + kNumJSCalleeSaved) * kPointerSize;
static const int kCallerFPOffset = (1 + kNumJSCalleeSaved) * kPointerSize;
static const int kCallerPCOffset = (3 + kNumJSCalleeSaved) * kPointerSize;
// The caller fields are below the frame pointer on the stack.
static const int kCallerPPOffset = +0 * kPointerSize;
static const int kCallerFPOffset = +1 * kPointerSize;
static const int kCallerPCOffset = +3 * kPointerSize;
};
@ -150,8 +141,11 @@ class JavaScriptFrameConstants : public AllStatic {
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
static const int kArgsLengthOffset = -1 * kPointerSize;
// 0 * kPointerSize : StandardFrameConstants::kCallerPPOffset
// 1 * kPointerSize : StandardFrameConstants::kCallerFPOffset
static const int kSPOnExitOffset = +2 * kPointerSize;
static const int kSavedRegistersOffset = +5 * kPointerSize;
// 3 * kPointerSize : StandardFrameConstants::kCallerPCOffset
static const int kSavedRegistersOffset = +4 * kPointerSize;
// PP-relative.
static const int kParam0Offset = -2 * kPointerSize;
@ -177,12 +171,6 @@ inline Object* JavaScriptFrame::function() const {
}
inline Object** StackFrameIterator::register_buffer() const {
static Object* buffer[kNumJSCalleeSaved];
return buffer;
}
// ----------------------------------------------------
@ -198,6 +186,7 @@ inline Object** StackFrameIterator::register_buffer() const {
// ----------- +=============+ <--- sp (stack pointer)
// | function |
// +-------------+
// +-------------+
// | |
// | expressions |
// | |
@ -221,13 +210,6 @@ inline Object** StackFrameIterator::register_buffer() const {
// m 2 | sp_on_exit | (pp if return, caller_sp if no return)
// e +-------------+
// 3 | caller_pc |
// +-------------+
// 4 | prolog_pc | (used to find list of callee-saved regs)
// +-------------+
// 5 | |
// |callee-saved | (only saved if clobbered by this function,
// | regs | must be traversed during GC)
// | |
// +-------------+ <--- caller_sp (incl. parameters)
// | |
// | parameters |
@ -374,16 +356,6 @@ inline Object** StackFrameIterator::register_buffer() const {
// | parameters | (first 4 args are passed in r0-r3)
// | |
// +-------------+ <--- fp (frame pointer)
// C 0 | r4 | r4-r7, r9 are potentially holding JS locals
// +-------------+
// 1 | r5 | and must be traversed by the GC for proper
// e +-------------+
// n 2 | r6 | relocation
// t +-------------+
// r 3 | r7 |
// y +-------------+
// [ 4 | r9 | ] only if r9 available
// +-------------+
// f 4/5 | caller_fp |
// r +-------------+
// a 5/6 | sp_on_exit | (pp)

View File

@ -71,11 +71,6 @@ void ExitFrame::Iterate(ObjectVisitor* v) const {
}
void ExitFrame::RestoreCalleeSavedRegisters(Object* buffer[]) const {
// Do nothing.
}
int JavaScriptFrame::GetProvidedParametersCount() const {
return ComputeParametersCount();
}
@ -117,16 +112,6 @@ Address InternalFrame::GetCallerStackPointer() const {
}
RegList JavaScriptFrame::FindCalleeSavedRegisters() const {
return 0;
}
void JavaScriptFrame::RestoreCalleeSavedRegisters(Object* buffer[]) const {
// Do nothing.
}
Code* JavaScriptFrame::FindCode() const {
JSFunction* function = JSFunction::cast(this->function());
return function->shared()->code();

View File

@ -48,12 +48,6 @@ static const int kNumJSCallerSaved = 5;
typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
// Callee-saved registers available for variable allocation in JavaScript code
static const RegList kJSCalleeSaved = 0;
static const int kNumJSCalleeSaved = 0;
// ----------------------------------------------------
@ -143,11 +137,6 @@ inline Object* JavaScriptFrame::function() const {
}
Object** StackFrameIterator::register_buffer() const {
ASSERT(kNumJSCalleeSaved == 0);
return NULL;
}
// ----------------------------------------------------

View File

@ -103,11 +103,6 @@ inline StackHandler* StackFrame::top_handler() const {
}
inline Object** StackFrame::top_register_buffer() const {
return iterator_->register_buffer();
}
inline Object* StandardFrame::GetExpression(int index) const {
return Memory::Object_at(GetExpressionAddress(index));
}

View File

@ -73,12 +73,12 @@ class StackHandlerIterator BASE_EMBEDDED {
#define INITIALIZE_SINGLETON(type, field) field##_(this),
StackFrameIterator::StackFrameIterator()
: STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
frame_(NULL), handler_(NULL), thread(Top::GetCurrentThread()) {
frame_(NULL), handler_(NULL), thread_(Top::GetCurrentThread()) {
Reset();
}
StackFrameIterator::StackFrameIterator(ThreadLocalTop* t)
: STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
frame_(NULL), handler_(NULL), thread(t) {
frame_(NULL), handler_(NULL), thread_(t) {
Reset();
}
#undef INITIALIZE_SINGLETON
@ -93,13 +93,6 @@ void StackFrameIterator::Advance() {
StackFrame::State state;
StackFrame::Type type = frame_->GetCallerState(&state);
// Restore any callee-saved registers to the register buffer. Avoid
// the virtual call if the platform doesn't have any callee-saved
// registers.
if (kNumJSCalleeSaved > 0) {
frame_->RestoreCalleeSavedRegisters(register_buffer());
}
// Unwind handlers corresponding to the current frame.
StackHandlerIterator it(frame_, handler_);
while (!it.done()) it.Advance();
@ -115,33 +108,11 @@ void StackFrameIterator::Advance() {
void StackFrameIterator::Reset() {
Address fp = Top::c_entry_fp(thread);
Address fp = Top::c_entry_fp(thread_);
StackFrame::State state;
StackFrame::Type type = ExitFrame::GetStateForFramePointer(fp, &state);
frame_ = SingletonFor(type, &state);
handler_ = StackHandler::FromAddress(Top::handler(thread));
// Zap the register buffer in debug mode.
if (kDebug) {
Object** buffer = register_buffer();
for (int i = 0; i < kNumJSCalleeSaved; i++) {
buffer[i] = reinterpret_cast<Object*>(kZapValue);
}
}
}
Object** StackFrameIterator::RestoreCalleeSavedForTopHandler(Object** buffer) {
ASSERT(kNumJSCalleeSaved > 0);
// Traverse the frames until we find the frame containing the top
// handler. Such a frame is guaranteed to always exists by the
// callers of this function.
for (StackFrameIterator it; true; it.Advance()) {
StackHandlerIterator handlers(it.frame(), it.handler());
if (!handlers.done()) {
memcpy(buffer, it.register_buffer(), kNumJSCalleeSaved * kPointerSize);
return buffer;
}
}
handler_ = StackHandler::FromAddress(Top::handler(thread_));
}
@ -302,20 +273,9 @@ Code* ExitDebugFrame::FindCode() const {
}
RegList ExitFrame::FindCalleeSavedRegisters() const {
// Exit frames save all - if any - callee-saved registers.
return kJSCalleeSaved;
}
Address StandardFrame::GetExpressionAddress(int n) const {
ASSERT(0 <= n && n < ComputeExpressionsCount());
if (kNumJSCalleeSaved > 0 && n < kNumJSCalleeSaved) {
return reinterpret_cast<Address>(top_register_buffer() + n);
} else {
const int offset = StandardFrameConstants::kExpressionsOffset;
return fp() + offset - (n - kNumJSCalleeSaved) * kPointerSize;
}
const int offset = StandardFrameConstants::kExpressionsOffset;
return fp() + offset - n * kPointerSize;
}
@ -326,7 +286,7 @@ int StandardFrame::ComputeExpressionsCount() const {
Address limit = sp();
ASSERT(base >= limit); // stack grows downwards
// Include register-allocated locals in number of expressions.
return (base - limit) / kPointerSize + kNumJSCalleeSaved;
return (base - limit) / kPointerSize;
}
@ -360,12 +320,7 @@ Object* JavaScriptFrame::GetParameter(int index) const {
int JavaScriptFrame::ComputeParametersCount() const {
Address base = pp() + JavaScriptFrameConstants::kReceiverOffset;
Address limit = fp() + JavaScriptFrameConstants::kSavedRegistersOffset;
int result = (base - limit) / kPointerSize;
if (kNumJSCalleeSaved > 0) {
return result - NumRegs(FindCalleeSavedRegisters());
} else {
return result;
}
return (base - limit) / kPointerSize;
}
@ -492,7 +447,7 @@ void JavaScriptFrame::Print(StringStream* accumulator,
}
// Print the expression stack.
int expressions_start = Max(stack_locals_count, kNumJSCalleeSaved);
int expressions_start = stack_locals_count;
if (expressions_start < expressions_count) {
accumulator->Add(" // expression stack (top to bottom)\n");
}
@ -643,36 +598,4 @@ int JSCallerSavedCode(int n) {
}
int JSCalleeSavedCode(int n) {
static int reg_code[kNumJSCalleeSaved + 1]; // avoid zero-size array error
static bool initialized = false;
if (!initialized) {
initialized = true;
int i = 0;
for (int r = 0; r < kNumRegs; r++)
if ((kJSCalleeSaved & (1 << r)) != 0)
reg_code[i++] = r;
ASSERT(i == kNumJSCalleeSaved);
}
ASSERT(0 <= n && n < kNumJSCalleeSaved);
return reg_code[n];
}
RegList JSCalleeSavedList(int n) {
// avoid zero-size array error
static RegList reg_list[kNumJSCalleeSaved + 1];
static bool initialized = false;
if (!initialized) {
initialized = true;
reg_list[0] = 0;
for (int i = 0; i < kNumJSCalleeSaved; i++)
reg_list[i+1] = reg_list[i] + (1 << JSCalleeSavedCode(i));
}
ASSERT(0 <= n && n <= kNumJSCalleeSaved);
return reg_list[n];
}
} } // namespace v8::internal

View File

@ -37,11 +37,6 @@ int NumRegs(RegList list);
// Return the code of the n-th saved register available to JavaScript.
int JSCallerSavedCode(int n);
int JSCalleeSavedCode(int n);
// Return the list of the first n callee-saved registers available to
// JavaScript.
RegList JSCalleeSavedList(int n);
// Forward declarations.
@ -186,15 +181,8 @@ class StackFrame BASE_EMBEDDED {
PrintMode mode,
int index);
// Find callee-saved registers for this frame.
virtual RegList FindCalleeSavedRegisters() const { return 0; }
// Restore state of callee-saved registers to the provided buffer.
virtual void RestoreCalleeSavedRegisters(Object* buffer[]) const { }
// Get the top handler from the current stack iterator.
inline StackHandler* top_handler() const;
inline Object** top_register_buffer() const;
// Compute the stack frame type for the given state.
static Type ComputeType(State* state);
@ -297,9 +285,6 @@ class ExitFrame: public StackFrame {
virtual Address GetCallerStackPointer() const;
virtual RegList FindCalleeSavedRegisters() const;
virtual void RestoreCalleeSavedRegisters(Object* buffer[]) const;
private:
virtual Type GetCallerState(State* state) const;
@ -438,14 +423,6 @@ class JavaScriptFrame: public StandardFrame {
virtual Address GetCallerStackPointer() const;
// Find the callee-saved registers for this JavaScript frame. This
// may require traversing the instruction stream and decoding
// certain instructions.
virtual RegList FindCalleeSavedRegisters() const;
// Restore callee-saved registers.
virtual void RestoreCalleeSavedRegisters(Object* buffer[]) const;
private:
friend class StackFrameIterator;
};
@ -539,18 +516,13 @@ class StackFrameIterator BASE_EMBEDDED {
// Go back to the first frame.
void Reset();
// Computes the state of the callee-saved registers for the top
// stack handler structure. Used for restoring register state when
// unwinding due to thrown exceptions.
static Object** RestoreCalleeSavedForTopHandler(Object** buffer);
private:
#define DECLARE_SINGLETON(ignore, type) type type##_;
STACK_FRAME_TYPE_LIST(DECLARE_SINGLETON)
#undef DECLARE_SINGLETON
StackFrame* frame_;
StackHandler* handler_;
ThreadLocalTop* thread;
ThreadLocalTop* thread_;
StackHandler* handler() const {
ASSERT(!done());
@ -560,11 +532,6 @@ class StackFrameIterator BASE_EMBEDDED {
// Get the type-specific frame singleton in a given state.
StackFrame* SingletonFor(StackFrame::Type type, StackFrame::State* state);
// The register buffer contains the state of callee-saved registers
// for the current frame. It is computed as the stack frame
// iterators advances through stack frames.
inline Object** register_buffer() const;
friend class StackFrame;
DISALLOW_EVIL_CONSTRUCTORS(StackFrameIterator);
};
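With the callee-saved register buffer gone, walking the stack reduces to the plain iterator loop; a sketch mirroring the loop removed from RestoreCalleeSavedForTopHandler above:

// Visit every frame of the current thread.
for (StackFrameIterator it; !it.done(); it.Advance()) {
  StackFrame* frame = it.frame();
  // ... inspect or print the frame here.
}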

View File

@ -132,21 +132,6 @@ const int kBitsPerByteLog2 = 3;
const int kBitsPerPointer = kPointerSize * kBitsPerByte;
const int kBitsPerInt = kIntSize * kBitsPerByte;
// Bits used by the mark-compact collector, PLEASE READ.
//
// The first word of a heap object is a map pointer. The last two bits are
// tagged as '01' (kHeapObjectTag). We reuse the last two bits to mark an
// object as live and/or overflowed:
// last bit = 0, marked as alive
// second bit = 1, overflowed
// An object is only marked as overflowed when it is marked as live while
// the marking stack is overflowed.
const int kMarkingBit = 0; // marking bit
const int kMarkingMask = (1 << kMarkingBit); // marking mask
const int kOverflowBit = 1; // overflow bit
const int kOverflowMask = (1 << kOverflowBit); // overflow mask
// Zap-value: The value used for zapping dead objects. Should be a recognizable
// illegal heap object pointer.
@ -227,13 +212,14 @@ typedef bool (*WeakSlotCallback)(Object** pointer);
// Miscellaneous
// NOTE: SpaceIterator depends on AllocationSpace enumeration values being
// consecutive and that NEW_SPACE is the first.
// consecutive.
enum AllocationSpace {
NEW_SPACE,
OLD_SPACE,
CODE_SPACE,
MAP_SPACE,
LO_SPACE,
FIRST_SPACE = NEW_SPACE,
LAST_SPACE = LO_SPACE
};
const int kSpaceTagSize = 3;
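The FIRST_SPACE and LAST_SPACE aliases make the spaces iterable numerically, which the new SpaceIterator in heap.h relies on; a minimal sketch:

// Step through all allocation spaces; depends on the enum values being
// consecutive, as the comment above requires.
for (int s = FIRST_SPACE; s <= LAST_SPACE; s++) {
  AllocationSpace space = static_cast<AllocationSpace>(s);
  // ... dispatch on space.
}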

View File

@ -75,6 +75,32 @@ Object* Heap::AllocateRaw(int size_in_bytes, AllocationSpace space) {
}
Object* Heap::AllocateForDeserialization(int size_in_bytes,
AllocationSpace space) {
ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
PagedSpace* where;
switch (space) {
case NEW_SPACE:
return new_space_->AllocateRaw(size_in_bytes);
case LO_SPACE:
return lo_space_->AllocateRaw(size_in_bytes);
case OLD_SPACE:
where = old_space_;
break;
case CODE_SPACE:
where = code_space_;
break;
case MAP_SPACE:
where = map_space_;
break;
}
// Only paged spaces fall through.
return where->AllocateForDeserialization(size_in_bytes);
}
Object* Heap::NumberFromInt32(int32_t value) {
if (Smi::IsValid(value)) return Smi::FromInt(value);
// Bypass NumberFromDouble to avoid various redundant checks.
@ -142,6 +168,21 @@ Object* Heap::AllocatePropertyStorageForMap(Map* map) {
}
AllocationSpace Heap::TargetSpace(HeapObject* object) {
// Heap numbers and sequential strings are promoted to code space, all
// other object types are promoted to old space. We do not use
// object->IsHeapNumber() and object->IsSeqString() because we already
// know that object has the heap object tag.
InstanceType type = object->map()->instance_type();
ASSERT((type != CODE_TYPE) && (type != MAP_TYPE));
bool has_pointers =
type != HEAP_NUMBER_TYPE &&
(type >= FIRST_NONSTRING_TYPE ||
String::cast(object)->representation_tag() != kSeqStringTag);
return has_pointers ? OLD_SPACE : CODE_SPACE;
}
#define GC_GREEDY_CHECK() \
ASSERT(!FLAG_gc_greedy \
|| v8::internal::Heap::disallow_allocation_failure() \

View File

@ -91,6 +91,9 @@ LargeObjectSpace* Heap::lo_space_ = NULL;
int Heap::promoted_space_limit_ = 0;
int Heap::old_gen_exhausted_ = false;
int Heap::amount_of_external_allocated_memory_ = 0;
int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
int Heap::semispace_size_ = 1*MB;
@ -109,11 +112,12 @@ int Heap::new_space_growth_limit_ = 8;
int Heap::scavenge_count_ = 0;
Heap::HeapState Heap::gc_state_ = NOT_IN_GC;
#ifdef DEBUG
bool Heap::allocation_allowed_ = true;
int Heap::mc_count_ = 0;
int Heap::gc_count_ = 0;
#ifdef DEBUG
bool Heap::allocation_allowed_ = true;
int Heap::allocation_timeout_ = 0;
bool Heap::disallow_allocation_failure_ = false;
#endif // DEBUG
@ -156,7 +160,8 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
}
// Is enough data promoted to justify a global GC?
if (PromotedSpaceSize() > promoted_space_limit_) {
if (PromotedSpaceSize() + PromotedExternalMemorySize()
> promoted_space_limit_) {
Counters::gc_compactor_caused_by_promoted_data.Increment();
return MARK_COMPACTOR;
}
@ -239,10 +244,10 @@ void Heap::ReportStatisticsAfterGC() {
void Heap::GarbageCollectionPrologue() {
RegExpImpl::NewSpaceCollectionPrologue();
gc_count_++;
#ifdef DEBUG
ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
allow_allocation(false);
gc_count_++;
if (FLAG_verify_heap) {
Verify();
@ -298,57 +303,6 @@ void Heap::GarbageCollectionEpilogue() {
}
// GCTracer collects and prints ONE line after each garbage collector
// invocation IFF --trace_gc is used.
class GCTracer BASE_EMBEDDED {
public:
GCTracer() : start_time_(0.0), start_size_(0.0) {
if (!FLAG_trace_gc) return;
start_time_ = OS::TimeCurrentMillis();
start_size_ = SizeOfHeapObjects();
}
~GCTracer() {
if (!FLAG_trace_gc) return;
// Printf ONE line iff flag is set.
PrintF("%s %.1f -> %.1f MB, %d ms.\n",
CollectorString(),
start_size_, SizeOfHeapObjects(),
static_cast<int>(OS::TimeCurrentMillis() - start_time_));
}
// Sets the collector.
void set_collector(GarbageCollector collector) {
collector_ = collector;
}
private:
// Returns a string matching the collector.
const char* CollectorString() {
switch (collector_) {
case SCAVENGER:
return "Scavenge";
case MARK_COMPACTOR:
return MarkCompactCollector::HasCompacted() ? "Mark-compact"
: "Mark-sweep";
}
return "Unknown GC";
}
// Returns size of object in heap (in MB).
double SizeOfHeapObjects() {
return (static_cast<double>(Heap::SizeOfObjects())) / MB;
}
double start_time_; // Timestamp set in the constructor.
double start_size_; // Size of objects in heap set in constructor.
GarbageCollector collector_; // Type of collector.
};
bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
// The VM is in the GC state until exiting this function.
VMState state(GC);
@ -364,15 +318,19 @@ bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
{ GCTracer tracer;
GarbageCollectionPrologue();
// The GC count was incremented in the prologue. Tell the tracer about
// it.
tracer.set_gc_count(gc_count_);
GarbageCollector collector = SelectGarbageCollector(space);
// Tell the tracer which collector we've selected.
tracer.set_collector(collector);
StatsRate* rate = (collector == SCAVENGER)
? &Counters::gc_scavenger
: &Counters::gc_compactor;
rate->Start();
PerformGarbageCollection(space, collector);
PerformGarbageCollection(space, collector, &tracer);
rate->Stop();
GarbageCollectionEpilogue();
@ -399,15 +357,22 @@ bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
}
void Heap::PerformScavenge() {
GCTracer tracer;
PerformGarbageCollection(NEW_SPACE, SCAVENGER, &tracer);
}
void Heap::PerformGarbageCollection(AllocationSpace space,
GarbageCollector collector) {
GarbageCollector collector,
GCTracer* tracer) {
if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
ASSERT(!allocation_allowed_);
global_gc_prologue_callback_();
}
if (collector == MARK_COMPACTOR) {
MarkCompact();
MarkCompact(tracer);
int promoted_space_size = PromotedSpaceSize();
promoted_space_limit_ =
@ -434,6 +399,12 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
// Process weak handles post gc.
GlobalHandles::PostGarbageCollectionProcessing();
if (collector == MARK_COMPACTOR) {
// Register the amount of external allocated memory.
amount_of_external_allocated_memory_at_last_global_gc_ =
amount_of_external_allocated_memory_;
}
if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
ASSERT(!allocation_allowed_);
global_gc_epilogue_callback_();
@ -441,16 +412,15 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
}
void Heap::MarkCompact() {
void Heap::MarkCompact(GCTracer* tracer) {
gc_state_ = MARK_COMPACT;
#ifdef DEBUG
mc_count_++;
#endif
tracer->set_full_gc_count(mc_count_);
LOG(ResourceEvent("markcompact", "begin"));
MarkCompactPrologue();
MarkCompactCollector::CollectGarbage();
MarkCompactCollector::CollectGarbage(tracer);
MarkCompactEpilogue();
@ -482,6 +452,7 @@ Object* Heap::FindCodeObject(Address a) {
if (obj->IsFailure()) {
obj = lo_space_->FindObject(a);
}
ASSERT(!obj->IsFailure());
return obj;
}
@ -777,9 +748,8 @@ HeapObject* Heap::MigrateObject(HeapObject** source_p,
*dst++ = *src++;
} while (counter-- > 0);
// Set forwarding pointers, cannot use Map::cast because it asserts
// the value type to be Map.
(*source_p)->set_map(reinterpret_cast<Map*>(target));
// Set the forwarding address.
(*source_p)->set_map_word(MapWord::FromForwardingAddress(target));
// Update NewSpace stats if necessary.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
@ -795,24 +765,23 @@ void Heap::CopyObject(HeapObject** p) {
HeapObject* object = *p;
// We use the first word (where the map pointer usually is) of a
// HeapObject to record the forwarding pointer. A forwarding pointer can
// We use the first word (where the map pointer usually is) of a heap
// object to record the forwarding pointer. A forwarding pointer can
// point to the old space, the code space, or the to space of the new
// generation.
HeapObject* first_word = object->map();
MapWord first_word = object->map_word();
// If the first word (where the map pointer is) is not a map pointer, the
// object has already been copied. We do not use first_word->IsMap()
// because we know that first_word always has the heap object tag.
if (first_word->map()->instance_type() != MAP_TYPE) {
*p = first_word;
// If the first word is a forwarding address, the object has already been
// copied.
if (first_word.IsForwardingAddress()) {
*p = first_word.ToForwardingAddress();
return;
}
// Optimization: Bypass ConsString objects where the right-hand side is
// Heap::empty_string(). We do not use object->IsConsString because we
// already know that object has the heap object tag.
InstanceType type = Map::cast(first_word)->instance_type();
InstanceType type = first_word.ToMap()->instance_type();
if (type < FIRST_NONSTRING_TYPE &&
String::cast(object)->representation_tag() == kConsStringTag &&
ConsString::cast(object)->second() == Heap::empty_string()) {
@ -821,35 +790,29 @@ void Heap::CopyObject(HeapObject** p) {
// After patching *p we have to repeat the checks that object is in the
// active semispace of the young generation and not already copied.
if (!InFromSpace(object)) return;
first_word = object->map();
if (first_word->map()->instance_type() != MAP_TYPE) {
*p = first_word;
first_word = object->map_word();
if (first_word.IsForwardingAddress()) {
*p = first_word.ToForwardingAddress();
return;
}
type = Map::cast(first_word)->instance_type();
type = first_word.ToMap()->instance_type();
}
int object_size = object->SizeFromMap(Map::cast(first_word));
int object_size = object->SizeFromMap(first_word.ToMap());
Object* result;
// If the object should be promoted, we try to copy it to old space.
if (ShouldBePromoted(object->address(), object_size)) {
// Heap numbers and sequential strings are promoted to code space, all
// other object types are promoted to old space. We do not use
// object->IsHeapNumber() and object->IsSeqString() because we already
// know that object has the heap object tag.
bool has_pointers =
type != HEAP_NUMBER_TYPE &&
(type >= FIRST_NONSTRING_TYPE ||
String::cast(object)->representation_tag() != kSeqStringTag);
if (has_pointers) {
AllocationSpace target_space = Heap::TargetSpace(object);
if (target_space == OLD_SPACE) {
result = old_space_->AllocateRaw(object_size);
} else {
ASSERT(target_space == CODE_SPACE);
result = code_space_->AllocateRaw(object_size);
}
if (!result->IsFailure()) {
*p = MigrateObject(p, HeapObject::cast(result), object_size);
if (has_pointers) {
if (target_space == OLD_SPACE) {
// Record the object's address at the top of the to space, to allow
// it to be swept by the scavenger.
promoted_top -= kPointerSize;
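The MapWord abstraction introduced here overloads an object's first word to hold either its map pointer or a forwarding address. A sketch of the round trip, with target standing in for the object's new location:

// Stamp the forwarding address into the old copy, then read it back.
object->set_map_word(MapWord::FromForwardingAddress(target));
MapWord first_word = object->map_word();
ASSERT(first_word.IsForwardingAddress());
HeapObject* new_location = first_word.ToForwardingAddress();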
@ -2469,6 +2432,11 @@ bool Heap::ConfigureHeap(int semispace_size, int old_gen_size) {
}
bool Heap::ConfigureHeapDefault() {
return ConfigureHeap(FLAG_new_space_size, FLAG_old_space_size);
}
int Heap::PromotedSpaceSize() {
return old_space_->Size()
+ code_space_->Size()
@ -2477,6 +2445,14 @@ int Heap::PromotedSpaceSize() {
}
int Heap::PromotedExternalMemorySize() {
if (amount_of_external_allocated_memory_
<= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
return amount_of_external_allocated_memory_
- amount_of_external_allocated_memory_at_last_global_gc_;
}
bool Heap::Setup(bool create_heap_objects) {
// Initialize heap spaces and initial maps and objects. Whenever something
// goes wrong, just return false. The caller should check the results and
@ -2487,7 +2463,7 @@ bool Heap::Setup(bool create_heap_objects) {
// size) and old-space-size if set or the initial values of semispace_size_
// and old_generation_size_ otherwise.
if (!heap_configured) {
if (!ConfigureHeap(FLAG_new_space_size, FLAG_old_space_size)) return false;
if (!ConfigureHeapDefault()) return false;
}
// Setup memory allocator and allocate an initial chunk of memory. The
@ -2509,31 +2485,35 @@ bool Heap::Setup(bool create_heap_objects) {
int old_space_size = new_space_start - old_space_start;
int code_space_size = young_generation_size_ - old_space_size;
// Initialize new space.
new_space_ = new NewSpace(initial_semispace_size_, semispace_size_);
// Initialize new space. It will not contain code.
new_space_ = new NewSpace(initial_semispace_size_,
semispace_size_,
NEW_SPACE,
false);
if (new_space_ == NULL) return false;
if (!new_space_->Setup(new_space_start, young_generation_size_)) return false;
// Initialize old space, set the maximum capacity to the old generation
// size.
old_space_ = new OldSpace(old_generation_size_, OLD_SPACE);
// size. It will not contain code.
old_space_ = new OldSpace(old_generation_size_, OLD_SPACE, false);
if (old_space_ == NULL) return false;
if (!old_space_->Setup(old_space_start, old_space_size)) return false;
// Initialize the code space, set its maximum capacity to the old
// generation size.
code_space_ = new OldSpace(old_generation_size_, CODE_SPACE);
// generation size. It needs executable memory.
code_space_ = new OldSpace(old_generation_size_, CODE_SPACE, true);
if (code_space_ == NULL) return false;
if (!code_space_->Setup(code_space_start, code_space_size)) return false;
// Initialize map space.
map_space_ = new MapSpace(kMaxMapSpaceSize);
map_space_ = new MapSpace(kMaxMapSpaceSize, MAP_SPACE);
if (map_space_ == NULL) return false;
// Setting up a paged space without giving it a virtual memory range big
// enough to hold at least a page will cause it to allocate.
if (!map_space_->Setup(NULL, 0)) return false;
lo_space_ = new LargeObjectSpace();
// The large object space may contain code, so it needs executable memory.
lo_space_ = new LargeObjectSpace(LO_SPACE, true);
if (lo_space_ == NULL) return false;
if (!lo_space_->Setup()) return false;
@ -2617,6 +2597,66 @@ void Heap::PrintHandles() {
#endif
SpaceIterator::SpaceIterator() : current_space_(FIRST_SPACE), iterator_(NULL) {
}
SpaceIterator::~SpaceIterator() {
// Delete active iterator if any.
delete iterator_;
}
bool SpaceIterator::has_next() {
// Iterate until no more spaces.
return current_space_ != LAST_SPACE;
}
ObjectIterator* SpaceIterator::next() {
if (iterator_ != NULL) {
delete iterator_;
iterator_ = NULL;
// Move to the next space
current_space_++;
if (current_space_ > LAST_SPACE) {
return NULL;
}
}
// Return iterator for the new current space.
return CreateIterator();
}
// Create an iterator for the space to iterate.
ObjectIterator* SpaceIterator::CreateIterator() {
ASSERT(iterator_ == NULL);
switch (current_space_) {
case NEW_SPACE:
iterator_ = new SemiSpaceIterator(Heap::new_space());
break;
case OLD_SPACE:
iterator_ = new HeapObjectIterator(Heap::old_space());
break;
case CODE_SPACE:
iterator_ = new HeapObjectIterator(Heap::code_space());
break;
case MAP_SPACE:
iterator_ = new HeapObjectIterator(Heap::map_space());
break;
case LO_SPACE:
iterator_ = new LargeObjectIterator(Heap::lo_space());
break;
}
// Return the newly allocated iterator.
ASSERT(iterator_ != NULL);
return iterator_;
}
HeapIterator::HeapIterator() {
Init();
}
@ -2907,4 +2947,43 @@ void Heap::TracePathToGlobal() {
#endif
GCTracer::GCTracer()
: start_time_(0.0),
start_size_(0.0),
gc_count_(0),
full_gc_count_(0),
is_compacting_(false),
marked_count_(0) {
// These two fields reflect the state of the previous full collection.
// Set them before they are changed by the collector.
previous_has_compacted_ = MarkCompactCollector::HasCompacted();
previous_marked_count_ = MarkCompactCollector::previous_marked_count();
if (!FLAG_trace_gc) return;
start_time_ = OS::TimeCurrentMillis();
start_size_ = SizeOfHeapObjects();
}
GCTracer::~GCTracer() {
if (!FLAG_trace_gc) return;
// Printf ONE line iff flag is set.
PrintF("%s %.1f -> %.1f MB, %d ms.\n",
CollectorString(),
start_size_, SizeOfHeapObjects(),
static_cast<int>(OS::TimeCurrentMillis() - start_time_));
}
const char* GCTracer::CollectorString() {
switch (collector_) {
case SCAVENGER:
return "Scavenge";
case MARK_COMPACTOR:
return MarkCompactCollector::HasCompacted() ? "Mark-compact"
: "Mark-sweep";
}
return "Unknown GC";
}
} } // namespace v8::internal

View File

@ -192,6 +192,10 @@ namespace v8 { namespace internal {
V(zero_symbol, "0")
// Forward declaration of the GCTracer class.
class GCTracer;
// The all static Heap captures the interface to the global object heap.
// All JavaScript contexts by this process share the same object heap.
@ -200,6 +204,7 @@ class Heap : public AllStatic {
// Configure heap size before setup. Return false if the heap has been
// setup already.
static bool ConfigureHeap(int semispace_size, int old_gen_size);
static bool ConfigureHeapDefault();
// Initializes the global object heap. If create_heap_objects is true,
// also creates the basic non-mutable objects.
@ -504,6 +509,13 @@ class Heap : public AllStatic {
// Please note this function does not perform a garbage collection.
static inline Object* AllocateRaw(int size_in_bytes, AllocationSpace space);
// Allocate an uninitialized object during deserialization. Performs linear
// allocation (i.e., guaranteed no free list allocation) and assumes the
// spaces are all preexpanded so allocation should not fail.
static inline Object* AllocateForDeserialization(int size_in_bytes,
AllocationSpace space);
// Makes a new native code object
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
@ -544,9 +556,7 @@ class Heap : public AllStatic {
// Utility to invoke the scavenger. This is needed in test code to
// ensure correct callback for weak global handles.
static void PerformScavenge() {
PerformGarbageCollection(NEW_SPACE, SCAVENGER);
}
static void PerformScavenge();
static void SetGlobalGCPrologueCallback(GCCallback callback) {
global_gc_prologue_callback_ = callback;
@ -601,6 +611,9 @@ class Heap : public AllStatic {
static bool InSpace(Address addr, AllocationSpace space);
static bool InSpace(HeapObject* value, AllocationSpace space);
// Finds out which space an object should get promoted to based on its type.
static inline AllocationSpace TargetSpace(HeapObject* object);
// Sets the stub_cache_ (only used when expanding the dictionary).
static void set_code_stubs(Dictionary* value) { code_stubs_ = value; }
@ -634,9 +647,7 @@ class Heap : public AllStatic {
// Write barrier support for address[offset] = o.
inline static void RecordWrite(Address address, int offset);
// Given an address in the heap, returns a pointer to the object which
// body contains the address. Returns Failure::Exception() if the
// operation fails.
// Given an address occupied by a live code object, return that object.
static Object* FindCodeObject(Address a);
// Invoke Shrink on shrinkable spaces.
@ -657,14 +668,6 @@ class Heap : public AllStatic {
static void TracePathToGlobal();
#endif
// Helper for Serialization/Deserialization that restricts memory allocation
// to the predictable LINEAR_ONLY policy
static void SetLinearAllocationOnly(bool linear_only) {
old_space_->SetLinearAllocationOnly(linear_only);
code_space_->SetLinearAllocationOnly(linear_only);
map_space_->SetLinearAllocationOnly(linear_only);
}
// Callback function passed to Heap::Iterate etc. Copies an object if
// necessary, the object might be promoted to an old space. The caller must
// ensure the precondition that the object is (a) a heap object and (b) in
@ -695,6 +698,25 @@ class Heap : public AllStatic {
// Entries in the cache. Must be a power of 2.
static const int kNumberStringCacheSize = 64;
// Adjusts the amount of registered external memory.
// Returns the adjusted value.
static int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
int amount = amount_of_external_allocated_memory_ + change_in_bytes;
if (change_in_bytes >= 0) {
// Avoid overflow.
if (amount > amount_of_external_allocated_memory_) {
amount_of_external_allocated_memory_ = amount;
}
} else {
// Avoid underflow.
if (amount >= 0) {
amount_of_external_allocated_memory_ = amount;
}
}
ASSERT(amount_of_external_allocated_memory_ >= 0);
return amount_of_external_allocated_memory_;
}
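A sketch of how an embedder-facing notification might feed this accounting; the call site is hypothetical:

// Register 1 MB of external memory kept alive by a global handle, and
// release it again once the external resource is freed.
Heap::AdjustAmountOfExternalAllocatedMemory(1 * MB);
// ... later, when the resource is released ...
Heap::AdjustAmountOfExternalAllocatedMemory(-1 * MB);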
private:
static int semispace_size_;
static int initial_semispace_size_;
@ -716,11 +738,15 @@ class Heap : public AllStatic {
// Returns the size of object residing in non new spaces.
static int PromotedSpaceSize();
#ifdef DEBUG
static bool allocation_allowed_;
// Returns the amount of external memory registered since last global gc.
static int PromotedExternalMemorySize();
static int mc_count_; // how many mark-compact collections happened
static int gc_count_; // how many gc happened
#ifdef DEBUG
static bool allocation_allowed_;
// If the --gc-interval flag is set to a positive value, this
// variable holds the value indicating the number of allocations
// remain until the next failure and garbage collection.
@ -734,6 +760,13 @@ class Heap : public AllStatic {
// Promotion limit that triggers a global GC
static int promoted_space_limit_;
// The amount of external memory registered through the API kept alive
// by global handles
static int amount_of_external_allocated_memory_;
// Caches the amount of external memory registered at the last global gc.
static int amount_of_external_allocated_memory_at_last_global_gc_;
// Indicates that an allocation has failed in the old generation since the
// last GC.
static int old_gen_exhausted_;
@ -762,7 +795,8 @@ class Heap : public AllStatic {
// Performs garbage collection
static void PerformGarbageCollection(AllocationSpace space,
GarbageCollector collector);
GarbageCollector collector,
GCTracer* tracer);
// Returns either a Smi or a Number object from 'value'. If 'new_object'
// is false, it may return a preallocated immutable object.
@ -801,7 +835,7 @@ class Heap : public AllStatic {
static void Scavenge();
// Performs a major collection in the whole heap.
static void MarkCompact();
static void MarkCompact(GCTracer* tracer);
// Code to be run before and after mark-compact.
static void MarkCompactPrologue();
@ -885,6 +919,26 @@ class VerifyPointersAndRSetVisitor: public ObjectVisitor {
#endif
// Space iterator for iterating over all spaces of the heap.
// For each space an object iterator is provided. The deallocation of the
// returned object iterators is handled by the space iterator.
class SpaceIterator : public Malloced {
public:
SpaceIterator();
virtual ~SpaceIterator();
bool has_next();
ObjectIterator* next();
private:
ObjectIterator* CreateIterator();
int current_space_; // from enum AllocationSpace.
ObjectIterator* iterator_; // object iterator for the current space.
};
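A minimal sketch of the intended iteration pattern; per the comments above, the space iterator owns and deletes the per-space object iterators:

// Walk the object iterators one space at a time.
SpaceIterator spaces;
while (spaces.has_next()) {
  ObjectIterator* objects = spaces.next();
  // ... consume objects for the current space; no delete needed here.
}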
// A HeapIterator provides iteration over the whole heap. It aggregates the
// specific iterators for the different spaces, as each of these can only
// iterate over a single space.
@ -950,74 +1004,6 @@ class MarkingStack {
};
// ----------------------------------------------------------------------------
// Functions and constants used for marking live objects.
//
// Many operations (eg, Object::Size()) are based on an object's map. When
// objects are marked as live or overflowed, their map pointer is changed.
// Use clear_mark_bit and/or clear_overflow_bit to recover the original map
// word.
static inline intptr_t clear_mark_bit(intptr_t map_word) {
return map_word | kMarkingMask;
}
static inline intptr_t clear_overflow_bit(intptr_t map_word) {
return map_word & ~kOverflowMask;
}
// True if the object is marked live.
static inline bool is_marked(HeapObject* obj) {
intptr_t map_word = reinterpret_cast<intptr_t>(obj->map());
return (map_word & kMarkingMask) == 0;
}
// Mutate an object's map pointer to indicate that the object is live.
static inline void set_mark(HeapObject* obj) {
ASSERT(!is_marked(obj));
intptr_t map_word = reinterpret_cast<intptr_t>(obj->map());
obj->set_map(reinterpret_cast<Map*>(map_word & ~kMarkingMask));
}
// Mutate an object's map pointer to remove the indication that the object
// is live, ie, (partially) restore the map pointer.
static inline void clear_mark(HeapObject* obj) {
ASSERT(is_marked(obj));
intptr_t map_word = reinterpret_cast<intptr_t>(obj->map());
obj->set_map(reinterpret_cast<Map*>(clear_mark_bit(map_word)));
}
// True if the object is marked overflowed.
static inline bool is_overflowed(HeapObject* obj) {
intptr_t map_word = reinterpret_cast<intptr_t>(obj->map());
return (map_word & kOverflowMask) != 0;
}
// Mutate an object's map pointer to indicate that the object is overflowed.
// Overflowed objects have been reached during marking of the heap but not
// pushed on the marking stack (and thus their children have not necessarily
// been marked).
static inline void set_overflow(HeapObject* obj) {
intptr_t map_word = reinterpret_cast<intptr_t>(obj->map());
obj->set_map(reinterpret_cast<Map*>(map_word | kOverflowMask));
}
// Mutate an object's map pointer to remove the indication that the object
// is overflowed, ie, (partially) restore the map pointer.
static inline void clear_overflow(HeapObject* obj) {
ASSERT(is_overflowed(obj));
intptr_t map_word = reinterpret_cast<intptr_t>(obj->map());
obj->set_map(reinterpret_cast<Map*>(clear_overflow_bit(map_word)));
}
// A helper class to document/test C++ scopes where we do not
// expect a GC. Usage:
//
@ -1079,6 +1065,72 @@ class HeapProfiler {
};
#endif
// GCTracer collects and prints ONE line after each garbage collector
// invocation IFF --trace_gc is used.
class GCTracer BASE_EMBEDDED {
public:
GCTracer();
~GCTracer();
// Sets the collector.
void set_collector(GarbageCollector collector) { collector_ = collector; }
// Sets the GC count.
void set_gc_count(int count) { gc_count_ = count; }
// Sets the full GC count.
void set_full_gc_count(int count) { full_gc_count_ = count; }
// Sets the flag that this is a compacting full GC.
void set_is_compacting() { is_compacting_ = true; }
// Increment and decrement the count of marked objects.
void increment_marked_count() { ++marked_count_; }
void decrement_marked_count() { --marked_count_; }
int marked_count() { return marked_count_; }
private:
// Returns a string matching the collector.
const char* CollectorString();
// Returns the size of objects in the heap (in MB).
double SizeOfHeapObjects() {
return (static_cast<double>(Heap::SizeOfObjects())) / MB;
}
double start_time_; // Timestamp set in the constructor.
double start_size_; // Size of objects in heap set in constructor.
GarbageCollector collector_; // Type of collector.
// A count (including this one, e.g., the first collection is 1) of the
// number of garbage collections.
int gc_count_;
// A count (including this one) of the number of full garbage collections.
int full_gc_count_;
// True if the current GC is a compacting full collection, false
// otherwise.
bool is_compacting_;
// True if the *previous* full GC was a compacting collection (will be
// false if there has not been a previous full GC).
bool previous_has_compacted_;
// On a full GC, a count of the number of marked objects. Incremented
// when an object is marked and decremented when an object's mark bit is
// cleared. Will be zero on a scavenge collection.
int marked_count_;
// The count from the end of the previous full GC. Will be zero if there
// was no previous full GC.
int previous_marked_count_;
};
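Condensed from the call site in Heap::CollectGarbage earlier in this change, the tracer is used as a scoped object around a single collection:

// The constructor snapshots start time and heap size; the destructor
// prints the one-line --trace_gc report.
{ GCTracer tracer;
  tracer.set_gc_count(gc_count_);
  tracer.set_collector(collector);
  PerformGarbageCollection(space, collector, &tracer);
}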
} } // namespace v8::internal
#endif // V8_HEAP_H_

View File

@ -428,7 +428,7 @@ void CallIC::Generate(MacroAssembler* masm,
__ add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
__ ldr(r1, MemOperand(ip, 0 * kPointerSize));
__ EnterJSFrame(0, 0);
__ EnterJSFrame(0);
// Push the receiver and the name of the function.
__ ldr(r0, MemOperand(pp, 0));
@ -447,7 +447,7 @@ void CallIC::Generate(MacroAssembler* masm,
__ ldr(r0, MemOperand(v8::internal::fp, // fp is shadowed by IC::fp
JavaScriptFrameConstants::kArgsLengthOffset));
__ ExitJSFrame(DO_NOT_RETURN, 0);
__ ExitJSFrame(DO_NOT_RETURN);
// Patch the function on the stack; 1 ~ receiver.
__ add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));


@ -135,7 +135,7 @@ Address IC::OriginalCodeAddress() {
IC::State IC::StateFrom(Code* target, Object* receiver) {
IC::State state = target->state();
IC::State state = target->ic_state();
if (state != MONOMORPHIC) return state;
if (receiver->IsUndefined() || receiver->IsNull()) return state;
@ -206,7 +206,7 @@ void IC::Clear(Address address) {
Code* target = GetTargetAtAddress(address);
// Don't clear debug break inline cache as it will remove the break point.
if (target->state() == DEBUG_BREAK) return;
if (target->ic_state() == DEBUG_BREAK) return;
switch (target->kind()) {
case Code::LOAD_IC: return LoadIC::Clear(address, target);
@ -220,32 +220,32 @@ void IC::Clear(Address address) {
void CallIC::Clear(Address address, Code* target) {
if (target->state() == UNINITIALIZED) return;
if (target->ic_state() == UNINITIALIZED) return;
Code* code = StubCache::FindCallInitialize(target->arguments_count());
SetTargetAtAddress(address, code);
}
void KeyedLoadIC::Clear(Address address, Code* target) {
if (target->state() == UNINITIALIZED) return;
if (target->ic_state() == UNINITIALIZED) return;
SetTargetAtAddress(address, initialize_stub());
}
void LoadIC::Clear(Address address, Code* target) {
if (target->state() == UNINITIALIZED) return;
if (target->ic_state() == UNINITIALIZED) return;
SetTargetAtAddress(address, initialize_stub());
}
void StoreIC::Clear(Address address, Code* target) {
if (target->state() == UNINITIALIZED) return;
if (target->ic_state() == UNINITIALIZED) return;
SetTargetAtAddress(address, initialize_stub());
}
void KeyedStoreIC::Clear(Address address, Code* target) {
if (target->state() == UNINITIALIZED) return;
if (target->ic_state() == UNINITIALIZED) return;
SetTargetAtAddress(address, initialize_stub());
}


@ -45,11 +45,12 @@ DEFINE_bool(log_all, false, "Log all events to the log file.");
DEFINE_bool(log_api, false, "Log API events to the log file.");
DEFINE_bool(log_code, false,
"Log code events to the log file without profiling.");
DEFINE_bool(log_debugger, false, "Log debugger internal messages.");
DEFINE_bool(log_gc, false,
"Log heap samples on garbage collection for the hp2ps tool.");
DEFINE_bool(log_suspect, false, "Log suspect operations.");
DEFINE_bool(log_handles, false, "Log global handle events.");
DEFINE_bool(log_state_changes, false, "Log state changes.");
DEFINE_bool(log_suspect, false, "Log suspect operations.");
DEFINE_bool(prof, false,
"Log statistical profiling information (implies --log-code).");
DEFINE_bool(sliding_state_window, false,
@ -266,26 +267,6 @@ void Profiler::Run() {
}
//
// Synchronize class used for ensuring block structured
// locking for the Logger::*Event functions.
//
class Synchronize {
public:
explicit Synchronize(Mutex* mutex) {
mutex_ = mutex;
mutex_->Lock();
}
~Synchronize() {
mutex_->Unlock();
}
private:
// Mutex used for enforcing block structured access.
Mutex* mutex_;
};
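// Hedged sketch of the ScopedLock that replaces Synchronize below; per the
// change description it lives in platform.h, and its shape is assumed to
// mirror the class above.
class ScopedLockSketch {
 public:
  explicit ScopedLockSketch(Mutex* mutex) : mutex_(mutex) { mutex_->Lock(); }
  ~ScopedLockSketch() { mutex_->Unlock(); }  // unlocked on scope exit
 private:
  Mutex* mutex_;  // mutex held for the lifetime of this object
};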
//
// Logger class implementation.
//
@ -301,7 +282,7 @@ SlidingStateWindow* Logger::sliding_state_window_ = NULL;
void Logger::Preamble(const char* content) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL) return;
Synchronize s(mutex_);
ScopedLock sl(mutex_);
fprintf(logfile_, "%s", content);
#endif
}
@ -310,7 +291,7 @@ void Logger::Preamble(const char* content) {
void Logger::StringEvent(const char* name, const char* value) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL) return;
Synchronize s(mutex_);
ScopedLock sl(mutex_);
fprintf(logfile_, "%s,\"%s\"\n", name, value);
#endif
}
@ -319,7 +300,7 @@ void Logger::StringEvent(const char* name, const char* value) {
void Logger::IntEvent(const char* name, int value) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL) return;
Synchronize s(mutex_);
ScopedLock sl(mutex_);
fprintf(logfile_, "%s,%d\n", name, value);
#endif
}
@ -328,7 +309,7 @@ void Logger::IntEvent(const char* name, int value) {
void Logger::HandleEvent(const char* name, Object** location) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL || !FLAG_log_handles) return;
Synchronize s(mutex_);
ScopedLock sl(mutex_);
fprintf(logfile_, "%s,0x%x\n", name,
reinterpret_cast<unsigned int>(location));
#endif
@ -341,7 +322,7 @@ void Logger::HandleEvent(const char* name, Object** location) {
// FLAG_log_api is true.
void Logger::ApiEvent(const char* format, ...) {
ASSERT(logfile_ != NULL && FLAG_log_api);
Synchronize s(mutex_);
ScopedLock sl(mutex_);
va_list ap;
va_start(ap, format);
vfprintf(logfile_, format, ap);
@ -370,7 +351,7 @@ void Logger::SharedLibraryEvent(const char* library_path,
unsigned end) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL || !FLAG_prof) return;
Synchronize s(mutex_);
ScopedLock sl(mutex_);
fprintf(logfile_, "shared-library,\"%s\",0x%08x,0x%08x\n", library_path,
start, end);
#endif
@ -382,7 +363,7 @@ void Logger::SharedLibraryEvent(const wchar_t* library_path,
unsigned end) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL || !FLAG_prof) return;
Synchronize s(mutex_);
ScopedLock sl(mutex_);
fprintf(logfile_, "shared-library,\"%ls\",0x%08x,0x%08x\n", library_path,
start, end);
#endif
@ -445,7 +426,7 @@ void Logger::ApiEntryCall(const char* name) {
void Logger::NewEvent(const char* name, void* object, size_t size) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL) return;
Synchronize s(mutex_);
ScopedLock sl(mutex_);
fprintf(logfile_, "new,%s,0x%x,%u\n", name,
reinterpret_cast<unsigned int>(object),
static_cast<unsigned int>(size));
@ -456,7 +437,7 @@ void Logger::NewEvent(const char* name, void* object, size_t size) {
void Logger::DeleteEvent(const char* name, void* object) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL) return;
Synchronize s(mutex_);
ScopedLock sl(mutex_);
fprintf(logfile_, "delete,%s,0x%x\n", name,
reinterpret_cast<unsigned int>(object));
#endif
@ -466,7 +447,7 @@ void Logger::DeleteEvent(const char* name, void* object) {
void Logger::CodeCreateEvent(const char* tag, Code* code, const char* comment) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL || !FLAG_log_code) return;
Synchronize s(mutex_);
ScopedLock sl(mutex_);
fprintf(logfile_, "code-creation,%s,0x%x,%d,\"", tag,
reinterpret_cast<unsigned int>(code->address()),
@ -483,7 +464,7 @@ void Logger::CodeCreateEvent(const char* tag, Code* code, const char* comment) {
void Logger::CodeCreateEvent(const char* tag, Code* code, String* name) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL || !FLAG_log_code) return;
Synchronize s(mutex_);
ScopedLock sl(mutex_);
SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
fprintf(logfile_, "code-creation,%s,0x%x,%d,\"%s\"\n", tag,
@ -496,7 +477,7 @@ void Logger::CodeCreateEvent(const char* tag, Code* code, String* name) {
void Logger::CodeCreateEvent(const char* tag, Code* code, int args_count) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL || !FLAG_log_code) return;
Synchronize s(mutex_);
ScopedLock sl(mutex_);
fprintf(logfile_, "code-creation,%s,0x%x,%d,\"args_count: %d\"\n", tag,
reinterpret_cast<unsigned int>(code->address()),
@ -509,7 +490,7 @@ void Logger::CodeCreateEvent(const char* tag, Code* code, int args_count) {
void Logger::CodeMoveEvent(Address from, Address to) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL || !FLAG_log_code) return;
Synchronize s(mutex_);
ScopedLock sl(mutex_);
fprintf(logfile_, "code-move,0x%x,0x%x\n",
reinterpret_cast<unsigned int>(from),
reinterpret_cast<unsigned int>(to));
@ -520,7 +501,7 @@ void Logger::CodeMoveEvent(Address from, Address to) {
void Logger::CodeDeleteEvent(Address from) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL || !FLAG_log_code) return;
Synchronize s(mutex_);
ScopedLock sl(mutex_);
fprintf(logfile_, "code-delete,0x%x\n", reinterpret_cast<unsigned int>(from));
#endif
}
@ -529,7 +510,7 @@ void Logger::CodeDeleteEvent(Address from) {
void Logger::ResourceEvent(const char* name, const char* tag) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL) return;
Synchronize s(mutex_);
ScopedLock sl(mutex_);
fprintf(logfile_, "%s,%s,", name, tag);
uint32_t sec, usec;
@ -546,7 +527,7 @@ void Logger::ResourceEvent(const char* name, const char* tag) {
void Logger::SuspectReadEvent(String* name, String* obj) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL || !FLAG_log_suspect) return;
Synchronize s(mutex_);
ScopedLock sl(mutex_);
fprintf(logfile_, "suspect-read,");
obj->PrintOn(logfile_);
fprintf(logfile_, ",\"");
@ -559,7 +540,7 @@ void Logger::SuspectReadEvent(String* name, String* obj) {
void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL || !FLAG_log_gc) return;
Synchronize s(mutex_);
ScopedLock sl(mutex_);
fprintf(logfile_, "heap-sample-begin,\"%s\",\"%s\"\n", space, kind);
#endif
}
@ -568,7 +549,7 @@ void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
void Logger::HeapSampleEndEvent(const char* space, const char* kind) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL || !FLAG_log_gc) return;
Synchronize s(mutex_);
ScopedLock sl(mutex_);
fprintf(logfile_, "heap-sample-end,\"%s\",\"%s\"\n", space, kind);
#endif
}
@ -577,7 +558,7 @@ void Logger::HeapSampleEndEvent(const char* space, const char* kind) {
void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (logfile_ == NULL || !FLAG_log_gc) return;
Synchronize s(mutex_);
ScopedLock sl(mutex_);
fprintf(logfile_, "heap-sample-item,%s,%d,%d\n", type, number, bytes);
#endif
}
@ -586,7 +567,7 @@ void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::TickEvent(TickSample* sample, bool overflow) {
if (logfile_ == NULL) return;
Synchronize s(mutex_);
ScopedLock sl(mutex_);
fprintf(logfile_, "tick,0x%x,0x%x,%d", sample->pc, sample->sp,
static_cast<int>(sample->state));
if (overflow) fprintf(logfile_, ",overflow");
@ -611,7 +592,7 @@ bool Logger::Setup() {
// Each of the individual log flags implies --log. Check after
// checking --log-all and --prof in case they set --log-code.
if (FLAG_log_api || FLAG_log_code || FLAG_log_gc ||
if (FLAG_log_api || FLAG_log_code || FLAG_log_debugger || FLAG_log_gc ||
FLAG_log_handles || FLAG_log_suspect) {
FLAG_log = true;
}


@ -49,6 +49,9 @@ namespace v8 { namespace internal {
// Log code (create, move, and delete) events to the logfile, default is off.
// --log-code implies --log.
//
// --log-debugger
// Log the internal activity of the debugger, to aid in debugging the debugger.
//
// --log-gc
// Log GC heap samples after each GC that can be processed by hp2ps, default
// is off. --log-gc implies --log.


@ -46,7 +46,8 @@ Register pp = { 10 }; // parameter pointer
MacroAssembler::MacroAssembler(void* buffer, int size)
: Assembler(buffer, size),
unresolved_(0),
generating_stub_(false) {
generating_stub_(false),
allow_stub_calls_(true) {
}
@ -209,23 +210,31 @@ void MacroAssembler::RecordWrite(Register object, Register offset,
Label fast, done;
// First, test that the start address is not in the new space. We cannot
// set remembered set bits in the new space.
// First, test that the object is not in the new space. We cannot set
// remembered set bits in the new space.
// object: heap object pointer (with tag)
// offset: offset to store location from the object
and_(scratch, object, Operand(Heap::NewSpaceMask()));
cmp(scratch, Operand(ExternalReference::new_space_start()));
b(eq, &done);
mov(ip, Operand(Page::kPageAlignmentMask)); // load mask only once
// Compute the bit offset in the remembered set.
and_(scratch, object, Operand(ip));
add(offset, scratch, Operand(offset));
// object: heap object pointer (with tag)
// offset: offset to store location from the object
mov(ip, Operand(Page::kPageAlignmentMask)); // load mask only once
and_(scratch, object, Operand(ip)); // offset into page of the object
add(offset, scratch, Operand(offset)); // add offset into the object
mov(offset, Operand(offset, LSR, kObjectAlignmentBits));
// Compute the page address from the heap object pointer.
// object: heap object pointer (with tag)
// offset: bit offset of store position in the remembered set
bic(object, object, Operand(ip));
// If the bit offset lies beyond the normal remembered set range, it is in
// the extra remembered set area of a large object.
// object: page start
// offset: bit offset of store position in the remembered set
cmp(offset, Operand(Page::kPageSize / kPointerSize));
b(lt, &fast);
@ -245,11 +254,14 @@ void MacroAssembler::RecordWrite(Register object, Register offset,
add(object, object, Operand(scratch));
bind(&fast);
// Now object is the address of the start of the remembered set and offset
// is the bit offset from that start.
// Get address of the rset word.
add(object, object, Operand(offset, LSR, kRSetWordShift));
// Get bit offset in the word.
// object: start of the remembered set (page start for the fast case)
// offset: bit offset of store position in the remembered set
bic(scratch, offset, Operand(kBitsPerInt - 1)); // clear the bit offset
add(object, object, Operand(scratch, LSR, kRSetWordShift));
// Get bit offset in the rset word.
// object: address of remembered set word
// offset: bit offset of store position
and_(offset, offset, Operand(kBitsPerInt - 1));
ldr(scratch, MemOperand(object));
@ -261,7 +273,7 @@ void MacroAssembler::RecordWrite(Register object, Register offset,
}
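// Hedged C++ restatement of the remembered-set arithmetic in the hunk above,
// for readers not fluent in ARM assembly. It covers the fast case only (the
// large-object path first rebases into the extra remembered set area); the
// helper itself is illustrative, not part of the patch.
static inline void RSetPositionSketch(uintptr_t object, uintptr_t offset,
                                      uintptr_t* rset_word, int* bit) {
  // Bit offset of the store position within the page's remembered set.
  uintptr_t bit_offset =
      ((object & Page::kPageAlignmentMask) + offset) >> kObjectAlignmentBits;
  uintptr_t page = object & ~static_cast<uintptr_t>(Page::kPageAlignmentMask);
  // Address of the word holding that bit: clear the in-word bit index, then
  // scale from bits to bytes (kRSetWordShift).
  *rset_word = page +
      ((bit_offset & ~static_cast<uintptr_t>(kBitsPerInt - 1)) >>
       kRSetWordShift);
  *bit = static_cast<int>(bit_offset & (kBitsPerInt - 1));
}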
void MacroAssembler::EnterJSFrame(int argc, RegList callee_saved) {
void MacroAssembler::EnterJSFrame(int argc) {
// Generate code entering a JS function called from a JS function
// stack: receiver, arguments
// r0: number of arguments (not including function, nor receiver)
@ -299,18 +311,10 @@ void MacroAssembler::EnterJSFrame(int argc, RegList callee_saved) {
mov(r3, Operand(r0)); // args_len to be saved
mov(r2, Operand(cp)); // context to be saved
// Make sure there are no instructions between both stm instructions, because
// the callee_saved list is obtained during stack unwinding by decoding the
// first stmdb instruction, which is found (or not) at a constant offset from
// the pc saved by the second stmdb instruction.
if (callee_saved != 0) {
stm(db_w, sp, callee_saved);
}
// push in reverse order: context (r2), args_len (r3), caller_pp, caller_fp,
// sp_on_exit (ip == pp, may be patched on exit), return address, prolog_pc
// sp_on_exit (ip == pp, may be patched on exit), return address
stm(db_w, sp, r2.bit() | r3.bit() | pp.bit() | fp.bit() |
ip.bit() | lr.bit() | pc.bit());
ip.bit() | lr.bit());
// Setup new frame pointer.
add(fp, sp, Operand(-StandardFrameConstants::kContextOffset));
@ -321,20 +325,16 @@ void MacroAssembler::EnterJSFrame(int argc, RegList callee_saved) {
}
void MacroAssembler::ExitJSFrame(ExitJSFlag flag, RegList callee_saved) {
void MacroAssembler::ExitJSFrame(ExitJSFlag flag) {
// r0: result
// sp: stack pointer
// fp: frame pointer
// pp: parameter pointer
if (callee_saved != 0 || flag == DO_NOT_RETURN) {
if (flag == DO_NOT_RETURN) {
add(r3, fp, Operand(JavaScriptFrameConstants::kSavedRegistersOffset));
}
if (callee_saved != 0) {
ldm(ia_w, r3, callee_saved);
}
if (flag == DO_NOT_RETURN) {
// restore sp as caller_sp (not as pp)
str(r3, MemOperand(fp, JavaScriptFrameConstants::kSPOnExitOffset));
@ -563,13 +563,13 @@ void MacroAssembler::CheckAccessGlobal(Register holder_reg,
void MacroAssembler::CallStub(CodeStub* stub) {
ASSERT(!generating_stub()); // stub calls are not allowed in stubs
ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs
Call(stub->GetCode(), code_target);
}
void MacroAssembler::CallJSExitStub(CodeStub* stub) {
ASSERT(!generating_stub()); // stub calls are not allowed in stubs
ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs
Call(stub->GetCode(), exit_js_frame);
}
@ -592,6 +592,15 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
mov(r0, Operand(num_arguments - 1));
} else {
ASSERT(f->nargs == num_arguments);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
// The number of arguments is fixed for this call.
// Set r0 correspondingly.
push(r0);
mov(r0, Operand(f->nargs - 1)); // receiver does not count as an argument
}
RuntimeStub stub((Runtime::FunctionId) f->stub_id);
@ -605,16 +614,6 @@ void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
void MacroAssembler::TailCallRuntime(Runtime::Function* f) {
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
if (f->nargs >= 0) {
// The number of arguments is fixed for this call.
// Set r0 correspondingly.
push(r0);
mov(r0, Operand(f->nargs - 1)); // receiver does not count as an argument
}
JumpToBuiltin(ExternalReference(f)); // tail call to runtime routine
}


@ -93,8 +93,8 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Activation frames
void EnterJSFrame(int argc, RegList callee_saved);
void ExitJSFrame(ExitJSFlag flag, RegList callee_saved);
void EnterJSFrame(int argc);
void ExitJSFrame(ExitJSFlag flag);
// Support functions.
@ -198,10 +198,13 @@ class MacroAssembler: public Assembler {
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
bool generating_stub() { return generating_stub_; }
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
private:
List<Unresolved> unresolved_;
bool generating_stub_;
bool allow_stub_calls_;
};


@ -43,7 +43,8 @@ DEFINE_bool(native_code_counters, false,
MacroAssembler::MacroAssembler(void* buffer, int size)
: Assembler(buffer, size),
unresolved_(0),
generating_stub_(false) {
generating_stub_(false),
allow_stub_calls_(true) {
}
@ -507,7 +508,7 @@ void MacroAssembler::NegativeZeroTest(Register result,
void MacroAssembler::CallStub(CodeStub* stub) {
ASSERT(!generating_stub()); // calls are not allowed in stubs
ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
call(stub->GetCode(), code_target);
}
@ -681,8 +682,8 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
bool resolved;
Handle<Code> code = ResolveBuiltin(id, &resolved);
// Calls are not allowed in stubs.
ASSERT(flag == JUMP_FUNCTION || !generating_stub());
// Calls are not allowed in some stubs.
ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
// Rely on the assertion to check that the number of provided
// arguments match the expected number of arguments. Fake a


@ -236,10 +236,13 @@ class MacroAssembler: public Assembler {
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
bool generating_stub() { return generating_stub_; }
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
private:
List<Unresolved> unresolved_;
bool generating_stub_;
bool allow_stub_calls_;
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
@ -279,6 +282,15 @@ static inline Operand FieldOperand(Register object, int offset) {
}
// Generate an Operand for loading an indexed field from an object.
static inline Operand FieldOperand(Register object,
Register index,
ScaleFactor scale,
int offset) {
return Operand(object, index, scale, offset - kHeapObjectTag);
}
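// Hedged usage sketch: how these helpers typically appear in stub code. The
// registers, scale factor, and field constants below are illustrative, not
// taken from this patch.
//
//   __ mov(eax, FieldOperand(edx, JSObject::kPropertiesOffset));
//   __ mov(eax, FieldOperand(ebx, ecx, times_4, FixedArray::kHeaderSize));
//
// Subtracting kHeapObjectTag inside FieldOperand compensates for the tag
// bits in the object pointer, so the operand addresses the raw field.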
} } // namespace v8::internal
#endif // V8_MACRO_ASSEMBLER_IA32_H_


@ -62,6 +62,10 @@ DECLARE_bool(gc_global);
bool MarkCompactCollector::compacting_collection_ = false;
int MarkCompactCollector::previous_marked_count_ = 0;
GCTracer* MarkCompactCollector::tracer_ = NULL;
#ifdef DEBUG
MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE;
@ -75,8 +79,14 @@ int MarkCompactCollector::live_map_objects_ = 0;
int MarkCompactCollector::live_lo_objects_ = 0;
#endif
void MarkCompactCollector::CollectGarbage() {
void MarkCompactCollector::CollectGarbage(GCTracer* tracer) {
// Rather than passing the tracer around we stash it in a static member
// variable.
tracer_ = tracer;
Prepare();
// Prepare has selected whether to compact the old generation or not.
// Tell the tracer.
if (IsCompacting()) tracer_->set_is_compacting();
MarkLiveObjects();
@ -96,6 +106,12 @@ void MarkCompactCollector::CollectGarbage() {
}
Finish();
// Save the count of marked objects remaining after the collection and
// null out the GC tracer.
previous_marked_count_ = tracer_->marked_count();
ASSERT(previous_marked_count_ == 0);
tracer_ = NULL;
}
@ -168,76 +184,6 @@ void MarkCompactCollector::Finish() {
}
// ---------------------------------------------------------------------------
// Forwarding pointers and map pointer encoding
// | 11 bits | offset to the live object in the page
// | 11 bits | offset in a map page
// | 10 bits | map table index
static const int kMapPageIndexBits = 10;
static const int kMapPageOffsetBits = 11;
static const int kForwardingOffsetBits = 11;
static const int kAlignmentBits = 1;
static const int kMapPageIndexShift = 0;
static const int kMapPageOffsetShift =
kMapPageIndexShift + kMapPageIndexBits;
static const int kForwardingOffsetShift =
kMapPageOffsetShift + kMapPageOffsetBits;
// 0x000003FF
static const uint32_t kMapPageIndexMask =
(1 << kMapPageOffsetShift) - 1;
// 0x001FFC00
static const uint32_t kMapPageOffsetMask =
((1 << kForwardingOffsetShift) - 1) & ~kMapPageIndexMask;
// 0xFFE00000
static const uint32_t kForwardingOffsetMask =
~(kMapPageIndexMask | kMapPageOffsetMask);
static uint32_t EncodePointers(Address map_addr, int offset) {
// Offset is the distance to the first alive object in the same
// page. The offset between two objects in the same page should not
// exceed the object area size of a page.
ASSERT(0 <= offset && offset < Page::kObjectAreaSize);
int compact_offset = offset >> kObjectAlignmentBits;
ASSERT(compact_offset < (1 << kForwardingOffsetBits));
Page* map_page = Page::FromAddress(map_addr);
int map_page_index = map_page->mc_page_index;
ASSERT_MAP_PAGE_INDEX(map_page_index);
int map_page_offset = map_page->Offset(map_addr) >> kObjectAlignmentBits;
return (compact_offset << kForwardingOffsetShift)
| (map_page_offset << kMapPageOffsetShift)
| (map_page_index << kMapPageIndexShift);
}
static int DecodeOffset(uint32_t encoded) {
// The offset field is represented in the MSB.
int offset = (encoded >> kForwardingOffsetShift) << kObjectAlignmentBits;
ASSERT(0 <= offset && offset < Page::kObjectAreaSize);
return offset;
}
static Address DecodeMapPointer(uint32_t encoded, MapSpace* map_space) {
int map_page_index = (encoded & kMapPageIndexMask) >> kMapPageIndexShift;
ASSERT_MAP_PAGE_INDEX(map_page_index);
int map_page_offset = ((encoded & kMapPageOffsetMask) >> kMapPageOffsetShift)
<< kObjectAlignmentBits;
return (map_space->PageAddress(map_page_index) + map_page_offset);
}
// ----------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
// before: all objects are in normal state.
@ -341,17 +287,15 @@ class MarkingVisitor : public ObjectVisitor {
// object->IsConsString() &&
// (ConsString::cast(object)->second() == Heap::empty_string())
// except the map for the object might be marked.
intptr_t map_word =
reinterpret_cast<intptr_t>(HeapObject::cast(obj)->map());
uint32_t tag =
(reinterpret_cast<Map*>(clear_mark_bit(map_word)))->instance_type();
if ((tag < FIRST_NONSTRING_TYPE) &&
(kConsStringTag ==
static_cast<StringRepresentationTag>(tag &
kStringRepresentationMask)) &&
(Heap::empty_string() ==
reinterpret_cast<String*>(
reinterpret_cast<ConsString*>(obj)->second()))) {
MapWord map_word = HeapObject::cast(obj)->map_word();
map_word.ClearMark();
InstanceType type = map_word.ToMap()->instance_type();
if ((type < FIRST_NONSTRING_TYPE) &&
(static_cast<StringRepresentationTag>(
type & kStringRepresentationMask) == kConsStringTag) &&
(reinterpret_cast<String*>(
reinterpret_cast<ConsString*>(obj)->second()) ==
Heap::empty_string())) {
// Since we don't have the object start it is impossible to update the
// remembered set quickly. Therefore this optimization only takes
// place when we can avoid changing it.
@ -381,7 +325,8 @@ class MarkingVisitor : public ObjectVisitor {
MarkCompactCollector::UpdateLiveObjectCount(obj);
#endif
Map* map = obj->map();
set_mark(obj);
obj->SetMark();
MarkCompactCollector::tracer()->increment_marked_count();
// Mark the map pointer and the body.
MarkCompactCollector::MarkObject(map);
obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), this);
@ -398,7 +343,7 @@ class MarkingVisitor : public ObjectVisitor {
for (Object** p = start; p < end; p++) {
if (!(*p)->IsHeapObject()) continue;
HeapObject* obj = HeapObject::cast(*p);
if (is_marked(obj)) continue;
if (obj->IsMarked()) continue;
VisitUnmarkedObject(obj);
}
return true;
@ -413,7 +358,7 @@ class SymbolTableCleaner : public ObjectVisitor {
void VisitPointers(Object** start, Object** end) {
// Visit all HeapObject pointers in [start, end).
for (Object** p = start; p < end; p++) {
if ((*p)->IsHeapObject() && !is_marked(HeapObject::cast(*p))) {
if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
// Set the entry to null_value (as deleted).
*p = Heap::null_value();
pointers_removed_++;
@ -429,92 +374,46 @@ class SymbolTableCleaner : public ObjectVisitor {
};
static void MarkObjectGroups(MarkingVisitor* marker) {
List<ObjectGroup*>& object_groups = GlobalHandles::ObjectGroups();
for (int i = 0; i < object_groups.length(); i++) {
ObjectGroup* entry = object_groups[i];
bool group_marked = false;
List<Object**>& objects = entry->objects_;
for (int j = 0; j < objects.length(); j++) {
Object* obj = *objects[j];
if (obj->IsHeapObject() && is_marked(HeapObject::cast(obj))) {
group_marked = true;
break;
}
}
if (!group_marked) continue;
for (int j = 0; j < objects.length(); j++) {
marker->VisitPointer(objects[j]);
}
}
}
void MarkCompactCollector::MarkUnmarkedObject(HeapObject* obj) {
#ifdef DEBUG
if (!is_marked(obj)) UpdateLiveObjectCount(obj);
UpdateLiveObjectCount(obj);
#endif
ASSERT(!is_marked(obj));
ASSERT(!obj->IsMarked());
if (obj->IsJSGlobalObject()) Counters::global_objects.Increment();
if (FLAG_cleanup_caches_in_maps_at_gc && obj->IsMap()) {
Map::cast(obj)->ClearCodeCache();
}
set_mark(obj);
obj->SetMark();
tracer_->increment_marked_count();
if (!marking_stack.overflowed()) {
ASSERT(Heap::Contains(obj));
marking_stack.Push(obj);
} else {
// Set object's stack overflow bit, wait for rescan.
set_overflow(obj);
obj->SetOverflow();
}
}
void MarkCompactCollector::MarkObjectsReachableFromTopFrame() {
MarkingVisitor marking_visitor;
do {
while (!marking_stack.is_empty()) {
HeapObject* obj = marking_stack.Pop();
ASSERT(Heap::Contains(obj));
ASSERT(is_marked(obj) && !is_overflowed(obj));
// Because the object is marked, the map pointer is not tagged as a
// normal HeapObject pointer; we need to recover the map pointer and
// then use it to mark the object body.
intptr_t map_word = reinterpret_cast<intptr_t>(obj->map());
Map* map = reinterpret_cast<Map*>(clear_mark_bit(map_word));
MarkObject(map);
obj->IterateBody(map->instance_type(), obj->SizeFromMap(map),
&marking_visitor);
};
// Check objects in object groups.
MarkObjectGroups(&marking_visitor);
} while (!marking_stack.is_empty());
}
static int OverflowObjectSize(HeapObject* obj) {
// Recover the normal map pointer, it might be marked as live and
// overflowed.
intptr_t map_word = reinterpret_cast<intptr_t>(obj->map());
map_word = clear_mark_bit(map_word);
map_word = clear_overflow_bit(map_word);
return obj->SizeFromMap(reinterpret_cast<Map*>(map_word));
MapWord map_word = obj->map_word();
map_word.ClearMark();
map_word.ClearOverflow();
return obj->SizeFromMap(map_word.ToMap());
}
static bool VisitOverflowedObject(HeapObject* obj) {
if (!is_overflowed(obj)) return true;
ASSERT(is_marked(obj));
if (!obj->IsOverflowed()) return true;
ASSERT(obj->IsMarked());
if (marking_stack.overflowed()) return false;
clear_overflow(obj); // clear overflow bit
obj->ClearOverflow();
ASSERT(Heap::Contains(obj));
marking_stack.Push(obj);
return true;
@ -536,61 +435,97 @@ static void ScanOverflowedObjects(T* it) {
bool MarkCompactCollector::MustBeMarked(Object** p) {
// Check whether *p is a HeapObject pointer.
if (!(*p)->IsHeapObject()) return false;
return !is_marked(HeapObject::cast(*p));
return !HeapObject::cast(*p)->IsMarked();
}
void MarkCompactCollector::MarkLiveObjects() {
#ifdef DEBUG
ASSERT(state_ == PREPARE_GC);
state_ = MARK_LIVE_OBJECTS;
#endif
// The to space contains live objects, the from space is used as a marking
// stack.
marking_stack.Initialize(Heap::new_space()->FromSpaceLow(),
Heap::new_space()->FromSpaceHigh());
ASSERT(!marking_stack.overflowed());
// Mark the heap roots, including global variables, stack variables, etc.
MarkingVisitor marking_visitor;
Heap::IterateStrongRoots(&marking_visitor);
void MarkCompactCollector::MarkStrongRoots(MarkingVisitor* marking_visitor) {
// Mark the heap roots gray, including global variables, stack variables,
// etc.
Heap::IterateStrongRoots(marking_visitor);
// Take care of the symbol table specially.
SymbolTable* symbol_table = SymbolTable::cast(Heap::symbol_table());
// 1. Mark the prefix of the symbol table gray.
symbol_table->IteratePrefix(marking_visitor);
#ifdef DEBUG
UpdateLiveObjectCount(symbol_table);
#endif
// 2. Mark the symbol table black (ie, do not push it on the marking stack
// or mark it overflowed).
symbol_table->SetMark();
tracer_->increment_marked_count();
}
// 1. mark the prefix of the symbol table and push the objects on
// the stack.
symbol_table->IteratePrefix(&marking_visitor);
// 2. mark the symbol table without pushing it on the stack.
set_mark(symbol_table); // map word is changed.
bool has_processed_weak_pointers = false;
void MarkCompactCollector::MarkObjectGroups() {
List<ObjectGroup*>& object_groups = GlobalHandles::ObjectGroups();
// Mark objects reachable from the roots.
while (true) {
MarkObjectsReachableFromTopFrame();
for (int i = 0; i < object_groups.length(); i++) {
ObjectGroup* entry = object_groups[i];
if (entry == NULL) continue;
if (!marking_stack.overflowed()) {
if (has_processed_weak_pointers) break;
// First we mark weak pointers not yet reachable.
GlobalHandles::MarkWeakRoots(&MustBeMarked);
// Then we process weak pointers and process the transitive closure.
GlobalHandles::IterateWeakRoots(&marking_visitor);
has_processed_weak_pointers = true;
continue;
List<Object**>& objects = entry->objects_;
bool group_marked = false;
for (int j = 0; j < objects.length(); j++) {
Object* object = *objects[j];
if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
group_marked = true;
break;
}
}
// The marking stack overflowed, we need to rebuild it by scanning the
// whole heap.
marking_stack.clear_overflowed();
if (!group_marked) continue;
// We have early stops if the stack overflowed again while scanning
// overflowed objects in a space.
// An object in the group is marked, so mark as gray all white heap
// objects in the group.
for (int j = 0; j < objects.length(); ++j) {
if ((*objects[j])->IsHeapObject()) {
MarkObject(HeapObject::cast(*objects[j]));
}
}
// Once the entire group has been colored gray, set the object group
// to NULL so it won't be processed again.
delete object_groups[i];
object_groups[i] = NULL;
}
}
// Mark as black all objects reachable starting from gray objects. (Gray
// objects are marked and on the marking stack, or marked, flagged as
// overflowed, and not on the marking stack.)
//
// Before: the heap contains a mixture of white, gray, and black objects.
// After: the heap contains a mixture of white and black objects.
void MarkCompactCollector::ProcessMarkingStack(
MarkingVisitor* marking_visitor) {
while (true) {
while (!marking_stack.is_empty()) {
HeapObject* object = marking_stack.Pop();
ASSERT(object->IsHeapObject());
ASSERT(Heap::Contains(object));
// Removing a (gray) object from the marking stack turns it black.
ASSERT(object->IsMarked() && !object->IsOverflowed());
// Because the object is marked, we have to recover the original map
// pointer and use it to mark the object's body.
MapWord map_word = object->map_word();
map_word.ClearMark();
Map* map = map_word.ToMap();
MarkObject(map);
object->IterateBody(map->instance_type(), object->SizeFromMap(map),
marking_visitor);
}
// The only gray objects are marked overflowed in the heap. If there
// are any, refill the marking stack and continue.
if (!marking_stack.overflowed()) return;
marking_stack.clear_overflowed();
// We have early stops if the marking stack overflows while refilling it
// with gray objects to avoid pointlessly scanning extra spaces.
SemiSpaceIterator new_it(Heap::new_space(), &OverflowObjectSize);
ScanOverflowedObjects(&new_it);
if (marking_stack.overflowed()) continue;
@ -610,9 +545,62 @@ void MarkCompactCollector::MarkLiveObjects() {
LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize);
ScanOverflowedObjects(&lo_it);
}
}
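// Informal summary of the color encoding, derived from the comments in this
// file: a "color" is a function of the mark bit, the overflow bit, and
// marking-stack membership.
//
//   white: mark bit clear                            (not yet reached)
//   gray:  marked and on the marking stack, or
//          marked with the overflow bit set          (body not yet visited)
//   black: marked, not overflowed, not on the stack  (body visited)
//
// ProcessMarkingStack drains gray objects until only black and white
// objects remain.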
// Prune the symbol table removing all symbols only pointed to by
// the symbol table.
void MarkCompactCollector::ProcessObjectGroups(
MarkingVisitor* marking_visitor) {
bool work_to_do = true;
ASSERT(marking_stack.is_empty());
while (work_to_do) {
MarkObjectGroups();
work_to_do = !marking_stack.is_empty();
ProcessMarkingStack(marking_visitor);
}
}
void MarkCompactCollector::MarkLiveObjects() {
#ifdef DEBUG
ASSERT(state_ == PREPARE_GC);
state_ = MARK_LIVE_OBJECTS;
#endif
// The to space contains live objects, the from space is used as a marking
// stack.
marking_stack.Initialize(Heap::new_space()->FromSpaceLow(),
Heap::new_space()->FromSpaceHigh());
ASSERT(!marking_stack.overflowed());
MarkingVisitor marking_visitor;
MarkStrongRoots(&marking_visitor);
ProcessMarkingStack(&marking_visitor);
// The objects reachable from the roots are marked black, unreachable
// objects are white. Mark objects reachable from object groups with at
// least one marked object, and continue until no new objects are
// reachable from the object groups.
ProcessObjectGroups(&marking_visitor);
// The objects reachable from the roots or object groups are marked black,
// unreachable objects are white. Process objects reachable only from
// weak global handles.
//
// First we mark weak pointers not yet reachable.
GlobalHandles::MarkWeakRoots(&MustBeMarked);
// Then we process weak pointers and process the transitive closure.
GlobalHandles::IterateWeakRoots(&marking_visitor);
ProcessMarkingStack(&marking_visitor);
// Repeat the object groups to mark unmarked groups reachable from the
// weak roots.
ProcessObjectGroups(&marking_visitor);
// Prune the symbol table removing all symbols only pointed to by the
// symbol table. Cannot use SymbolTable::cast here because the symbol
// table is marked.
SymbolTable* symbol_table =
reinterpret_cast<SymbolTable*>(Heap::symbol_table());
SymbolTableCleaner v;
symbol_table->IterateElements(&v);
symbol_table->ElementsRemoved(v.PointersRemoved());
@ -623,11 +611,6 @@ void MarkCompactCollector::MarkLiveObjects() {
// Remove object groups after marking phase.
GlobalHandles::RemoveObjectGroups();
// Objects in the active semispace of the young generation will be relocated
// to the inactive semispace. Set the relocation info to the beginning of
// the inactive semispace.
Heap::new_space()->MCResetRelocationInfo();
}
@ -652,11 +635,9 @@ void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
static int CountMarkedCallback(HeapObject* obj) {
if (!is_marked(obj)) return obj->Size();
clear_mark(obj);
int obj_size = obj->Size();
set_mark(obj);
return obj_size;
MapWord map_word = obj->map_word();
map_word.ClearMark();
return obj->SizeFromMap(map_word.ToMap());
}
@ -672,7 +653,7 @@ void MarkCompactCollector::VerifyHeapAfterMarkingPhase() {
live_objects = 0; \
while (it.has_next()) { \
HeapObject* obj = HeapObject::cast(it.next()); \
if (is_marked(obj)) live_objects++; \
if (obj->IsMarked()) live_objects++; \
} \
ASSERT(live_objects == expected);
@ -765,10 +746,14 @@ void EncodeFreeRegion(Address free_start, int free_size) {
// Try to promote all objects in new space. Heap numbers and sequential
// strings are promoted to the code space, all others to the old space.
inline Object* MCAllocateFromNewSpace(HeapObject* object, int object_size) {
bool has_pointers = !object->IsHeapNumber() && !object->IsSeqString();
Object* forwarded = has_pointers ?
Heap::old_space()->MCAllocateRaw(object_size) :
Heap::code_space()->MCAllocateRaw(object_size);
AllocationSpace target_space = Heap::TargetSpace(object);
Object* forwarded;
if (target_space == OLD_SPACE) {
forwarded = Heap::old_space()->MCAllocateRaw(object_size);
} else {
ASSERT(target_space == CODE_SPACE);
forwarded = Heap::code_space()->MCAllocateRaw(object_size);
}
if (forwarded->IsFailure()) {
forwarded = Heap::new_space()->MCAllocateRaw(object_size);
@ -819,8 +804,9 @@ inline void EncodeForwardingAddressInPagedSpace(HeapObject* old_object,
HeapObject::cast(new_object)->address();
}
uint32_t encoded = EncodePointers(old_object->map()->address(), *offset);
old_object->set_map(reinterpret_cast<Map*>(encoded));
MapWord encoding =
MapWord::EncodeAddress(old_object->map()->address(), *offset);
old_object->set_map_word(encoding);
*offset += object_size;
ASSERT(*offset <= Page::kObjectAreaSize);
}
@ -865,8 +851,9 @@ inline void EncodeForwardingAddressesInRange(Address start,
int object_size; // Will be set on each iteration of the loop.
for (Address current = start; current < end; current += object_size) {
HeapObject* object = HeapObject::FromAddress(current);
if (is_marked(object)) {
clear_mark(object);
if (object->IsMarked()) {
object->ClearMark();
MarkCompactCollector::tracer()->decrement_marked_count();
object_size = object->Size();
Object* forwarded = Alloc(object, object_size);
@ -937,8 +924,9 @@ static void SweepSpace(NewSpace* space) {
current < space->top();
current += object->Size()) {
object = HeapObject::FromAddress(current);
if (is_marked(object)) {
clear_mark(object);
if (object->IsMarked()) {
object->ClearMark();
MarkCompactCollector::tracer()->decrement_marked_count();
} else {
// We give non-live objects a map that will correctly give their size,
// since their existing map might not be live after the collection.
@ -971,8 +959,9 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
current < p->AllocationTop();
current += object->Size()) {
object = HeapObject::FromAddress(current);
if (is_marked(object)) {
clear_mark(object);
if (object->IsMarked()) {
object->ClearMark();
MarkCompactCollector::tracer()->decrement_marked_count();
if (MarkCompactCollector::IsCompacting() && object->IsCode()) {
// If this is compacting collection marked code objects have had
// their IC targets converted to objects.
@ -1037,6 +1026,11 @@ void MarkCompactCollector::DeallocateMapBlock(Address start,
void MarkCompactCollector::EncodeForwardingAddresses() {
ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
// Objects in the active semispace of the young generation may be
// relocated to the inactive semispace (if not promoted). Set the
// relocation info to the beginning of the inactive semispace.
Heap::new_space()->MCResetRelocationInfo();
// Compute the forwarding pointers in each space.
EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldSpace,
IgnoreNonLiveObject>(
@ -1267,8 +1261,8 @@ int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
// Decode the map pointer.
uint32_t encoded = reinterpret_cast<uint32_t>(obj->map());
Address map_addr = DecodeMapPointer(encoded, Heap::map_space());
MapWord encoding = obj->map_word();
Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
// At this point, the first word of map_addr is also encoded, cannot
@ -1279,9 +1273,8 @@ int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
// Update map pointer.
Address new_map_addr = GetForwardingAddressInOldSpace(map);
int offset = DecodeOffset(encoded);
encoded = EncodePointers(new_map_addr, offset);
obj->set_map(reinterpret_cast<Map*>(encoded));
int offset = encoding.DecodeOffset();
obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));
#ifdef DEBUG
if (FLAG_gc_verbose) {
@ -1299,10 +1292,10 @@ int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
// The object should be in either the old space or the map space.
uint32_t encoded = reinterpret_cast<uint32_t>(obj->map());
MapWord encoding = obj->map_word();
// Offset to the first live object's forwarding address.
int offset = DecodeOffset(encoded);
int offset = encoding.DecodeOffset();
Address obj_addr = obj->address();
// Find the first live object's forwarding address.
@ -1485,8 +1478,8 @@ int MarkCompactCollector::ConvertCodeICTargetToAddress(HeapObject* obj) {
int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
// decode map pointer (forwarded address)
uint32_t encoded = reinterpret_cast<uint32_t>(obj->map());
Address map_addr = DecodeMapPointer(encoded, Heap::map_space());
MapWord encoding = obj->map_word();
Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
// Get forwarding address before resetting map pointer
@ -1514,9 +1507,9 @@ int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
int MarkCompactCollector::RelocateOldObject(HeapObject* obj) {
// decode map pointer (forwarded address)
uint32_t encoded = reinterpret_cast<uint32_t>(obj->map());
Address map_addr = DecodeMapPointer(encoded, Heap::map_space());
ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
MapWord encoding = obj->map_word();
Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
ASSERT(Heap::map_space()->Contains(map_addr));
// Get forwarding address before resetting map pointer
Address new_addr = GetForwardingAddressInOldSpace(obj);
@ -1560,8 +1553,8 @@ int MarkCompactCollector::RelocateOldObject(HeapObject* obj) {
int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
// decode map pointer (forwarded address)
uint32_t encoded = reinterpret_cast<uint32_t>(obj->map());
Address map_addr = DecodeMapPointer(encoded, Heap::map_space());
MapWord encoding = obj->map_word();
Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
// Get forwarding address before resetting map pointer
@ -1636,10 +1629,11 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
} else {
bool has_pointers = !obj->IsHeapNumber() && !obj->IsSeqString();
if (has_pointers) {
AllocationSpace target_space = Heap::TargetSpace(obj);
if (target_space == OLD_SPACE) {
Heap::old_space()->MCAdjustRelocationEnd(new_addr, obj_size);
} else {
ASSERT(target_space == CODE_SPACE);
Heap::code_space()->MCAdjustRelocationEnd(new_addr, obj_size);
}
}
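// Hedged sketch of the predicate that Heap::TargetSpace replaces in the two
// hunks above; the actual implementation is not part of this diff.
static AllocationSpace TargetSpaceSketch(HeapObject* object) {
  // Heap numbers and sequential strings contain no pointers and go to the
  // code space; everything else is promoted to the old space.
  bool has_pointers = !object->IsHeapNumber() && !object->IsSeqString();
  return has_pointers ? OLD_SPACE : CODE_SPACE;
}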


@ -39,6 +39,9 @@ typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
typedef void (*DeallocateFunction)(Address start, int size_in_bytes);
// Forward declaration of visitor.
class MarkingVisitor;
// ----------------------------------------------------------------------------
// Mark-Compact collector
//
@ -70,7 +73,7 @@ class MarkCompactCollector : public AllStatic {
typedef void (*ProcessNonLiveFunction)(HeapObject* object);
// Performs a global garbage collection.
static void CollectGarbage();
static void CollectGarbage(GCTracer* tracer);
// True if the last full GC performed heap compaction.
static bool HasCompacted() { return compacting_collection_; }
@ -78,6 +81,14 @@ class MarkCompactCollector : public AllStatic {
// True after the Prepare phase if the compaction is taking place.
static bool IsCompacting() { return compacting_collection_; }
// The count of the number of objects left marked at the end of the last
// completed full GC (expected to be zero).
static int previous_marked_count() { return previous_marked_count_; }
// During a full GC, there is a stack-allocated GCTracer that is used for
// bookkeeping information. Return a pointer to that tracer.
static GCTracer* tracer() { return tracer_; }
#ifdef DEBUG
// Checks whether performing mark-compact collection.
static bool in_use() { return state_ > PREPARE_GC; }
@ -102,6 +113,14 @@ class MarkCompactCollector : public AllStatic {
// Global flag indicating whether spaces were compacted on the last GC.
static bool compacting_collection_;
// The number of objects left marked at the end of the last completed full
// GC (expected to be zero).
static int previous_marked_count_;
// A pointer to the current stack-allocated GC tracer object during a full
// collection (NULL before and after).
static GCTracer* tracer_;
// Prepares for GC by resetting relocation info in old and map spaces and
// choosing spaces to compact.
static void Prepare();
@ -136,10 +155,24 @@ class MarkCompactCollector : public AllStatic {
static void MarkUnmarkedObject(HeapObject* obj);
static inline void MarkObject(HeapObject* obj) {
if (!is_marked(obj)) MarkUnmarkedObject(obj);
if (!obj->IsMarked()) MarkUnmarkedObject(obj);
}
static void MarkObjectsReachableFromTopFrame();
// Mark the heap roots.
static void MarkStrongRoots(MarkingVisitor* marking_visitor);
// Mark objects in object groups that have at least one object in the
// group marked.
static void MarkObjectGroups();
// Mark all objects in an object group with at least one marked
// object, then all objects reachable from marked objects in object
// groups, and repeat.
static void ProcessObjectGroups(MarkingVisitor* marking_visitor);
// Mark all objects reachable (transitively) from objects in the
// marking stack or marked as overflowed in the heap.
static void ProcessMarkingStack(MarkingVisitor* marking_visitor);
// Callback function for telling whether the object *p must be marked.
static bool MustBeMarked(Object** p);


@ -174,7 +174,12 @@ int main(int argc, char** argv) {
v8::ExtensionConfiguration extensions(kExtensionCount, extension_list);
v8::Context::New(&extensions);
// TODO(1247464): Cache delayed scripts.
// Make sure all builtin scripts are cached.
{ HandleScope scope;
for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) {
i::Bootstrapper::NativesSourceLookup(i);
}
}
// Get rid of unreferenced scripts.
i::Heap::CollectGarbage(0, i::OLD_SPACE);
i::Serializer ser;


@ -624,9 +624,15 @@ void Code::CodePrint() {
void Code::CodeVerify() {
ASSERT(ic_flag() == IC_TARGET_IS_ADDRESS);
CHECK(ic_flag() == IC_TARGET_IS_ADDRESS);
Address last_gc_pc = NULL;
for (RelocIterator it(this); !it.done(); it.next()) {
it.rinfo()->Verify();
// Ensure that GC will not iterate twice over the same pointer.
if (is_gc_reloc_mode(it.rinfo()->rmode())) {
CHECK(it.rinfo()->pc() != last_gc_pc);
last_gc_pc = it.rinfo()->pc();
}
}
}


@ -576,6 +576,121 @@ bool Smi::IsValid(int value) {
}
MapWord MapWord::FromMap(Map* map) {
return MapWord(reinterpret_cast<uintptr_t>(map));
}
Map* MapWord::ToMap() {
return reinterpret_cast<Map*>(value_);
}
bool MapWord::IsForwardingAddress() {
// This function only works for map words that are heap object pointers.
// Since it is a heap object, it has a map. We use that map's instance
// type to detect if this map word is not actually a map (ie, it is a
// forwarding address during a scavenge collection).
return reinterpret_cast<HeapObject*>(value_)->map()->instance_type() !=
MAP_TYPE;
}
MapWord MapWord::FromForwardingAddress(HeapObject* object) {
return MapWord(reinterpret_cast<uintptr_t>(object));
}
HeapObject* MapWord::ToForwardingAddress() {
ASSERT(IsForwardingAddress());
return reinterpret_cast<HeapObject*>(value_);
}
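// Hedged sketch of how a scavenge might use the forwarding encoding above.
// CopyToToSpace is a hypothetical helper; only the MapWord API is from this
// change.
static HeapObject* CopyToToSpace(HeapObject* object);  // hypothetical

static HeapObject* EnsureForwarded(HeapObject* object) {
  MapWord first_word = object->map_word();
  if (first_word.IsForwardingAddress()) {
    // Already evacuated: the map slot holds the object's new location.
    return first_word.ToForwardingAddress();
  }
  HeapObject* copy = CopyToToSpace(object);  // hypothetical helper
  // Leave a forwarding pointer behind for later visits of the old copy.
  object->set_map_word(MapWord::FromForwardingAddress(copy));
  return copy;
}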
bool MapWord::IsMarked() {
return (value_ & kMarkingMask) == 0;
}
void MapWord::SetMark() {
value_ &= ~kMarkingMask;
}
void MapWord::ClearMark() {
value_ |= kMarkingMask;
}
bool MapWord::IsOverflowed() {
return (value_ & kOverflowMask) != 0;
}
void MapWord::SetOverflow() {
value_ |= kOverflowMask;
}
void MapWord::ClearOverflow() {
value_ &= ~kOverflowMask;
}
MapWord MapWord::EncodeAddress(Address map_address, int offset) {
// Offset is the distance in live bytes from the first live object in the
// same page. The offset between two objects in the same page should not
// exceed the object area size of a page.
ASSERT(0 <= offset && offset < Page::kObjectAreaSize);
int compact_offset = offset >> kObjectAlignmentBits;
ASSERT(compact_offset < (1 << kForwardingOffsetBits));
Page* map_page = Page::FromAddress(map_address);
ASSERT_MAP_PAGE_INDEX(map_page->mc_page_index);
int map_page_offset =
map_page->Offset(map_address) >> kObjectAlignmentBits;
uintptr_t encoding =
(compact_offset << kForwardingOffsetShift) |
(map_page_offset << kMapPageOffsetShift) |
(map_page->mc_page_index << kMapPageIndexShift);
return MapWord(encoding);
}
Address MapWord::DecodeMapAddress(MapSpace* map_space) {
int map_page_index = (value_ & kMapPageIndexMask) >> kMapPageIndexShift;
ASSERT_MAP_PAGE_INDEX(map_page_index);
int map_page_offset =
((value_ & kMapPageOffsetMask) >> kMapPageOffsetShift)
<< kObjectAlignmentBits;
return (map_space->PageAddress(map_page_index) + map_page_offset);
}
int MapWord::DecodeOffset() {
// The offset field is represented in the kForwardingOffsetBits
// most-significant bits.
int offset = (value_ >> kForwardingOffsetShift) << kObjectAlignmentBits;
ASSERT(0 <= offset && offset < Page::kObjectAreaSize);
return offset;
}
MapWord MapWord::FromEncodedAddress(Address address) {
return MapWord(reinterpret_cast<uintptr_t>(address));
}
Address MapWord::ToEncodedAddress() {
return reinterpret_cast<Address>(value_);
}
#ifdef DEBUG
void HeapObject::VerifyObjectField(int offset) {
VerifyPointer(READ_FIELD(this, offset));
@ -584,15 +699,25 @@ void HeapObject::VerifyObjectField(int offset) {
Map* HeapObject::map() {
return reinterpret_cast<Map*> READ_FIELD(this, kMapOffset);
return map_word().ToMap();
}
void HeapObject::set_map(Map* value) {
WRITE_FIELD(this, kMapOffset, value);
set_map_word(MapWord::FromMap(value));
}
MapWord HeapObject::map_word() {
return MapWord(reinterpret_cast<uintptr_t>(READ_FIELD(this, kMapOffset)));
}
void HeapObject::set_map_word(MapWord map_word) {
// WRITE_FIELD does not update the remembered set, but there is no need
// to do so here.
WRITE_FIELD(this, kMapOffset, reinterpret_cast<Object*>(map_word.value_));
}
HeapObject* HeapObject::FromAddress(Address address) {
@ -635,6 +760,47 @@ void HeapObject::CopyBody(JSObject* from) {
}
bool HeapObject::IsMarked() {
return map_word().IsMarked();
}
void HeapObject::SetMark() {
ASSERT(!IsMarked());
MapWord first_word = map_word();
first_word.SetMark();
set_map_word(first_word);
}
void HeapObject::ClearMark() {
ASSERT(IsMarked());
MapWord first_word = map_word();
first_word.ClearMark();
set_map_word(first_word);
}
bool HeapObject::IsOverflowed() {
return map_word().IsOverflowed();
}
void HeapObject::SetOverflow() {
MapWord first_word = map_word();
first_word.SetOverflow();
set_map_word(first_word);
}
void HeapObject::ClearOverflow() {
ASSERT(IsOverflowed());
MapWord first_word = map_word();
first_word.ClearOverflow();
set_map_word(first_word);
}
double HeapNumber::value() {
return READ_DOUBLE_FIELD(this, kValueOffset);
}
@ -1377,8 +1543,8 @@ Code::Kind Code::kind() {
}
InlineCacheState Code::state() {
InlineCacheState result = ExtractStateFromFlags(flags());
InlineCacheState Code::ic_state() {
InlineCacheState result = ExtractICStateFromFlags(flags());
// Only allow uninitialized or debugger states for non-IC code
// objects. This is used in the debugger to determine whether or not
// a call to code object has been replaced with a debug break call.
@ -1391,7 +1557,7 @@ InlineCacheState Code::state() {
PropertyType Code::type() {
ASSERT(state() == MONOMORPHIC);
ASSERT(ic_state() == MONOMORPHIC);
return ExtractTypeFromFlags(flags());
}
@ -1403,11 +1569,16 @@ int Code::arguments_count() {
CodeStub::Major Code::major_key() {
// TODO(1238541): Simplify this somewhat complicated encoding.
ASSERT(kind() == STUB);
int low = ExtractStateFromFlags(flags());
int high = ExtractTypeFromFlags(flags());
return static_cast<CodeStub::Major>(high << 3 | low);
return static_cast<CodeStub::Major>(READ_BYTE_FIELD(this,
kStubMajorKeyOffset));
}
void Code::set_major_key(CodeStub::Major major) {
ASSERT(kind() == STUB);
ASSERT(0 <= major && major < 256);
WRITE_BYTE_FIELD(this, kStubMajorKeyOffset, major);
}
@ -1418,18 +1589,18 @@ bool Code::is_inline_cache_stub() {
Code::Flags Code::ComputeFlags(Kind kind,
InlineCacheState state,
InlineCacheState ic_state,
PropertyType type,
int argc) {
// Compute the bit mask.
int bits = kind << kFlagsKindShift;
bits |= state << kFlagsStateShift;
bits |= ic_state << kFlagsICStateShift;
bits |= type << kFlagsTypeShift;
bits |= argc << kFlagsArgumentsCountShift;
// Cast to flags and validate result before returning it.
Flags result = static_cast<Flags>(bits);
ASSERT(ExtractKindFromFlags(result) == kind);
ASSERT(ExtractStateFromFlags(result) == state);
ASSERT(ExtractICStateFromFlags(result) == ic_state);
ASSERT(ExtractTypeFromFlags(result) == type);
ASSERT(ExtractArgumentsCountFromFlags(result) == argc);
return result;
@ -1449,8 +1620,8 @@ Code::Kind Code::ExtractKindFromFlags(Flags flags) {
}
InlineCacheState Code::ExtractStateFromFlags(Flags flags) {
int bits = (flags & kFlagsStateMask) >> kFlagsStateShift;
InlineCacheState Code::ExtractICStateFromFlags(Flags flags) {
int bits = (flags & kFlagsICStateMask) >> kFlagsICStateShift;
return static_cast<InlineCacheState>(bits);
}
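// Illustrative round trip through the flag encoding (hedged: the shift and
// mask constants are whatever the header defines; the values here are
// examples only).
//
//   Code::Flags f = Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC, NORMAL, 0);
//   ASSERT(Code::ExtractKindFromFlags(f) == Code::LOAD_IC);
//   ASSERT(Code::ExtractICStateFromFlags(f) == MONOMORPHIC);
//
// Each extractor masks out its field and shifts it back down, inverting the
// shift-and-OR packing done by ComputeFlags.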
@ -1761,12 +1932,12 @@ INT_ACCESSORS(Code, sinfo_size, kSInfoSizeOffset)
Code::ICTargetState Code::ic_flag() {
return static_cast<ICTargetState>(READ_INT_FIELD(this, kICFlagOffset));
return static_cast<ICTargetState>(READ_BYTE_FIELD(this, kICFlagOffset));
}
void Code::set_ic_flag(ICTargetState value) {
WRITE_INT_FIELD(this, kICFlagOffset, value);
WRITE_BYTE_FIELD(this, kICFlagOffset, value);
}


@ -554,8 +554,8 @@ void String::StringShortPrint(StringStream* accumulator) {
StringInputBuffer buf(this);
bool truncated = false;
if (len > 1024) {
len = 1024;
if (len > kMaxShortPrintLength) {
len = kMaxShortPrintLength;
truncated = true;
}
bool ascii = true;
@ -2324,7 +2324,7 @@ Object* Map::Copy() {
Object* Map::UpdateCodeCache(String* name, Code* code) {
ASSERT(code->state() == MONOMORPHIC);
ASSERT(code->ic_state() == MONOMORPHIC);
FixedArray* cache = code_cache();
// When updating the code cache we disregard the type encoded in the

View File

@ -818,14 +818,150 @@ class Failure: public Object {
};
// Heap objects typically have a map pointer in their first word. However,
// during GC other data (eg, mark bits, forwarding addresses) is sometimes
// encoded in the first word. The class MapWord is an abstraction of the
// value in a heap object's first word.
class MapWord BASE_EMBEDDED {
public:
// Normal state: the map word contains a map pointer.
// Create a map word from a map pointer.
static inline MapWord FromMap(Map* map);
// View this map word as a map pointer.
inline Map* ToMap();
// Scavenge collection: the map word of live objects in the from space
// contains a forwarding address (a heap object pointer in the to space).
// True if this map word is a forwarding address for a scavenge
// collection. Only valid during a scavenge collection (specifically,
// when all map words are heap object pointers, ie. not during a full GC).
inline bool IsForwardingAddress();
// Create a map word from a forwarding address.
static inline MapWord FromForwardingAddress(HeapObject* object);
// View this map word as a forwarding address.
inline HeapObject* ToForwardingAddress();
// Marking phase of full collection: the map word of live objects is
// marked, and may be marked as overflowed (eg, the object is live, its
// children have not been visited, and it does not fit in the marking
// stack).
// True if this map word's mark bit is set.
inline bool IsMarked();
// Return this map word but with its mark bit set.
inline void SetMark();
// Return this map word but with its mark bit cleared.
inline void ClearMark();
// True if this map word's overflow bit is set.
inline bool IsOverflowed();
// Set this map word's overflow bit.
inline void SetOverflow();
// Clear this map word's overflow bit.
inline void ClearOverflow();
// Compacting phase of a full compacting collection: the map word of live
// objects contains an encoding of the original map address along with the
// forwarding address (represented as an offset from the first live object
// in the same page as the (old) object address).
// Create a map word from a map address and a forwarding address offset.
static inline MapWord EncodeAddress(Address map_address, int offset);
// Return the map address encoded in this map word.
inline Address DecodeMapAddress(MapSpace* map_space);
// Return the forwarding offset encoded in this map word.
inline int DecodeOffset();
// During serialization: the map word is used to hold an encoded
// address, and possibly a mark bit (set and cleared with SetMark
// and ClearMark).
// Create a map word from an encoded address.
static inline MapWord FromEncodedAddress(Address address);
inline Address ToEncodedAddress();
private:
// HeapObject calls the private constructor and directly reads the value.
friend class HeapObject;
explicit MapWord(uintptr_t value) : value_(value) {}
uintptr_t value_;
// Bits used by the marking phase of the garbage collector.
//
// The first word of a heap object is normally a map pointer. The last two
// bits are tagged as '01' (kHeapObjectTag). We reuse the last two bits to
// mark an object as live and/or overflowed:
// last bit = 0, marked as alive
// second bit = 1, overflowed
// An object is only marked as overflowed when it is marked as live while
// the marking stack is overflowed.
static const int kMarkingBit = 0; // marking bit
static const int kMarkingMask = (1 << kMarkingBit); // marking mask
static const int kOverflowBit = 1; // overflow bit
static const int kOverflowMask = (1 << kOverflowBit); // overflow mask
// Forwarding pointers and map pointer encoding
//  31             21 20              10 9               0
// +-----------------+------------------+-----------------+
// |forwarding offset|page offset of map|page index of map|
// +-----------------+------------------+-----------------+
//       11 bits            11 bits           10 bits
static const int kMapPageIndexBits = 10;
static const int kMapPageOffsetBits = 11;
static const int kForwardingOffsetBits = 11;
static const int kMapPageIndexShift = 0;
static const int kMapPageOffsetShift =
kMapPageIndexShift + kMapPageIndexBits;
static const int kForwardingOffsetShift =
kMapPageOffsetShift + kMapPageOffsetBits;
// 0x000003FF
static const uint32_t kMapPageIndexMask =
(1 << kMapPageOffsetShift) - 1;
// 0x001FFC00
static const uint32_t kMapPageOffsetMask =
((1 << kForwardingOffsetShift) - 1) & ~kMapPageIndexMask;
// 0xFFE00000
static const uint32_t kForwardingOffsetMask =
~(kMapPageIndexMask | kMapPageOffsetMask);
};
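For concreteness, a rough sketch of the 10/11/11-bit compaction encoding declared above, using the same shift constants and checking against the documented masks; the field values are arbitrary:

// Sketch of MapWord's page-index/page-offset/forwarding-offset packing.
#include <cassert>
#include <stdint.h>

const int kMapPageIndexBits = 10;
const int kMapPageOffsetBits = 11;
const int kMapPageIndexShift = 0;
const int kMapPageOffsetShift = kMapPageIndexShift + kMapPageIndexBits;
const int kForwardingOffsetShift = kMapPageOffsetShift + kMapPageOffsetBits;

uint32_t Encode(uint32_t page_index, uint32_t page_offset, uint32_t fwd) {
  return (page_index << kMapPageIndexShift) |
         (page_offset << kMapPageOffsetShift) |
         (fwd << kForwardingOffsetShift);
}

int main() {
  uint32_t w = Encode(3, 0x40, 0x100);
  assert((w & 0x000003FF) == 3);              // page index of map
  assert(((w & 0x001FFC00) >> 10) == 0x40);   // page offset of map
  assert(((w & 0xFFE00000) >> 21) == 0x100);  // forwarding offset
  return 0;
}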
// HeapObject is the superclass for all classes describing heap allocated
// objects.
class HeapObject: public Object {
public:
// [map]: contains a Map which contains the objects reflective information.
// [map]: Contains a map which contains the object's reflective
// information.
inline Map* map();
inline void set_map(Map* value);
// During garbage collection, the map word of a heap object does not
// necessarily contain a map pointer.
inline MapWord map_word();
inline void set_map_word(MapWord map_word);
// Converts an address to a HeapObject pointer.
static inline HeapObject* FromAddress(Address address);
@ -857,6 +993,31 @@ class HeapObject: public Object {
// GC internal.
inline int SizeFromMap(Map* map);
// Support for marking heap objects during the marking phase of GC.
// True if the object is marked live.
inline bool IsMarked();
// Mutate this object's map pointer to indicate that the object is live.
inline void SetMark();
// Mutate this object's map pointer to remove the indication that the
// object is live (ie, partially restore the map pointer).
inline void ClearMark();
// True if this object is marked as overflowed. Overflowed objects have
// been reached and marked during marking of the heap, but their children
// have not necessarily been marked and they have not been pushed on the
// marking stack.
inline bool IsOverflowed();
// Mutate this object's map pointer to indicate that the object is
// overflowed.
inline void SetOverflow();
// Mutate this object's map pointer to remove the indication that the
// object is overflowed (ie, partially restore the map pointer).
inline void ClearOverflow();
static inline Object* GetHeapObjectField(HeapObject* obj, int index);
// Casting.
@ -1875,10 +2036,9 @@ class Code: public HeapObject {
// [flags]: Access to specific code flags.
inline Kind kind();
inline InlineCacheState state(); // only valid for IC stubs
inline InlineCacheState ic_state(); // only valid for IC stubs
inline PropertyType type(); // only valid for monomorphic IC stubs
inline int arguments_count(); // only valid for call IC stubs
inline CodeStub::Major major_key(); // only valid for kind STUB
// Testers for IC stub kinds.
inline bool is_inline_cache_stub();
@ -1894,9 +2054,13 @@ class Code: public HeapObject {
inline ICTargetState ic_flag();
inline void set_ic_flag(ICTargetState value);
// [major_key]: For kind STUB, the major key.
inline CodeStub::Major major_key();
inline void set_major_key(CodeStub::Major major);
// Flags operations.
static inline Flags ComputeFlags(Kind kind,
InlineCacheState state = UNINITIALIZED,
InlineCacheState ic_state = UNINITIALIZED,
PropertyType type = NORMAL,
int argc = -1);
@ -1905,7 +2069,7 @@ class Code: public HeapObject {
int argc = -1);
static inline Kind ExtractKindFromFlags(Flags flags);
static inline InlineCacheState ExtractStateFromFlags(Flags flags);
static inline InlineCacheState ExtractICStateFromFlags(Flags flags);
static inline PropertyType ExtractTypeFromFlags(Flags flags);
static inline int ExtractArgumentsCountFromFlags(Flags flags);
static inline Flags RemoveTypeFromFlags(Flags flags);
@ -1970,16 +2134,20 @@ class Code: public HeapObject {
static const int kRelocationSizeOffset = kInstructionSizeOffset + kIntSize;
static const int kSInfoSizeOffset = kRelocationSizeOffset + kIntSize;
static const int kFlagsOffset = kSInfoSizeOffset + kIntSize;
static const int kICFlagOffset = kFlagsOffset + kIntSize;
static const int kHeaderSize = kICFlagOffset + kIntSize;
static const int kKindSpecificFlagsOffset = kFlagsOffset + kIntSize;
static const int kHeaderSize = kKindSpecificFlagsOffset + kIntSize;
// Byte offsets within kKindSpecificFlagsOffset.
static const int kICFlagOffset = kKindSpecificFlagsOffset + 0;
static const int kStubMajorKeyOffset = kKindSpecificFlagsOffset + 1;
// Flags layout.
static const int kFlagsStateShift = 0;
static const int kFlagsICStateShift = 0;
static const int kFlagsKindShift = 3;
static const int kFlagsTypeShift = 6;
static const int kFlagsArgumentsCountShift = 9;
static const int kFlagsStateMask = 0x00000007; // 000000111
static const int kFlagsICStateMask = 0x00000007; // 000000111
static const int kFlagsKindMask = 0x00000038; // 000111000
static const int kFlagsTypeMask = 0x000001C0; // 111000000
static const int kFlagsArgumentsCountMask = 0xFFFFFE00;
@ -2709,6 +2877,9 @@ class String: public HeapObject {
static const int kHashComputedMask = 1;
static const int kIsArrayIndexMask = 1 << 1;
// Limit for truncation in short printing.
static const int kMaxShortPrintLength = 1024;
// Support for regular expressions.
const uc16* GetTwoByteData();
const uc16* GetTwoByteData(unsigned start);

View File

@ -108,18 +108,16 @@ char* OS::LocalTimezone(double time) {
double OS::DaylightSavingsOffset(double time) {
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
struct tm* t = localtime(&tv);
return t->tm_isdst ? 3600 * msPerSecond : 0;
return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
}
double OS::LocalTimeOffset() {
// 1199174400 = Jan 1 2008 (UTC).
// Random date where daylight savings time is not in effect.
static const int kJan1st2008 = 1199174400;
time_t tv = static_cast<time_t>(kJan1st2008);
time_t tv = time(NULL);
struct tm* t = localtime(&tv);
ASSERT(t->tm_isdst <= 0);
return static_cast<double>(t->tm_gmtoff * msPerSecond);
// tm_gmtoff includes any daylight savings offset, so subtract it.
return static_cast<double>(t->tm_gmtoff * msPerSecond -
(t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}
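As a worked example (hypothetical host settings): in US Pacific time during summer, tm_gmtoff is -25200 seconds (UTC-7) and tm_isdst is positive, so the function returns -25200 * 1000 - 3600000 = -28800000 ms, i.e. the standard-time offset of UTC-8 with the daylight savings component removed.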
@ -159,7 +157,13 @@ int OS::SNPrintF(char* str, size_t size, const char* format, ...) {
int OS::VSNPrintF(char* str, size_t size, const char* format, va_list args) {
return vsnprintf(str, size, format, args); // forward to linux.
int n = vsnprintf(str, size, format, args); // forward to linux.
if (n < 0 || static_cast<size_t>(n) >= size) {
str[size - 1] = '\0';
return -1;
} else {
return n;
}
}
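A hedged caller sketch of the new contract (output always null-terminated, -1 on truncation); the buffer size and format arguments are made up:

// Hypothetical caller of OS::SNPrintF as declared in platform.h: a
// negative result means truncation, but the buffer is null-terminated.
static void FormatExample() {
  char buffer[16];
  int written = OS::SNPrintF(buffer, sizeof(buffer), "pi = %.5f", 3.14159);
  if (written < 0) {
    // Truncated; buffer holds a null-terminated prefix of the output.
  }
}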
@ -192,10 +196,12 @@ size_t OS::AllocateAlignment() {
}
void* OS::Allocate(const size_t requested, size_t* allocated) {
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool executable) {
const size_t msize = RoundUp(requested, getpagesize());
void* mbase = mmap(NULL, msize, PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mbase == MAP_FAILED) {
LOG(StringEvent("OS::Allocate", "mmap failed"));
return NULL;
@ -224,6 +230,15 @@ void OS::Abort() {
}
void OS::DebugBreak() {
#if defined (__arm__) || defined(__thumb__)
asm("bkpt 0");
#else
asm("int $3");
#endif
}
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
@ -351,8 +366,9 @@ bool VirtualMemory::IsReserved() {
}
bool VirtualMemory::Commit(void* address, size_t size) {
if (MAP_FAILED == mmap(address, size, PROT_READ | PROT_WRITE | PROT_EXEC,
bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
if (MAP_FAILED == mmap(address, size, prot,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
kMmapFd, kMmapFdOffset)) {
return false;
@ -516,40 +532,24 @@ class LinuxSemaphore : public Semaphore {
explicit LinuxSemaphore(int count) { sem_init(&sem_, 0, count); }
virtual ~LinuxSemaphore() { sem_destroy(&sem_); }
virtual void Wait() { sem_wait(&sem_); }
virtual void Wait();
virtual void Signal() { sem_post(&sem_); }
private:
sem_t sem_;
};
void LinuxSemaphore::Wait() {
while (true) {
int result = sem_wait(&sem_);
if (result == 0) return; // Successfully got semaphore.
CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
}
}
Semaphore* OS::CreateSemaphore(int count) {
return new LinuxSemaphore(count);
}
// TODO(1233584): Implement Linux support.
Select::Select(int len, Semaphore** sems) {
FATAL("Not implemented");
}
Select::~Select() {
FATAL("Not implemented");
}
int Select::WaitSingle() {
FATAL("Not implemented");
return 0;
}
void Select::WaitAll() {
FATAL("Not implemented");
}
#ifdef ENABLE_LOGGING_AND_PROFILING
static ProfileSampler* active_sampler_ = NULL;

View File

@ -114,18 +114,16 @@ char* OS::LocalTimezone(double time) {
double OS::DaylightSavingsOffset(double time) {
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
struct tm* t = localtime(&tv);
return t->tm_isdst ? 3600 * msPerSecond : 0;
return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
}
double OS::LocalTimeOffset() {
// 1199174400 = Jan 1 2008 (UTC).
// Random date where daylight savings time is not in effect.
static const int kJan1st2008 = 1199174400;
time_t tv = static_cast<time_t>(kJan1st2008);
time_t tv = time(NULL);
struct tm* t = localtime(&tv);
ASSERT(t->tm_isdst <= 0);
return static_cast<double>(t->tm_gmtoff * msPerSecond);
// tm_gmtoff includes any daylight savings offset, so subtract it.
return static_cast<double>(t->tm_gmtoff * msPerSecond -
(t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}
@ -165,7 +163,13 @@ int OS::SNPrintF(char* str, size_t size, const char* format, ...) {
int OS::VSNPrintF(char* str, size_t size, const char* format, va_list args) {
return vsnprintf(str, size, format, args); // forward to Mac OS X.
int n = vsnprintf(str, size, format, args); // forward to Mac OS X.
if (n < 0 || static_cast<size_t>(n) >= size) {
str[size - 1] = '\0';
return -1;
} else {
return n;
}
}
@ -192,21 +196,29 @@ bool OS::IsOutsideAllocatedSpace(void* address) {
size_t OS::AllocateAlignment() {
return kPointerSize;
return getpagesize();
}
void* OS::Allocate(const size_t requested, size_t* allocated) {
*allocated = requested;
void* mbase = malloc(requested);
UpdateAllocatedSpaceLimits(mbase, requested);
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool executable) {
const size_t msize = RoundUp(requested, getpagesize());
int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) {
LOG(StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
void OS::Free(void* buf, const size_t length) {
free(buf);
USE(length);
// TODO(1240712): munmap has a return value which is ignored here.
munmap(buf, length);
}
@ -221,6 +233,11 @@ void OS::Abort() {
}
void OS::DebugBreak() {
asm("int $3");
}
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
@ -315,8 +332,9 @@ bool VirtualMemory::IsReserved() {
}
bool VirtualMemory::Commit(void* address, size_t size) {
if (MAP_FAILED == mmap(address, size, PROT_READ | PROT_WRITE | PROT_EXEC,
bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
if (MAP_FAILED == mmap(address, size, prot,
MAP_PRIVATE | MAP_ANON | MAP_FIXED,
kMmapFd, kMmapFdOffset)) {
return false;
@ -479,6 +497,9 @@ class MacOSSemaphore : public Semaphore {
semaphore_destroy(mach_task_self(), semaphore_);
}
// The MacOS mach semaphore documentation claims it does not have spurious
// wakeups the way pthreads semaphores do, so the code from the linux
// platform is not needed here.
void Wait() { semaphore_wait(semaphore_); }
void Signal() { semaphore_signal(semaphore_); }
@ -492,28 +513,6 @@ Semaphore* OS::CreateSemaphore(int count) {
return new MacOSSemaphore(count);
}
// TODO(1233584): Implement MacOS support.
Select::Select(int len, Semaphore** sems) {
FATAL("Not implemented");
}
Select::~Select() {
FATAL("Not implemented");
}
int Select::WaitSingle() {
FATAL("Not implemented");
return 0;
}
void Select::WaitAll() {
FATAL("Not implemented");
}
#ifdef ENABLE_LOGGING_AND_PROFILING
static ProfileSampler* active_sampler_ = NULL;

View File

@ -537,12 +537,13 @@ char* OS::LocalTimezone(double time) {
}
// Returns the local time offset in milliseconds east of UTC.
// Returns the local time offset in milliseconds east of UTC without
// taking daylight savings time into account.
double OS::LocalTimeOffset() {
// 1199174400 = Jan 1 2008 (UTC).
// Random date where daylight savings time is not in effect.
int64_t offset = Time(1199174400).LocalOffset();
return static_cast<double>(offset);
// Use current time, rounded to the millisecond.
Time t(TimeCurrentMillis());
// Time::LocalOffset includes any daylight savings offset, so subtract it.
return static_cast<double>(t.LocalOffset() - t.DaylightSavingsOffset());
}
@ -653,8 +654,12 @@ int OS::VSNPrintF(char* str, size_t size, const char* format, va_list args) {
int n = _vsnprintf(str, size, format, args);
// Make sure to zero-terminate the string if the output was
// truncated or if there was an error.
if (n < 0 || static_cast<size_t>(n) >= size) str[size - 1] = '\0';
return n;
if (n < 0 || static_cast<size_t>(n) >= size) {
str[size - 1] = '\0';
return -1;
} else {
return n;
}
}
@ -704,13 +709,15 @@ size_t OS::AllocateAlignment() {
}
void* OS::Allocate(const size_t requested, size_t* allocated) {
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool executable) {
// VirtualAlloc rounds allocated size to page size automatically.
size_t msize = RoundUp(requested, GetPageSize());
// Windows XP SP2 allows Data Execution Prevention (DEP).
LPVOID mbase = VirtualAlloc(NULL, requested, MEM_COMMIT | MEM_RESERVE,
PAGE_EXECUTE_READWRITE);
int prot = executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
LPVOID mbase = VirtualAlloc(NULL, requested, MEM_COMMIT | MEM_RESERVE, prot);
if (mbase == NULL) {
LOG(StringEvent("OS::Allocate", "VirtualAlloc failed"));
return NULL;
@ -743,6 +750,11 @@ void OS::Abort() {
}
void OS::DebugBreak() {
__debugbreak();
}
class Win32MemoryMappedFile : public OS::MemoryMappedFile {
public:
Win32MemoryMappedFile(HANDLE file, HANDLE file_mapping, void* memory)
@ -1153,7 +1165,7 @@ bool VirtualMemory::IsReserved() {
VirtualMemory::VirtualMemory(size_t size, void* address_hint) {
address_ =
VirtualAlloc(address_hint, size, MEM_RESERVE, PAGE_EXECUTE_READWRITE);
VirtualAlloc(address_hint, size, MEM_RESERVE, PAGE_NOACCESS);
size_ = size;
}
@ -1165,8 +1177,9 @@ VirtualMemory::~VirtualMemory() {
}
bool VirtualMemory::Commit(void* address, size_t size) {
if (NULL == VirtualAlloc(address, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE)) {
bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
int prot = executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
return false;
}
@ -1359,46 +1372,6 @@ Mutex* OS::CreateMutex() {
}
// ----------------------------------------------------------------------------
// Win32 select support.
//
// On Win32 the function WaitForMultipleObjects can be used to wait
// for all kind of synchronization handles. Currently the
// implementation only supports the fixed Select::MaxSelectSize maximum
// number of handles.
class Select::PlatformData : public Malloced {
public:
PlatformData(int len, Semaphore** sems);
int len_;
HANDLE objs_[Select::MaxSelectSize];
};
Select::Select(int len, Semaphore** sems) {
data_ = new PlatformData(len, sems);
}
Select::~Select() {
delete data_;
}
int Select::WaitSingle() {
return WaitForMultipleObjects(data_->len_,
data_->objs_,
FALSE,
INFINITE) - WAIT_OBJECT_0;
}
void Select::WaitAll() {
WaitForMultipleObjects(data_->len_, data_->objs_, TRUE, INFINITE);
}
// ----------------------------------------------------------------------------
// Win32 semaphore support.
//
@ -1428,7 +1401,6 @@ class Win32Semaphore : public Semaphore {
private:
HANDLE sem;
friend class Select::PlatformData;
};
@ -1436,16 +1408,6 @@ Semaphore* OS::CreateSemaphore(int count) {
return new Win32Semaphore(count);
}
Select::PlatformData::PlatformData(int len, Semaphore** sems) : len_(len) {
ASSERT(len_ < Select::MaxSelectSize);
for (int i = 0; i < len_; i++) {
objs_[i] = reinterpret_cast<Win32Semaphore*>(sems[i])->sem;
}
}
#ifdef ENABLE_LOGGING_AND_PROFILING
// ----------------------------------------------------------------------------

View File

@ -144,10 +144,12 @@ class OS {
static void PrintError(const char* format, ...);
static void VPrintError(const char* format, va_list args);
// Allocate/Free memory used by JS heap.
// Pages are readable/writeable/executable by default.
// Allocate/Free memory used by JS heap. Pages are readable/writeable, but
// they are not guaranteed to be executable unless 'executable' is true.
// Returns the address of allocated memory, or NULL if failed.
static void* Allocate(const size_t requested, size_t* allocated);
static void* Allocate(const size_t requested,
size_t* allocated,
bool executable);
static void Free(void* buf, const size_t length);
// Get the Alignment guaranteed by Allocate().
static size_t AllocateAlignment();
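A short usage sketch for the new signature; the sizes and the data/code split are illustrative:

// Hypothetical caller: plain data gets non-executable pages; only
// code-bearing spaces pass executable == true.
static void AllocateExample() {
  size_t data_size = 0;
  size_t code_size = 0;
  void* data_mem = OS::Allocate(4 * 1024, &data_size, false);
  void* code_mem = OS::Allocate(4 * 1024, &code_size, true);
  if (data_mem != NULL) OS::Free(data_mem, data_size);
  if (code_mem != NULL) OS::Free(code_mem, code_size);
}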
@ -165,6 +167,9 @@ class OS {
// Abort the current process.
static void Abort();
// Debug break.
static void DebugBreak();
// Walk the stack.
static const int kStackWalkError = -1;
static const int kStackWalkMaxNameLen = 256;
@ -191,7 +196,8 @@ class OS {
virtual void* memory() = 0;
};
// Safe formatting print.
// Safe formatting print. Ensures that str is always null-terminated.
// Returns the number of chars written, or -1 if output was truncated.
static int SNPrintF(char* str, size_t size, const char* format, ...);
static int VSNPrintF(char* str,
size_t size,
@ -232,7 +238,7 @@ class VirtualMemory {
size_t size() { return size_; }
// Commits real memory. Returns whether the operation succeeded.
bool Commit(void* address, size_t size);
bool Commit(void* address, size_t size, bool executable);
// Uncommit real memory. Returns whether the operation succeeded.
bool Uncommit(void* address, size_t size);
@ -341,19 +347,22 @@ class Mutex {
// ----------------------------------------------------------------------------
// Guard
// ScopedLock
//
// Stack-allocated Guards provide block-scoped locking and unlocking
// Stack-allocated ScopedLocks provide block-scoped locking and unlocking
// of a mutex.
class Guard {
class ScopedLock {
public:
explicit Guard(Mutex* mux): mux_(mux) { mux_->Lock(); }
~Guard() { mux_->Unlock(); }
explicit ScopedLock(Mutex* mutex): mutex_(mutex) {
mutex_->Lock();
}
~ScopedLock() {
mutex_->Unlock();
}
private:
Mutex* mux_;
DISALLOW_EVIL_CONSTRUCTORS(Guard);
Mutex* mutex_;
DISALLOW_EVIL_CONSTRUCTORS(ScopedLock);
};
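A brief usage sketch; the mutex and the guarded counter are hypothetical:

// The mutex is released by ~ScopedLock when 'lock' leaves scope,
// even on an early return.
static Mutex* counter_mutex = OS::CreateMutex();
static int counter = 0;

void IncrementCounter() {
  ScopedLock lock(counter_mutex);
  counter++;
}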
@ -379,26 +388,6 @@ class Semaphore {
};
// ----------------------------------------------------------------------------
// Select
//
// A selector makes it possible to wait for several synchronization objects
class Select {
public:
Select(int len, Semaphore** sems);
~Select();
int WaitSingle();
void WaitAll();
static const int MaxSelectSize = 32;
class PlatformData;
private:
PlatformData* data_; // Platform specific data.
DISALLOW_EVIL_CONSTRUCTORS(Select);
};
#ifdef ENABLE_LOGGING_AND_PROFILING
// ----------------------------------------------------------------------------
// ProfileSampler

View File

@ -806,6 +806,10 @@ static Object* Runtime_SetCode(Arguments args) {
target->shared()->set_length(fun->shared()->length());
target->shared()->set_formal_parameter_count(
fun->shared()->formal_parameter_count());
// Set the source code of the target function.
target->shared()->set_script(fun->shared()->script());
target->shared()->set_start_position(fun->shared()->start_position());
target->shared()->set_end_position(fun->shared()->end_position());
context = Handle<Context>(fun->context());
// Make sure we get a fresh copy of the literal vector to avoid
@ -2730,31 +2734,31 @@ static Object* Runtime_GetFunctionDelegate(Arguments args) {
static Object* Runtime_NewContext(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSFunction, function, args[1]);
CONVERT_CHECKED(JSFunction, function, args[0]);
int length = ScopeInfo<>::NumberOfContextSlots(function->code());
Object* result = Heap::AllocateFunctionContext(length, function);
if (result->IsFailure()) return result;
Top::set_context(Context::cast(result));
return args[0]; // return TOS
return result; // non-failure
}
static Object* Runtime_PushContext(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
ASSERT(args.length() == 1);
// Convert the object to a proper JavaScript object.
Object* object = args[1];
Object* object = args[0];
if (!object->IsJSObject()) {
object = object->ToObject();
if (object->IsFailure()) {
if (!Failure::cast(object)->IsInternalError()) return object;
HandleScope scope;
Handle<Object> handle(args[1]);
Handle<Object> handle(args[0]);
Handle<Object> result =
Factory::NewTypeError("with_expression", HandleVector(&handle, 1));
return Top::Throw(*result);
@ -2767,7 +2771,7 @@ static Object* Runtime_PushContext(Arguments args) {
Top::set_context(Context::cast(result));
return args[0]; // return TOS
return result;
}
@ -2993,9 +2997,9 @@ static Object* Runtime_DebugBreak(Arguments args) {
return args[0];
}
// Don't break in system functions. If the current function is either in the
// builtins object of some context or is in the debug context just return with
// the debug break stack guard active.
// Don't break in system functions. If the current function is
// either in the builtins object of some context or is in the debug
// context just return with the debug break stack guard active.
JavaScriptFrameIterator it;
JavaScriptFrame* frame = it.frame();
Object* fun = frame->function();
@ -3013,13 +3017,8 @@ static Object* Runtime_DebugBreak(Arguments args) {
SaveBreakFrame save;
EnterDebuggerContext enter;
// Process debug requests. Returns true if break request.
bool break_request = Debugger::ProcessPendingRequests();
// Notify the debug event listeners if break request.
if (break_request) {
Debugger::OnDebugBreak(Factory::undefined_value());
}
// Notify the debug event listeners.
Debugger::OnDebugBreak(Factory::undefined_value());
// Return to continue execution.
return args[0];

View File

@ -256,8 +256,8 @@ namespace v8 { namespace internal {
F(StackGuard, 1) \
\
/* Contexts */ \
F(NewContext, 2) \
F(PushContext, 2) \
F(NewContext, 1) \
F(PushContext, 1) \
F(LookupContext, 2) \
F(LoadContextSlot, 2) \
F(LoadContextSlotNoReferenceError, 2) \

View File

@ -507,10 +507,6 @@ ExternalReferenceTable::ExternalReferenceTable() : refs_(64) {
RUNTIME_ENTRY,
1,
"Runtime::PerformGC");
Add(FUNCTION_ADDR(StackFrameIterator::RestoreCalleeSavedForTopHandler),
RUNTIME_ENTRY,
2,
"StackFrameIterator::RestoreCalleeSavedForTopHandler");
// Miscellaneous
Add(ExternalReference::builtin_passed_function().address(),
@ -670,6 +666,8 @@ class SnapshotWriter {
int length() { return len_; }
Address position() { return reinterpret_cast<Address>(&str_[len_]); }
private:
char* str_; // the snapshot
int len_; // the current length of str_
@ -704,8 +702,73 @@ int SnapshotWriter::InsertString(const char* s, int pos) {
}
class ReferenceUpdater: public ObjectVisitor {
public:
ReferenceUpdater(HeapObject* obj, Serializer* serializer)
: obj_address_(obj->address()),
serializer_(serializer),
reference_encoder_(serializer->reference_encoder_),
offsets_(8),
addresses_(8) {
}
virtual void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; ++p) {
if ((*p)->IsHeapObject()) {
offsets_.Add(reinterpret_cast<Address>(p) - obj_address_);
Address a = serializer_->GetSavedAddress(HeapObject::cast(*p));
addresses_.Add(a);
}
}
}
virtual void VisitExternalReferences(Address* start, Address* end) {
for (Address* p = start; p < end; ++p) {
uint32_t code = reference_encoder_->Encode(*p);
CHECK(*p == NULL ? code == 0 : code != 0);
offsets_.Add(reinterpret_cast<Address>(p) - obj_address_);
addresses_.Add(reinterpret_cast<Address>(code));
}
}
virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
Address target = rinfo->target_address();
uint32_t encoding = reference_encoder_->Encode(target);
CHECK(target == NULL ? encoding == 0 : encoding != 0);
offsets_.Add(reinterpret_cast<Address>(rinfo->pc()) - obj_address_);
addresses_.Add(reinterpret_cast<Address>(encoding));
}
void Update(Address start_address) {
for (int i = 0; i < offsets_.length(); i++) {
Address* p = reinterpret_cast<Address*>(start_address + offsets_[i]);
*p = addresses_[i];
}
}
private:
Address obj_address_;
Serializer* serializer_;
ExternalReferenceEncoder* reference_encoder_;
List<int> offsets_;
List<Address> addresses_;
};
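The updater is what makes serialization non-destructive: the first IterateBody pass records (offset, encoded value) pairs without touching the live object, and Update then patches those pairs into the copy already written to the stream. A generic sketch of the same record-then-patch pattern, with made-up types rather than V8's:

// Record-then-patch sketch (illustrative types, not V8's).
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <vector>

struct Patch { size_t offset; uint32_t value; };

// Copy 'size' bytes from the live object and apply the recorded
// patches to the copy, leaving the original untouched.
void CopyWithPatches(const char* src, char* dst, size_t size,
                     const std::vector<Patch>& patches) {
  memcpy(dst, src, size);
  for (size_t i = 0; i < patches.size(); i++) {
    memcpy(dst + patches[i].offset, &patches[i].value,
           sizeof(patches[i].value));
  }
}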
// Helper functions for a map of encoded heap object addresses.
static uint32_t HeapObjectHash(HeapObject* key) {
return reinterpret_cast<uint32_t>(key) >> 2;
}
static bool MatchHeapObject(void* key1, void* key2) {
return key1 == key2;
}
Serializer::Serializer()
: global_handles_(4) {
: global_handles_(4),
saved_addresses_(MatchHeapObject) {
root_ = true;
roots_ = 0;
objects_ = 0;
@ -751,6 +814,28 @@ void Serializer::InitializeAllocators() {
}
bool Serializer::IsVisited(HeapObject *obj) {
HashMap::Entry* entry =
saved_addresses_.Lookup(obj, HeapObjectHash(obj), false);
return entry != NULL;
}
Address Serializer::GetSavedAddress(HeapObject *obj) {
HashMap::Entry* entry
= saved_addresses_.Lookup(obj, HeapObjectHash(obj), false);
ASSERT(entry != NULL);
return reinterpret_cast<Address>(entry->value);
}
void Serializer::SaveAddress(HeapObject* obj, Address addr) {
HashMap::Entry* entry =
saved_addresses_.Lookup(obj, HeapObjectHash(obj), true);
entry->value = addr;
}
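These helpers replace the old scheme of stashing the encoded address in the object's map word and marking the object; keeping the mapping in a side table is what leaves the heap intact after serialization. A minimal sketch of the same visited-table pattern, with std::map standing in for V8's HashMap:

// Side-table sketch: record each visited object's saved address
// without mutating the object itself.
#include <map>

typedef unsigned char* Address;

static std::map<void*, Address> saved_addresses;

bool IsVisited(void* obj) {
  return saved_addresses.find(obj) != saved_addresses.end();
}

Address GetSavedAddress(void* obj) {
  return saved_addresses[obj];  // assumes IsVisited(obj)
}

void SaveAddress(void* obj, Address addr) {
  saved_addresses[obj] = addr;
}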
void Serializer::Serialize() {
// No active threads.
CHECK_EQ(NULL, ThreadState::FirstInUse());
@ -776,48 +861,25 @@ void Serializer::Finalize(char** str, int* len) {
}
// Serialize roots by writing them into the stream. Serialize pointers
// in HeapObjects by changing them to the encoded address where the
// object will be allocated on deserialization
// Serialize objects by writing them into the stream.
void Serializer::VisitPointers(Object** start, Object** end) {
bool root = root_;
root_ = false;
for (Object** p = start; p < end; ++p) {
bool serialized;
Address a = Encode(*p, &serialized);
if (root) {
roots_++;
Address a = Encode(*p, &serialized);
// If the object was not just serialized,
// write its encoded address instead.
if (!serialized) PutEncodedAddress(a);
} else {
// Rewrite the pointer in the HeapObject.
*p = reinterpret_cast<Object*>(Encode(*p, &serialized));
}
}
root_ = root;
}
void Serializer::VisitExternalReferences(Address* start, Address* end) {
for (Address* p = start; p < end; ++p) {
uint32_t code = reference_encoder_->Encode(*p);
CHECK(*p == NULL ? code == 0 : code != 0);
*p = reinterpret_cast<Address>(code);
}
}
void Serializer::VisitRuntimeEntry(RelocInfo* rinfo) {
Address target = rinfo->target_address();
uint32_t encoding = reference_encoder_->Encode(target);
CHECK(target == NULL ? encoding == 0 : encoding != 0);
uint32_t* pc = reinterpret_cast<uint32_t*>(rinfo->pc());
*pc = encoding;
}
class GlobalHandlesRetriever: public ObjectVisitor {
public:
explicit GlobalHandlesRetriever(List<Object**>* handles)
@ -858,12 +920,13 @@ void Serializer::PutHeader() {
#else
writer_->PutC('0');
#endif
// Write sizes of paged memory spaces.
// Write sizes of paged memory spaces. Allocate extra space for the old
// and code spaces, because objects in new space will be promoted to them.
writer_->PutC('S');
writer_->PutC('[');
writer_->PutInt(Heap::old_space()->Size());
writer_->PutInt(Heap::old_space()->Size() + Heap::new_space()->Size());
writer_->PutC('|');
writer_->PutInt(Heap::code_space()->Size());
writer_->PutInt(Heap::code_space()->Size() + Heap::new_space()->Size());
writer_->PutC('|');
writer_->PutInt(Heap::map_space()->Size());
writer_->PutC(']');
@ -927,6 +990,9 @@ void Serializer::PutContextStack() {
HandleScopeImplementer::instance()->RestoreContext();
contexts.Add(context);
}
for (int i = contexts.length() - 1; i >= 0; i--) {
HandleScopeImplementer::instance()->SaveContext(contexts[i]);
}
PutGlobalHandleStack(contexts);
List<Handle<Object> > security_contexts(2);
@ -935,6 +1001,10 @@ void Serializer::PutContextStack() {
HandleScopeImplementer::instance()->RestoreSecurityContext();
security_contexts.Add(context);
}
for (int i = security_contexts.length() - 1; i >= 0; i--) {
Handle<Object> context = security_contexts[i];
HandleScopeImplementer::instance()->SaveSecurityContext(context);
}
PutGlobalHandleStack(security_contexts);
}
@ -951,10 +1021,8 @@ Address Serializer::Encode(Object* o, bool* serialized) {
return reinterpret_cast<Address>(o);
} else {
HeapObject* obj = HeapObject::cast(o);
if (is_marked(obj)) {
// Already serialized: encoded address is in map.
intptr_t map_word = reinterpret_cast<intptr_t>(obj->map());
return reinterpret_cast<Address>(clear_mark_bit(map_word));
if (IsVisited(obj)) {
return GetSavedAddress(obj);
} else {
// First visit: serialize the object.
*serialized = true;
@ -973,6 +1041,8 @@ Address Serializer::PutObject(HeapObject* obj) {
// allocated during deserialization.
Address addr = Allocate(obj).Encode();
SaveAddress(obj, addr);
if (type == CODE_TYPE) {
Code* code = Code::cast(obj);
// Ensure Code objects contain Object pointers, not Addresses.
@ -980,12 +1050,6 @@ Address Serializer::PutObject(HeapObject* obj) {
LOG(CodeMoveEvent(code->address(), addr));
}
// Put the encoded address in the map() of the object, and mark the
// object. Do this to break recursion before visiting any pointers
// in the object.
obj->set_map(reinterpret_cast<Map*>(addr));
set_mark(obj);
// Write out the object prologue: type, size, and simulated address of obj.
writer_->PutC('[');
CHECK_EQ(0, size & kObjectAlignmentMask);
@ -1000,9 +1064,7 @@ Address Serializer::PutObject(HeapObject* obj) {
Address map_addr = Encode(map, &serialized);
// Visit all the pointers in the object other than the map. This
// will rewrite these pointers in place in the body of the object
// with their encoded RelativeAddresses, and recursively serialize
// any as-yet-unvisited objects.
// will recursively serialize any as-yet-unvisited objects.
obj->IterateBody(type, size, this);
// Mark end of recursively embedded objects, start of object body.
@ -1013,8 +1075,11 @@ Address Serializer::PutObject(HeapObject* obj) {
// Write out the raw contents of the object following the map
// pointer containing the now-updated pointers. No compression, but
// fast to deserialize.
ReferenceUpdater updater(obj, this);
obj->IterateBody(type, size, &updater);
writer_->PutBytes(obj->address() + HeapObject::kSize,
size - HeapObject::kSize);
updater.Update(writer_->position() - size);
#ifdef DEBUG
if (FLAG_debug_serialization) {
@ -1024,6 +1089,12 @@ Address Serializer::PutObject(HeapObject* obj) {
}
#endif
if (type == CODE_TYPE) {
Code* code = Code::cast(obj);
// Convert relocations from Object* to Address in Code objects
code->ConvertICTargetsFromObjectToAddress();
}
objects_++;
return addr;
}
@ -1038,6 +1109,9 @@ RelativeAddress Serializer::Allocate(HeapObject* obj) {
found = Heap::InSpace(obj, s);
}
CHECK(found);
if (s == NEW_SPACE) {
s = Heap::TargetSpace(obj);
}
int size = obj->Size();
return allocator_[s]->Allocate(size);
}
@ -1100,11 +1174,9 @@ void Deserializer::Deserialize() {
reference_decoder_ = new ExternalReferenceDecoder();
// By setting linear allocation only, we forbid the use of free list
// allocation which is not predicted by SimulatedAddress.
Heap::SetLinearAllocationOnly(true);
GetHeader();
Heap::IterateRoots(this);
GetContextStack();
Heap::SetLinearAllocationOnly(false);
Heap::RebuildRSets();
}
@ -1328,10 +1400,6 @@ static inline Object* ResolvePaged(int page_index,
int page_offset,
PagedSpace* space,
List<Page*>* page_list) {
#ifdef DEBUG
space->CheckLinearAllocationOnly();
#endif
ASSERT(page_index < page_list->length());
Address address = (*page_list)[page_index]->OffsetToAddress(page_offset);
return HeapObject::FromAddress(address);

View File

@ -120,6 +120,7 @@ class ExternalReferenceDecoder {
class RelativeAddress;
class SimulatedHeapSpace;
class SnapshotWriter;
class ReferenceUpdater;
class Serializer: public ObjectVisitor {
@ -149,9 +150,15 @@ class Serializer: public ObjectVisitor {
static void disable() { serialization_enabled_ = false; }
private:
friend class ReferenceUpdater;
virtual void VisitPointers(Object** start, Object** end);
virtual void VisitExternalReferences(Address* start, Address* end);
virtual void VisitRuntimeEntry(RelocInfo* rinfo);
bool IsVisited(HeapObject *obj);
Address GetSavedAddress(HeapObject *obj);
void SaveAddress(HeapObject* obj, Address addr);
void PutEncodedAddress(Address addr);
// Write the global flags into the file.
@ -195,6 +202,8 @@ class Serializer: public ObjectVisitor {
ExternalReferenceEncoder* reference_encoder_;
HashMap saved_addresses_;
DISALLOW_EVIL_CONSTRUCTORS(Serializer);
};
@ -248,7 +257,6 @@ class SnapshotReader {
class Deserializer: public ObjectVisitor {
public:
// Create a deserializer. The snapshot is held in str and has size len.
// Ownership of str is not assumed by the Deserializer.
Deserializer(const char* str, int len);
virtual ~Deserializer();

View File

@ -93,19 +93,19 @@ void Debugger::Stop(Instr* instr) {
}
static char* reg_names[] = { "r0", "r1", "r2", "r3",
"r4", "r5", "r6", "r7",
"r8", "r9", "r10", "r11",
"r12", "r13", "r14", "r15",
"pc", "lr", "sp", "ip",
"fp", "sl", ""};
static const char* reg_names[] = { "r0", "r1", "r2", "r3",
"r4", "r5", "r6", "r7",
"r8", "r9", "r10", "r11",
"r12", "r13", "r14", "r15",
"pc", "lr", "sp", "ip",
"fp", "sl", ""};
static int reg_nums[] = { 0, 1, 2, 3,
4, 5, 6, 7,
8, 9, 10, 11,
12, 13, 14, 15,
15, 14, 13, 12,
11, 10};
static int reg_nums[] = { 0, 1, 2, 3,
4, 5, 6, 7,
8, 9, 10, 11,
12, 13, 14, 15,
15, 14, 13, 12,
11, 10};
static int RegNameToRegNum(char* name) {
@ -287,7 +287,7 @@ void Debugger::Debug() {
}
} else if (strcmp(cmd, "gdb") == 0) {
PrintF("relinquishing control to gdb\n");
asm("int $3");
v8::internal::OS::DebugBreak();
PrintF("regaining control from gdb\n");
} else if (strcmp(cmd, "break") == 0) {
if (args == 2) {
@ -665,7 +665,7 @@ int32_t Simulator::GetImm(Instr* instr, bool* carry_out) {
static int count_bits(int bit_vector) {
int count = 0;
while (bit_vector != 0) {
if (bit_vector & 1 != 0) {
if ((bit_vector & 1) != 0) {
count++;
}
bit_vector >>= 1;
@ -1420,9 +1420,53 @@ Object* Simulator::call(int32_t entry, int32_t p0, int32_t p1, int32_t p2,
// the LR the simulation stops when returning to this call point.
set_register(lr, end_sim_pc);
// Remember the values of callee-saved registers.
// The code below assumes that r9 is not used as sb (static base) in
// simulator code and therefore is regarded as a callee-saved register.
int32_t r4_val = get_register(r4);
int32_t r5_val = get_register(r5);
int32_t r6_val = get_register(r6);
int32_t r7_val = get_register(r7);
int32_t r8_val = get_register(r8);
int32_t r9_val = get_register(r9);
int32_t r10_val = get_register(r10);
int32_t r11_val = get_register(r11);
// Set up the callee-saved registers with a known value, so that we can
// check that they are preserved properly across JS execution.
int32_t callee_saved_value = icount_;
set_register(r4, callee_saved_value);
set_register(r5, callee_saved_value);
set_register(r6, callee_saved_value);
set_register(r7, callee_saved_value);
set_register(r8, callee_saved_value);
set_register(r9, callee_saved_value);
set_register(r10, callee_saved_value);
set_register(r11, callee_saved_value);
// Start the simulation
execute();
// Check that the callee-saved registers have been preserved.
CHECK_EQ(get_register(r4), callee_saved_value);
CHECK_EQ(get_register(r5), callee_saved_value);
CHECK_EQ(get_register(r6), callee_saved_value);
CHECK_EQ(get_register(r7), callee_saved_value);
CHECK_EQ(get_register(r8), callee_saved_value);
CHECK_EQ(get_register(r9), callee_saved_value);
CHECK_EQ(get_register(r10), callee_saved_value);
CHECK_EQ(get_register(r11), callee_saved_value);
// Restore callee-saved registers with the original value.
set_register(r4, r4_val);
set_register(r5, r5_val);
set_register(r6, r6_val);
set_register(r7, r7_val);
set_register(r8, r8_val);
set_register(r9, r9_val);
set_register(r10, r10_val);
set_register(r11, r11_val);
int result = get_register(r0);
return reinterpret_cast<Object*>(result);
}

View File

@ -38,7 +38,10 @@
// This macro must be called from a C++ method. It relies on being able to take
// the address of "this" to get a value on the current execution stack and then
// calculates the stack limit based on that value.
// NOTE: The check for overflow is not safe as there is no guarantee that the
// running thread has its stack in all memory up to address 0x00000000.
#define GENERATED_CODE_STACK_LIMIT(limit) \
(reinterpret_cast<uintptr_t>(this) - limit)
(reinterpret_cast<uintptr_t>(this) >= limit ? \
reinterpret_cast<uintptr_t>(this) - limit : 0)
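A worked example of the wrap-around this guards against (addresses hypothetical): if this == 0x1000 and limit == 0x100000, the unguarded expression would wrap to roughly 0xFFF01000, an enormous bogus limit that would make every stack check fail immediately; the guarded form yields 0 instead, which can never trip the check, at the cost noted above of assuming the thread's stack reaches down toward address 0.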
#endif // V8_SIMULATOR_IA32_H_

View File

@ -226,8 +226,8 @@ PagedSpace* MemoryAllocator::PageOwner(Page* page) {
}
// -----------------------------------------------------------------------------
// Space
// --------------------------------------------------------------------------
// PagedSpace
bool PagedSpace::Contains(Address addr) {
Page* p = Page::FromAddress(addr);
@ -237,6 +237,69 @@ bool PagedSpace::Contains(Address addr) {
}
// Try linear allocation in the page of alloc_info's allocation top. Does
// not contain slow case logic (eg, move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info,
int size_in_bytes) {
Address current_top = alloc_info->top;
Address new_top = current_top + size_in_bytes;
if (new_top > alloc_info->limit) return NULL;
alloc_info->top = new_top;
ASSERT(alloc_info->VerifyPagedAllocation());
accounting_stats_.AllocateBytes(size_in_bytes);
return HeapObject::FromAddress(current_top);
}
// Raw allocation.
Object* PagedSpace::AllocateRaw(int size_in_bytes) {
ASSERT(HasBeenSetup());
ASSERT_OBJECT_SIZE(size_in_bytes);
HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
if (object != NULL) return object;
object = SlowAllocateRaw(size_in_bytes);
if (object != NULL) return object;
return Failure::RetryAfterGC(size_in_bytes, identity());
}
// Reallocating (and promoting) objects during a compacting collection.
Object* PagedSpace::MCAllocateRaw(int size_in_bytes) {
ASSERT(HasBeenSetup());
ASSERT_OBJECT_SIZE(size_in_bytes);
HeapObject* object = AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
if (object != NULL) return object;
object = SlowMCAllocateRaw(size_in_bytes);
if (object != NULL) return object;
return Failure::RetryAfterGC(size_in_bytes, identity());
}
// Allocating during deserialization. Always roll to the next page in the
// space, which should be suitably expanded.
Object* PagedSpace::AllocateForDeserialization(int size_in_bytes) {
ASSERT(HasBeenSetup());
ASSERT_OBJECT_SIZE(size_in_bytes);
HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
if (object != NULL) return object;
// The space should be pre-expanded.
Page* current_page = Page::FromAllocationTop(allocation_info_.top);
ASSERT(current_page->next_page()->is_valid());
object = AllocateInNextPage(current_page, size_in_bytes);
ASSERT(object != NULL);
return object;
}
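For context, a hedged sketch of how callers are expected to react to the RetryAfterGC failure returned above; the Heap::CollectGarbage signature here is assumed for illustration, not quoted from this commit:

// Hypothetical allocation wrapper: collect garbage in the failing
// space and retry once before reporting failure to the caller.
Object* AllocateWithRetry(PagedSpace* space, int size_in_bytes) {
  Object* result = space->AllocateRaw(size_in_bytes);
  if (result->IsFailure()) {
    Heap::CollectGarbage(size_in_bytes, space->identity());  // assumed API
    result = space->AllocateRaw(size_in_bytes);
  }
  return result;
}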
// -----------------------------------------------------------------------------
// LargeObjectChunk
@ -263,7 +326,7 @@ Object* NewSpace::AllocateRawInternal(int size_in_bytes,
AllocationInfo* alloc_info) {
Address new_top = alloc_info->top + size_in_bytes;
if (new_top > alloc_info->limit) {
return Failure::RetryAfterGC(size_in_bytes, NEW_SPACE);
return Failure::RetryAfterGC(size_in_bytes, identity());
}
Object* obj = HeapObject::FromAddress(alloc_info->top);

View File

@ -44,14 +44,6 @@ DEFINE_bool(collect_heap_spill_statistics, false,
DECLARE_bool(log_gc);
#endif
// For paged spaces, top and limit should always be in the same page and top
// should not be greater than limit.
#define ASSERT_PAGED_ALLOCATION_INFO(info) \
ASSERT((Page::FromAllocationTop((info).top) == \
Page::FromAllocationTop((info).limit)) \
&&((info).top <= (info).limit))
// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
@ -59,70 +51,6 @@ DECLARE_bool(log_gc);
&& (info).top <= (space)->high() \
&& (info).limit == (space)->high())
// ----------------------------------------------------------------------------
// SpaceIterator
SpaceIterator::SpaceIterator() : current_space_(NEW_SPACE), iterator_(NULL) {
// SpaceIterator depends on the AllocationSpace enumeration starting with
// NEW_SPACE.
ASSERT(NEW_SPACE == 0);
}
SpaceIterator::~SpaceIterator() {
// Delete active iterator if any.
if (iterator_ != NULL) delete iterator_;
}
bool SpaceIterator::has_next() {
// Iterate until no more spaces.
return current_space_ != LAST_SPACE;
}
ObjectIterator* SpaceIterator::next() {
if (iterator_ != NULL) {
delete iterator_;
iterator_ = NULL;
// Move to the next space
current_space_++;
if (current_space_ > LAST_SPACE) {
return NULL;
}
}
// Return iterator for the new current space.
return CreateIterator();
}
// Create an iterator for the space to iterate.
ObjectIterator* SpaceIterator::CreateIterator() {
ASSERT(iterator_ == NULL);
switch (current_space_) {
case NEW_SPACE:
iterator_ = new SemiSpaceIterator(Heap::new_space());
break;
case OLD_SPACE:
iterator_ = new HeapObjectIterator(Heap::old_space());
break;
case CODE_SPACE:
iterator_ = new HeapObjectIterator(Heap::code_space());
break;
case MAP_SPACE:
iterator_ = new HeapObjectIterator(Heap::map_space());
break;
case LO_SPACE:
iterator_ = new LargeObjectIterator(Heap::lo_space());
break;
}
// Return the newly allocated iterator;
ASSERT(iterator_ != NULL);
return iterator_;
}
// ----------------------------------------------------------------------------
// HeapObjectIterator
@ -260,9 +188,9 @@ bool MemoryAllocator::Setup(int capacity) {
// Due to alignment, allocated space might be one page less than required
// number (kPagesPerChunk) of pages for old spaces.
//
// Reserve two chunk ids for semispaces, one for map space and one for old
// space.
max_nof_chunks_ = (capacity_ / (kChunkSize - Page::kPageSize)) + 4;
// Reserve two chunk ids for semispaces, one for map space, one for old
// space, and one for code space.
max_nof_chunks_ = (capacity_ / (kChunkSize - Page::kPageSize)) + 5;
if (max_nof_chunks_ > kMaxNofChunks) return false;
size_ = 0;
@ -298,10 +226,11 @@ void MemoryAllocator::TearDown() {
void* MemoryAllocator::AllocateRawMemory(const size_t requested,
size_t* allocated) {
size_t* allocated,
bool executable) {
if (size_ + static_cast<int>(requested) > capacity_) return NULL;
void* mem = OS::Allocate(requested, allocated);
void* mem = OS::Allocate(requested, allocated, executable);
int alloced = *allocated;
size_ += alloced;
Counters::memory_allocated.Increment(alloced);
@ -360,8 +289,7 @@ Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
if (requested_pages <= 0) return Page::FromAddress(NULL);
}
void* chunk = AllocateRawMemory(chunk_size, &chunk_size);
void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
if (chunk == NULL) return Page::FromAddress(NULL);
LOG(NewEvent("PagedChunk", chunk, chunk_size));
@ -388,8 +316,7 @@ Page* MemoryAllocator::CommitPages(Address start, size_t size,
ASSERT(initial_chunk_->address() <= start);
ASSERT(start + size <= reinterpret_cast<Address>(initial_chunk_->address())
+ initial_chunk_->size());
if (!initial_chunk_->Commit(start, size)) {
if (!initial_chunk_->Commit(start, size, owner->executable())) {
return Page::FromAddress(NULL);
}
Counters::memory_allocated.Increment(size);
@ -403,7 +330,9 @@ Page* MemoryAllocator::CommitPages(Address start, size_t size,
}
bool MemoryAllocator::CommitBlock(Address start, size_t size) {
bool MemoryAllocator::CommitBlock(Address start,
size_t size,
bool executable) {
ASSERT(start != NULL);
ASSERT(size > 0);
ASSERT(initial_chunk_ != NULL);
@ -411,7 +340,7 @@ bool MemoryAllocator::CommitBlock(Address start, size_t size) {
ASSERT(start + size <= reinterpret_cast<Address>(initial_chunk_->address())
+ initial_chunk_->size());
if (!initial_chunk_->Commit(start, size)) return false;
if (!initial_chunk_->Commit(start, size, executable)) return false;
Counters::memory_allocated.Increment(size);
return true;
}
@ -545,15 +474,12 @@ void MemoryAllocator::ReportStatistics() {
// -----------------------------------------------------------------------------
// PagedSpace implementation
PagedSpace::PagedSpace(int max_capacity, AllocationSpace id) {
ASSERT(id == OLD_SPACE || id == CODE_SPACE || id == MAP_SPACE);
PagedSpace::PagedSpace(int max_capacity, AllocationSpace id, bool executable)
: Space(id, executable) {
max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
* Page::kObjectAreaSize;
identity_ = id;
accounting_stats_.Clear();
allocation_mode_ = LINEAR;
allocation_info_.top = NULL;
allocation_info_.limit = NULL;
@ -627,6 +553,7 @@ Object* PagedSpace::FindObject(Address addr) {
if (!Contains(addr)) return Failure::Exception();
Page* p = Page::FromAddress(addr);
ASSERT(IsUsed(p));
Address cur = p->ObjectAreaStart();
Address end = p->AllocationTop();
while (cur < end) {
@ -636,14 +563,24 @@ Object* PagedSpace::FindObject(Address addr) {
cur = next;
}
UNREACHABLE();
return Failure::Exception();
}
bool PagedSpace::IsUsed(Page* page) {
PageIterator it(this, PageIterator::PAGES_IN_USE);
while (it.has_next()) {
if (page == it.next()) return true;
}
return false;
}
void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
alloc_info->top = p->ObjectAreaStart();
alloc_info->limit = p->ObjectAreaEnd();
ASSERT_PAGED_ALLOCATION_INFO(*alloc_info);
ASSERT(alloc_info->VerifyPagedAllocation());
}
@ -664,19 +601,6 @@ void PagedSpace::MCResetRelocationInfo() {
}
void PagedSpace::SetLinearAllocationOnly(bool linear_only) {
if (linear_only) {
// Note that the free_list is not cleared. If we switch back to
// FREE_LIST mode it will be available for use. Resetting it
// requires correct accounting for the wasted bytes.
allocation_mode_ = LINEAR_ONLY;
} else {
ASSERT(allocation_mode_ == LINEAR_ONLY);
allocation_mode_ = LINEAR;
}
}
int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
#ifdef DEBUG
// The Contains function considers the address at the beginning of a
@ -698,6 +622,33 @@ int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
}
// Slow case for reallocating and promoting objects during a compacting
// collection. This function is not space-specific.
HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
Page* current_page = TopPageOf(mc_forwarding_info_);
if (!current_page->next_page()->is_valid()) {
if (!Expand(current_page)) {
return NULL;
}
}
// There are surely more pages in the space now.
ASSERT(current_page->next_page()->is_valid());
// We do not add the top of page block for current page to the space's
// free list---the block may contain live objects so we cannot write
// bookkeeping information to it. Instead, we will recover top of page
// blocks when we move objects to their new locations.
//
// We do however write the allocation pointer to the page. The encoding
// of forwarding addresses is as an offset in terms of live bytes, so we
// need quick access to the allocation top of each page to decode
// forwarding addresses.
current_page->mc_relocation_top = mc_forwarding_info_.top;
SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
}
bool PagedSpace::Expand(Page* last_page) {
ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
ASSERT(Capacity() % Page::kObjectAreaSize == 0);
@ -754,7 +705,7 @@ void PagedSpace::Shrink() {
Page* current_page = top_page->next_page();
// Loop over the pages to the end of the space.
while (current_page->is_valid()) {
// Keep every odd-numbered page, one page for every two in the space.
// Advance last_page_to_keep every other step to end up at the midpoint.
if ((free_pages & 0x1) == 1) {
pages_to_keep++;
last_page_to_keep = last_page_to_keep->next_page();
@ -816,13 +767,16 @@ void PagedSpace::Print() { }
// NewSpace implementation
NewSpace::NewSpace(int initial_semispace_capacity,
int maximum_semispace_capacity) {
int maximum_semispace_capacity,
AllocationSpace id,
bool executable)
: Space(id, executable) {
ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
ASSERT(IsPowerOf2(maximum_semispace_capacity));
maximum_capacity_ = maximum_semispace_capacity;
capacity_ = initial_semispace_capacity;
to_space_ = new SemiSpace(capacity_, maximum_capacity_);
from_space_ = new SemiSpace(capacity_, maximum_capacity_);
to_space_ = new SemiSpace(capacity_, maximum_capacity_, id, executable);
from_space_ = new SemiSpace(capacity_, maximum_capacity_, id, executable);
// Allocate and setup the histogram arrays if necessary.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
@ -984,15 +938,20 @@ void NewSpace::Verify() {
// -----------------------------------------------------------------------------
// SemiSpace implementation
SemiSpace::SemiSpace(int initial_capacity, int maximum_capacity)
: capacity_(initial_capacity), maximum_capacity_(maximum_capacity),
start_(NULL), age_mark_(NULL) {
SemiSpace::SemiSpace(int initial_capacity,
int maximum_capacity,
AllocationSpace id,
bool executable)
: Space(id, executable), capacity_(initial_capacity),
maximum_capacity_(maximum_capacity), start_(NULL), age_mark_(NULL) {
}
bool SemiSpace::Setup(Address start, int size) {
ASSERT(size == maximum_capacity_);
if (!MemoryAllocator::CommitBlock(start, capacity_)) return false;
if (!MemoryAllocator::CommitBlock(start, capacity_, executable())) {
return false;
}
start_ = start;
address_mask_ = ~(size - 1);
@ -1011,7 +970,9 @@ void SemiSpace::TearDown() {
bool SemiSpace::Double() {
if (!MemoryAllocator::CommitBlock(high(), capacity_)) return false;
if (!MemoryAllocator::CommitBlock(high(), capacity_, executable())) {
return false;
}
capacity_ *= 2;
return true;
}
@ -1279,17 +1240,20 @@ void FreeListNode::set_size(int size_in_bytes) {
} else {
UNREACHABLE();
}
ASSERT(Size() == size_in_bytes);
}
Address FreeListNode::next() {
ASSERT(map() == Heap::byte_array_map());
ASSERT(Size() >= kNextOffset + kPointerSize);
return Memory::Address_at(address() + kNextOffset);
}
void FreeListNode::set_next(Address next) {
ASSERT(map() == Heap::byte_array_map());
ASSERT(Size() >= kNextOffset + kPointerSize);
Memory::Address_at(address() + kNextOffset) = next;
}
@ -1378,6 +1342,7 @@ Object* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
int rem = cur - index;
int rem_bytes = rem << kPointerSizeLog2;
FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
size_in_bytes);
// Distinguish the cases prev < rem < cur and rem <= prev < cur
@ -1421,7 +1386,23 @@ Object* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
}
MapSpaceFreeList::MapSpaceFreeList() {
#ifdef DEBUG
bool OldSpaceFreeList::Contains(FreeListNode* node) {
for (int i = 0; i < kFreeListsLength; i++) {
Address cur_addr = free_[i].head_node_;
while (cur_addr != NULL) {
FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
if (cur_node == node) return true;
cur_addr = cur_node->next();
}
}
return false;
}
#endif
MapSpaceFreeList::MapSpaceFreeList(AllocationSpace owner) {
owner_ = owner;
Reset();
}
@ -1448,7 +1429,7 @@ void MapSpaceFreeList::Free(Address start) {
Object* MapSpaceFreeList::Allocate() {
if (head_ == NULL) {
return Failure::RetryAfterGC(Map::kSize, MAP_SPACE);
return Failure::RetryAfterGC(Map::kSize, owner_);
}
FreeListNode* node = FreeListNode::FromAddress(head_);
@ -1478,9 +1459,8 @@ void OldSpace::PrepareForMarkCompact(bool will_compact) {
accounting_stats_.FillWastedBytes(Waste());
}
// Clear the free list and switch to linear allocation if we are in FREE_LIST
// Clear the free list before a full GC---it will be rebuilt afterward.
free_list_.Reset();
if (allocation_mode_ == FREE_LIST) allocation_mode_ = LINEAR;
}
@ -1506,7 +1486,7 @@ void OldSpace::MCCommitRelocationInfo() {
// Update fast allocation info.
allocation_info_.top = mc_forwarding_info_.top;
allocation_info_.limit = mc_forwarding_info_.limit;
ASSERT_PAGED_ALLOCATION_INFO(allocation_info_);
ASSERT(allocation_info_.VerifyPagedAllocation());
// The space is compacted and we haven't yet built free lists or
// wasted any space.
@ -1540,112 +1520,54 @@ void OldSpace::MCCommitRelocationInfo() {
}
Object* OldSpace::AllocateRawInternal(int size_in_bytes,
AllocationInfo* alloc_info) {
ASSERT(HasBeenSetup());
if (allocation_mode_ == LINEAR_ONLY || allocation_mode_ == LINEAR) {
// Try linear allocation in the current page.
Address cur_top = alloc_info->top;
Address new_top = cur_top + size_in_bytes;
if (new_top <= alloc_info->limit) {
Object* obj = HeapObject::FromAddress(cur_top);
alloc_info->top = new_top;
ASSERT_PAGED_ALLOCATION_INFO(*alloc_info);
accounting_stats_.AllocateBytes(size_in_bytes);
ASSERT(Size() <= Capacity());
return obj;
}
} else {
// For now we should not try free list allocation during m-c relocation.
ASSERT(alloc_info == &allocation_info_);
int wasted_bytes;
Object* object = free_list_.Allocate(size_in_bytes, &wasted_bytes);
accounting_stats_.WasteBytes(wasted_bytes);
if (!object->IsFailure()) {
accounting_stats_.AllocateBytes(size_in_bytes);
return object;
}
// Slow case for normal allocation. Try in order: (1) allocate in the next
// page in the space, (2) allocate off the space's free list, (3) expand the
// space, (4) fail.
HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
// Linear allocation in this space has failed. If there is another page
// in the space, move to that page and allocate there. This allocation
// should succeed (size_in_bytes should not be greater than a page's
// object area size).
Page* current_page = TopPageOf(allocation_info_);
if (current_page->next_page()->is_valid()) {
return AllocateInNextPage(current_page, size_in_bytes);
}
// Fast allocation failed.
return SlowAllocateRaw(size_in_bytes, alloc_info);
// There is no next page in this space. Try free list allocation.
int wasted_bytes;
Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
accounting_stats_.WasteBytes(wasted_bytes);
if (!result->IsFailure()) {
accounting_stats_.AllocateBytes(size_in_bytes);
return HeapObject::cast(result);
}
// Free list allocation failed and there is no next page. Try to expand
// the space and allocate in the new next page.
ASSERT(!current_page->next_page()->is_valid());
if (Expand(current_page)) {
return AllocateInNextPage(current_page, size_in_bytes);
}
// Finally, fail.
return NULL;
}
// Slow cases for AllocateRawInternal. In linear allocation mode, try
// to allocate in the next page in the space. If there are no more
// pages, switch to free-list allocation if permitted, otherwise try
// to grow the space. In free-list allocation mode, try to grow the
// space and switch to linear allocation.
Object* OldSpace::SlowAllocateRaw(int size_in_bytes,
AllocationInfo* alloc_info) {
if (allocation_mode_ == LINEAR_ONLY || allocation_mode_ == LINEAR) {
Page* top_page = TopPageOf(*alloc_info);
// Until we implement free-list allocation during global gc, we have two
// cases: one for normal allocation and one for m-c relocation allocation.
if (alloc_info == &allocation_info_) { // Normal allocation.
int free_size = top_page->ObjectAreaEnd() - alloc_info->top;
// Add the extra space at the top of this page to the free list.
if (free_size > 0) {
int wasted_bytes = free_list_.Free(alloc_info->top, free_size);
accounting_stats_.WasteBytes(wasted_bytes);
alloc_info->top += free_size;
ASSERT_PAGED_ALLOCATION_INFO(*alloc_info);
}
// Move to the next page in this space if there is one; switch
// to free-list allocation, if we can; try to expand the space otherwise
if (top_page->next_page()->is_valid()) {
SetAllocationInfo(alloc_info, top_page->next_page());
} else if (allocation_mode_ == LINEAR) {
allocation_mode_ = FREE_LIST;
} else if (Expand(top_page)) {
ASSERT(top_page->next_page()->is_valid());
SetAllocationInfo(alloc_info, top_page->next_page());
} else {
return Failure::RetryAfterGC(size_in_bytes, identity());
}
} else { // Allocation during m-c relocation.
// During m-c 'allocation' while computing forwarding addresses, we do
// not yet add blocks to the free list because they still contain live
// objects. We also cache the m-c forwarding allocation pointer in the
// current page.
// If there are no more pages try to expand the space. This can only
// happen when promoting objects from the new space.
if (!top_page->next_page()->is_valid()) {
if (!Expand(top_page)) {
return Failure::RetryAfterGC(size_in_bytes, identity());
}
}
// Move to the next page.
ASSERT(top_page->next_page()->is_valid());
top_page->mc_relocation_top = alloc_info->top;
SetAllocationInfo(alloc_info, top_page->next_page());
}
} else { // Free-list allocation.
// We failed to allocate from the free list; try to expand the space and
// switch back to linear allocation.
ASSERT(alloc_info == &allocation_info_);
Page* top_page = TopPageOf(*alloc_info);
if (!top_page->next_page()->is_valid()) {
if (!Expand(top_page)) {
return Failure::RetryAfterGC(size_in_bytes, identity());
}
}
// We surely have more pages, move to the next page and switch to linear
// allocation.
ASSERT(top_page->next_page()->is_valid());
SetAllocationInfo(alloc_info, top_page->next_page());
ASSERT(allocation_mode_ == FREE_LIST);
allocation_mode_ = LINEAR;
// Add the block at the top of the page to the space's free list, set the
// allocation info to the next page (assumed to be one), and allocate
// linearly there.
HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
int size_in_bytes) {
ASSERT(current_page->next_page()->is_valid());
// Add the block at the top of this page to the free list.
int free_size = current_page->ObjectAreaEnd() - allocation_info_.top;
if (free_size > 0) {
int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
accounting_stats_.WasteBytes(wasted_bytes);
}
// Perform the allocation.
return AllocateRawInternal(size_in_bytes, alloc_info);
SetAllocationInfo(&allocation_info_, current_page->next_page());
return AllocateLinearly(&allocation_info_, size_in_bytes);
}
@ -1655,7 +1577,7 @@ Object* OldSpace::SlowAllocateRaw(int size_in_bytes,
void OldSpace::Verify() {
// The allocation pointer should be valid, and it should be in a page in the
// space.
ASSERT_PAGED_ALLOCATION_INFO(allocation_info_);
ASSERT(allocation_info_.VerifyPagedAllocation());
Page* top_page = Page::FromAllocationTop(allocation_info_.top);
ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
@ -2045,10 +1967,8 @@ void MapSpace::PrepareForMarkCompact(bool will_compact) {
accounting_stats_.AllocateBytes(free_list_.available());
}
// Clear the free list and switch to linear allocation if not already
// required.
// Clear the free list before a full GC---it will be rebuilt afterward.
free_list_.Reset();
if (allocation_mode_ != LINEAR_ONLY) allocation_mode_ = LINEAR;
}
@ -2056,7 +1976,7 @@ void MapSpace::MCCommitRelocationInfo() {
// Update fast allocation info.
allocation_info_.top = mc_forwarding_info_.top;
allocation_info_.limit = mc_forwarding_info_.limit;
ASSERT_PAGED_ALLOCATION_INFO(allocation_info_);
ASSERT(allocation_info_.VerifyPagedAllocation());
// The space is compacted and we haven't yet wasted any space.
ASSERT(Waste() == 0);
@ -2079,95 +1999,51 @@ void MapSpace::MCCommitRelocationInfo() {
}
Object* MapSpace::AllocateRawInternal(int size_in_bytes,
AllocationInfo* alloc_info) {
ASSERT(HasBeenSetup());
// When doing free-list allocation, we implicitly assume that we always
// allocate a map-sized block.
ASSERT(size_in_bytes == Map::kSize);
if (allocation_mode_ == LINEAR_ONLY || allocation_mode_ == LINEAR) {
// Try linear allocation in the current page.
Address cur_top = alloc_info->top;
Address new_top = cur_top + size_in_bytes;
if (new_top <= alloc_info->limit) {
Object* obj = HeapObject::FromAddress(cur_top);
alloc_info->top = new_top;
ASSERT_PAGED_ALLOCATION_INFO(*alloc_info);
// Slow case for normal allocation. Try in order: (1) allocate in the next
// page in the space, (2) allocate off the space's free list, (3) expand the
// space, (4) fail.
HeapObject* MapSpace::SlowAllocateRaw(int size_in_bytes) {
// Linear allocation in this space has failed. If there is another page
// in the space, move to that page and allocate there. This allocation
// should succeed.
Page* current_page = TopPageOf(allocation_info_);
if (current_page->next_page()->is_valid()) {
return AllocateInNextPage(current_page, size_in_bytes);
}
// There is no next page in this space. Try free list allocation. The
// map space free list implicitly assumes that all free blocks are map
// sized.
if (size_in_bytes == Map::kSize) {
Object* result = free_list_.Allocate();
if (!result->IsFailure()) {
accounting_stats_.AllocateBytes(size_in_bytes);
return obj;
}
} else {
// We should not do free list allocation during m-c compaction.
ASSERT(alloc_info == &allocation_info_);
Object* object = free_list_.Allocate();
if (!object->IsFailure()) {
accounting_stats_.AllocateBytes(size_in_bytes);
return object;
return HeapObject::cast(result);
}
}
// Fast allocation failed.
return SlowAllocateRaw(size_in_bytes, alloc_info);
// Free list allocation failed and there is no next page. Try to expand
// the space and allocate in the new next page.
ASSERT(!current_page->next_page()->is_valid());
if (Expand(current_page)) {
return AllocateInNextPage(current_page, size_in_bytes);
}
// Finally, fail.
return NULL;
}
// Slow case for AllocateRawInternal. In linear allocation mode, try to
// allocate in the next page in the space. If there are no more pages, switch
// to free-list allocation. In free-list allocation mode, try to grow the
// space and switch to linear allocation.
Object* MapSpace::SlowAllocateRaw(int size_in_bytes,
AllocationInfo* alloc_info) {
if (allocation_mode_ == LINEAR_ONLY || allocation_mode_ == LINEAR) {
Page* top_page = TopPageOf(*alloc_info);
// We do not do free-list allocation during compacting GCs.
if (alloc_info == &mc_forwarding_info_) {
// We expect to always have more pages, because the map space cannot
// grow during GC. Move to the next page.
CHECK(top_page->next_page()->is_valid());
top_page->mc_relocation_top = alloc_info->top;
SetAllocationInfo(alloc_info, top_page->next_page());
} else { // Normal allocation.
// Move to the next page in this space (counting the top-of-page block
// as waste) if there is one, otherwise switch to free-list allocation if
// permitted, otherwise try to expand the heap
if (top_page->next_page()->is_valid() ||
(allocation_mode_ == LINEAR_ONLY && Expand(top_page))) {
int free_size = top_page->ObjectAreaEnd() - alloc_info->top;
ASSERT(free_size == kPageExtra);
accounting_stats_.WasteBytes(free_size);
SetAllocationInfo(alloc_info, top_page->next_page());
} else if (allocation_mode_ == LINEAR) {
allocation_mode_ = FREE_LIST;
} else {
return Failure::RetryAfterGC(size_in_bytes, MAP_SPACE);
}
}
} else { // Free-list allocation.
ASSERT(alloc_info == &allocation_info_);
// We failed to allocate from the free list (ie, it must be empty) so try
// to expand the space and switch back to linear allocation.
Page* top_page = TopPageOf(*alloc_info);
if (!top_page->next_page()->is_valid()) {
if (!Expand(top_page)) {
return Failure::RetryAfterGC(size_in_bytes, MAP_SPACE);
}
}
// We have more pages now so we can move to the next and switch to linear
// allocation.
ASSERT(top_page->next_page()->is_valid());
int free_size = top_page->ObjectAreaEnd() - alloc_info->top;
ASSERT(free_size == kPageExtra);
accounting_stats_.WasteBytes(free_size);
SetAllocationInfo(alloc_info, top_page->next_page());
ASSERT(allocation_mode_ == FREE_LIST);
allocation_mode_ = LINEAR;
}
// Perform the allocation.
return AllocateRawInternal(size_in_bytes, alloc_info);
// Move to the next page (there is assumed to be one) and allocate there.
// The top-of-page block is always wasted, because it is too small to hold
// a map.
HeapObject* MapSpace::AllocateInNextPage(Page* current_page,
int size_in_bytes) {
ASSERT(current_page->next_page()->is_valid());
ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == kPageExtra);
accounting_stats_.WasteBytes(kPageExtra);
SetAllocationInfo(&allocation_info_, current_page->next_page());
return AllocateLinearly(&allocation_info_, size_in_bytes);
}
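The kPageExtra constant charged above is defined in the header below as Page::kObjectAreaSize % Map::kSize. A worked example with assumed sizes, since the actual constants are not part of this hunk:

// Illustration only: if Page::kObjectAreaSize were 8160 bytes and
// Map::kSize were 112 bytes, then 72 maps fit per page (72 * 112 =
// 8064) and kPageExtra = 8160 % 112 = 96. Those trailing 96 bytes can
// never hold a map, so every page rollover wastes exactly that much.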
@ -2177,7 +2053,7 @@ Object* MapSpace::SlowAllocateRaw(int size_in_bytes,
void MapSpace::Verify() {
// The allocation pointer should be valid, and it should be in a page in the
// space.
ASSERT_PAGED_ALLOCATION_INFO(allocation_info_);
ASSERT(allocation_info_.VerifyPagedAllocation());
Page* top_page = Page::FromAllocationTop(allocation_info_.top);
ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
@ -2315,9 +2191,12 @@ HeapObject* LargeObjectIterator::next() {
// LargeObjectChunk
LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
size_t* chunk_size) {
size_t* chunk_size,
bool executable) {
size_t requested = ChunkSizeFor(size_in_bytes);
void* mem = MemoryAllocator::AllocateRawMemory(requested, chunk_size);
void* mem = MemoryAllocator::AllocateRawMemory(requested,
chunk_size,
executable);
if (mem == NULL) return NULL;
LOG(NewEvent("LargeObjectChunk", mem, *chunk_size));
if (*chunk_size < requested) {
@ -2339,8 +2218,9 @@ int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
// -----------------------------------------------------------------------------
// LargeObjectSpace
LargeObjectSpace::LargeObjectSpace()
: first_chunk_(NULL),
LargeObjectSpace::LargeObjectSpace(AllocationSpace id, bool executable)
: Space(id, executable),
first_chunk_(NULL),
size_(0),
page_count_(0) {}
@ -2371,9 +2251,9 @@ Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
ASSERT(0 < object_size && object_size <= requested_size);
size_t chunk_size;
LargeObjectChunk* chunk =
LargeObjectChunk::New(requested_size, &chunk_size);
LargeObjectChunk::New(requested_size, &chunk_size, executable());
if (chunk == NULL) {
return Failure::RetryAfterGC(requested_size, LO_SPACE);
return Failure::RetryAfterGC(requested_size, identity());
}
size_ += chunk_size;
@ -2483,8 +2363,9 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
LargeObjectChunk* current = first_chunk_;
while (current != NULL) {
HeapObject* object = current->GetObject();
if (is_marked(object)) {
clear_mark(object);
if (object->IsMarked()) {
object->ClearMark();
MarkCompactCollector::tracer()->decrement_marked_count();
previous = current;
current = current->next();
} else {

View File

@ -83,7 +83,7 @@ namespace v8 { namespace internal {
class PagedSpace;
class MemoryAllocator;
struct AllocationInfo;
class AllocationInfo;
// -----------------------------------------------------------------------------
// A page normally has 8K bytes. Large object pages may be larger. A page
@ -141,7 +141,7 @@ class Page {
// Returns the next page of this page.
inline Page* next_page();
// Return the end of allocation in this page.
// Return the end of allocation in this page. Undefined for unused pages.
inline Address AllocationTop();
// Returns the start address of the object area in this page.
@ -271,6 +271,22 @@ class Page {
};
// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
public:
Space(AllocationSpace id, bool executable)
: id_(id), executable_(executable) {}
// Does the space need executable memory?
bool executable() { return executable_; }
// Identity used in error reporting.
AllocationSpace identity() { return id_; }
private:
AllocationSpace id_;
bool executable_;
};
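A minimal sketch of how a heap might instantiate spaces against this new base class, using the constructors shown elsewhere in this diff; the capacities and the exact space list (including CODE_SPACE) are assumptions:

// Hypothetical setup: only spaces that may contain code objects
// request executable memory.
OldSpace* old_space = new OldSpace(max_capacity, OLD_SPACE, false);
OldSpace* code_space = new OldSpace(max_capacity, CODE_SPACE, true);
MapSpace* map_space = new MapSpace(max_capacity, MAP_SPACE);  // never executable
LargeObjectSpace* lo_space = new LargeObjectSpace(LO_SPACE, true);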
// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator manages chunks for the paged heap spaces (old space and map
@ -322,7 +338,7 @@ class MemoryAllocator : public AllStatic {
// the address is not NULL, the size is greater than zero, and that the
// block is contained in the initial chunk. Returns true if it succeeded
// and false otherwise.
static bool CommitBlock(Address start, size_t size);
static bool CommitBlock(Address start, size_t size, bool executable);
// Attempts to allocate the requested (non-zero) number of pages from the
// OS. Fewer pages might be allocated than requested. If it fails to
@ -345,7 +361,9 @@ class MemoryAllocator : public AllStatic {
// Allocates and frees raw memory of certain size.
// These are just thin wrappers around OS::Allocate and OS::Free,
// but keep track of allocated bytes as part of heap.
static void* AllocateRawMemory(const size_t requested, size_t* allocated);
static void* AllocateRawMemory(const size_t requested,
size_t* allocated,
bool executable);
static void FreeRawMemory(void* buf, size_t length);
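A hedged sketch of the wrapper shape this comment describes; the OS::Allocate signature and the bookkeeping member are assumptions rather than code from this commit:

// Sketch only: forward to the OS layer and track allocated bytes.
void* MemoryAllocator::AllocateRawMemory(const size_t requested,
                                         size_t* allocated,
                                         bool executable) {
  void* mem = OS::Allocate(requested, allocated, executable);
  if (mem != NULL) size_ += static_cast<int>(*allocated);
  return mem;
}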
// Returns the maximum available bytes of heaps.
@ -457,28 +475,6 @@ class ObjectIterator : public Malloced {
};
// -----------------------------------------------------------------------------
// Space iterator for iterating over all spaces.
//
// For each space an object iterator is provided. The deallocation of the
// returned object iterators is handled by the space iterator.
class SpaceIterator : public Malloced {
public:
SpaceIterator();
virtual ~SpaceIterator();
bool has_next();
ObjectIterator* next();
private:
ObjectIterator* CreateIterator();
int current_space_; // from enum AllocationSpace.
ObjectIterator* iterator_; // object iterator for the current space.
};
// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
@ -562,9 +558,17 @@ class PageIterator BASE_EMBEDDED {
// An abstraction of allocation and relocation pointers in a page-structured
// space.
struct AllocationInfo {
class AllocationInfo {
public:
Address top; // current allocation top
Address limit; // current allocation limit
#ifdef DEBUG
bool VerifyPagedAllocation() {
return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
&& (top <= limit);
}
#endif
};
@ -653,11 +657,13 @@ class AllocationStats BASE_EMBEDDED {
};
class PagedSpace : public Malloced {
class PagedSpace : public Space {
friend class PageIterator;
public:
// Creates a space with a maximum capacity, and an id.
PagedSpace(int max_capacity, AllocationSpace id);
PagedSpace(int max_capacity, AllocationSpace id, bool executable);
virtual ~PagedSpace() {}
// Set up the space using the given address range of virtual memory (from
// the memory allocator's initial chunk) if possible. If the block of
@ -677,12 +683,15 @@ class PagedSpace : public Malloced {
inline bool Contains(Address a);
bool Contains(HeapObject* o) { return Contains(o->address()); }
// Finds an object that the given address falls in its body. Returns
// Failure::Exception() if the operation failed. The implementation
// iterates objects in the page containing the address, the cost is
// linear to the number of objects in the page. It may be slow.
// Given an address occupied by a live object, return that object if it is
// in this space, or Failure::Exception() if it is not. The implementation
// iterates over objects in the page containing the address; the cost is
// linear in the number of objects in the page. It may be slow.
Object* FindObject(Address addr);
// Checks whether page is currently in use by this space.
bool IsUsed(Page* page);
// Clears remembered sets of pages in this space.
void ClearRSet();
@ -705,13 +714,17 @@ class PagedSpace : public Malloced {
// Returns the allocation pointer in this space.
Address top() { return allocation_info_.top; }
AllocationSpace identity() { return identity_; }
// Allocate the requested number of bytes in the space if possible, return a
// failure object if not.
inline Object* AllocateRaw(int size_in_bytes);
// If 'linear_only' is true, force allocation_mode_ to
// LINEAR_ONLY. If 'linear_only' is false, allocation_mode_ is
// checked to be LINEAR_ONLY and changed to LINEAR, allowing it to
// alternate between LINEAR and FREE_LIST automatically.
void SetLinearAllocationOnly(bool linear_only);
// Allocate the requested number of bytes for relocation during mark-compact
// collection.
inline Object* MCAllocateRaw(int size_in_bytes);
// Allocate the requested number of bytes during deserialization.
inline Object* AllocateForDeserialization(int size_in_bytes);
// ---------------------------------------------------------------------------
// Mark-compact collection support functions
@ -735,8 +748,6 @@ class PagedSpace : public Malloced {
bool EnsureCapacity(int capacity);
#ifdef DEBUG
void CheckLinearAllocationOnly() { CHECK(allocation_mode_ == LINEAR_ONLY); }
// Print meta info and objects in this space.
void Print();
@ -747,13 +758,6 @@ class PagedSpace : public Malloced {
#endif
protected:
// In LINEAR and LINEAR_ONLY mode, allocation is from the end of the last
// page. In FREE_LIST mode, allocation is from a fragment list of free
// space at the end of recent pages. LINEAR and FREE_LIST mode alternate
// automatically. LINEAR_ONLY mode is sticky until converted to LINEAR by
// an API call.
enum AllocationMode { LINEAR_ONLY, LINEAR, FREE_LIST };
// Maximum capacity of this space.
int max_capacity_;
@ -763,9 +767,6 @@ class PagedSpace : public Malloced {
// The first page in this space.
Page* first_page_;
// The allocation mode.
AllocationMode allocation_mode_;
// Normal allocation information.
AllocationInfo allocation_info_;
@ -785,13 +786,27 @@ class PagedSpace : public Malloced {
// pages are appended to the last_page;
bool Expand(Page* last_page);
// Generic fast case allocation function that tries linear allocation in
// the top page of 'alloc_info'. Returns NULL on failure.
inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
int size_in_bytes);
// During normal allocation or deserialization, roll to the next page in
// the space (there is assumed to be one) and allocate there. This
// function is space-dependent.
virtual HeapObject* AllocateInNextPage(Page* current_page,
int size_in_bytes) = 0;
// Slow path of AllocateRaw. This function is space-dependent.
virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;
// Slow path of MCAllocateRaw.
HeapObject* SlowMCAllocateRaw(int size_in_bytes);
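The declarations above split allocation into a shared inline fast case and space-specific virtual slow cases. A minimal sketch of how the pieces might compose; the bodies are assumed, since the inline definitions are not part of this hunk:

// Hedged sketch of the fast path and its fallback chain.
HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info,
                                         int size_in_bytes) {
  Address current_top = alloc_info->top;
  Address new_top = current_top + size_in_bytes;
  if (new_top > alloc_info->limit) return NULL;  // fast path failed
  alloc_info->top = new_top;
  accounting_stats_.AllocateBytes(size_in_bytes);
  return HeapObject::FromAddress(current_top);
}

Object* PagedSpace::AllocateRaw(int size_in_bytes) {
  HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
  if (object == NULL) object = SlowAllocateRaw(size_in_bytes);
  if (object == NULL) return Failure::RetryAfterGC(size_in_bytes, identity());
  return object;
}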
#ifdef DEBUG
void DoPrintRSet(const char* space_name);
#endif
private:
// Identity of this space.
AllocationSpace identity_;
// Returns the page of the allocation pointer.
Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
@ -844,7 +859,7 @@ class HistogramInfo BASE_EMBEDDED {
// uses the memory in the from space as a marking stack when tracing live
// objects.
class SemiSpace BASE_EMBEDDED {
class SemiSpace : public Space {
public:
// Creates a space in the young generation. The constructor does not
// allocate memory from the OS. A SemiSpace is given a contiguous chunk of
@ -852,7 +867,10 @@ class SemiSpace BASE_EMBEDDED {
// otherwise. In the mark-compact collector, the memory region of the from
// space is used as the marking stack. It requires contiguous memory
// addresses.
SemiSpace(int initial_capacity, int maximum_capacity);
SemiSpace(int initial_capacity,
int maximum_capacity,
AllocationSpace id,
bool executable);
// Sets up the semispace using the given chunk.
bool Setup(Address start, int size);
@ -971,7 +989,7 @@ class SemiSpaceIterator : public ObjectIterator {
// The new space consists of a contiguous pair of semispaces. It simply
// forwards most functions to the appropriate semispace.
class NewSpace : public Malloced {
class NewSpace : public Space {
public:
// Create a new space with a given allocation capacity (ie, the capacity of
// *one* of the semispaces). The constructor does not allocate heap memory
@ -979,7 +997,10 @@ class NewSpace : public Malloced {
// memory of size 2 * semispace_capacity. To support fast containment
// testing in the new space, the size of this chunk must be a power of two
// and it must be aligned to its size.
NewSpace(int initial_semispace_capacity, int maximum_semispace_capacity);
NewSpace(int initial_semispace_capacity,
int maximum_semispace_capacity,
AllocationSpace id,
bool executable);
// Sets up the new space using the given chunk.
bool Setup(Address start, int size);
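The power-of-two, size-aligned chunk requirement exists to make containment tests cheap. A hedged sketch of the kind of test this alignment enables; the mask and start members are assumed names:

// Sketch: with a 2 * semispace_capacity chunk aligned to its own size,
// membership is a single mask-and-compare instead of two range checks.
bool NewSpace::Contains(Address a) {
  uintptr_t address = reinterpret_cast<uintptr_t>(a);
  return (address & address_mask_) == reinterpret_cast<uintptr_t>(start_);
}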
@ -1039,8 +1060,6 @@ class NewSpace : public Malloced {
Address* allocation_top_address() { return &allocation_info_.top; }
Address* allocation_limit_address() { return &allocation_info_.limit; }
// Allocate the requested number of bytes in the space if possible, return a
// failure object if not.
Object* AllocateRaw(int size_in_bytes) {
return AllocateRawInternal(size_in_bytes, &allocation_info_);
}
@ -1270,6 +1289,11 @@ class OldSpaceFreeList BASE_EMBEDDED {
void RebuildSizeList();
bool needs_rebuild_;
#ifdef DEBUG
// Does this free list contain a free block located at the address of 'node'?
bool Contains(FreeListNode* node);
#endif
DISALLOW_EVIL_CONSTRUCTORS(OldSpaceFreeList);
};
@ -1277,7 +1301,7 @@ class OldSpaceFreeList BASE_EMBEDDED {
// The free list for the map space.
class MapSpaceFreeList BASE_EMBEDDED {
public:
MapSpaceFreeList();
explicit MapSpaceFreeList(AllocationSpace owner);
// Clear the free list.
void Reset();
@ -1302,6 +1326,10 @@ class MapSpaceFreeList BASE_EMBEDDED {
// The head of the free list.
Address head_;
// The identity of the owning space, for building allocation Failure
// objects.
AllocationSpace owner_;
DISALLOW_EVIL_CONSTRUCTORS(MapSpaceFreeList);
};
@ -1313,8 +1341,8 @@ class OldSpace : public PagedSpace {
public:
// Creates an old space object with a given maximum capacity.
// The constructor does not allocate pages from OS.
explicit OldSpace(int max_capacity, AllocationSpace id)
: PagedSpace(max_capacity, id), free_list_(id) {
explicit OldSpace(int max_capacity, AllocationSpace id, bool executable)
: PagedSpace(max_capacity, id, executable), free_list_(id) {
}
// Returns maximum available bytes that the old space can have.
@ -1327,23 +1355,11 @@ class OldSpace : public PagedSpace {
// pointer).
int AvailableFree() { return free_list_.available(); }
// The top of allocation in a page in this space.
// The top of allocation in a page in this space. Undefined if the page is unused.
Address PageAllocationTop(Page* page) {
return page == TopPageOf(allocation_info_) ? top() : page->ObjectAreaEnd();
}
// Allocates requested bytes. May return Failure if the space is full.
Object* AllocateRaw(int size_in_bytes) {
ASSERT_OBJECT_SIZE(size_in_bytes);
return AllocateRawInternal(size_in_bytes, &allocation_info_);
}
// Allocates requested bytes for object relocation.
Object* MCAllocateRaw(int size_in_bytes) {
ASSERT_OBJECT_SIZE(size_in_bytes);
return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
}
// Give a block of memory to the space's free list. It might be added to
// the free list or accounted as waste.
void Free(Address start, int size_in_bytes) {
@ -1376,6 +1392,14 @@ class OldSpace : public PagedSpace {
void PrintRSet();
#endif
protected:
// Virtual function in the superclass. Slow path of AllocateRaw.
HeapObject* SlowAllocateRaw(int size_in_bytes);
// Virtual function in the superclass. Allocate linearly at the start of
// the page after current_page (there is assumed to be one).
HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
private:
// The space's free list.
OldSpaceFreeList free_list_;
@ -1384,14 +1408,6 @@ class OldSpace : public PagedSpace {
// object in order to know when to move to the next page.
Address mc_end_of_relocation_;
// Implementation of AllocateRaw. Allocates requested number of bytes using
// the given allocation information according to the space's current
// allocation mode.
Object* AllocateRawInternal(int size_in_bytes, AllocationInfo* alloc_info);
// Slow path of AllocateRaw functions.
Object* SlowAllocateRaw(int size_in_bytes, AllocationInfo* alloc_info);
public:
TRACK_MEMORY("OldSpace")
};
@ -1403,30 +1419,19 @@ class OldSpace : public PagedSpace {
class MapSpace : public PagedSpace {
public:
// Creates a map space object with a maximum capacity.
explicit MapSpace(int max_capacity) : PagedSpace(max_capacity, MAP_SPACE) { }
explicit MapSpace(int max_capacity, AllocationSpace id)
: PagedSpace(max_capacity, id, false), free_list_(id) { }
// The bytes available on the free list (ie, not above the linear allocation
// pointer).
int AvailableFree() { return free_list_.available(); }
// The top of allocation in a page in this space.
// The top of allocation in a page in this space. Undefined if the page is unused.
Address PageAllocationTop(Page* page) {
return page == TopPageOf(allocation_info_) ? top()
: page->ObjectAreaEnd() - kPageExtra;
}
// Allocates requested bytes. May return Failure if the space is full.
Object* AllocateRaw(int size_in_bytes) {
ASSERT_OBJECT_SIZE(size_in_bytes);
return AllocateRawInternal(size_in_bytes, &allocation_info_);
}
// Allocates requested bytes for object relocation.
Object* MCAllocateRaw(int size_in_bytes) {
ASSERT_OBJECT_SIZE(size_in_bytes);
return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
}
// Give a map-sized block of memory to the space's free list.
void Free(Address start) {
free_list_.Free(start);
@ -1459,6 +1464,14 @@ class MapSpace : public PagedSpace {
static const int kPageExtra = Page::kObjectAreaSize % Map::kSize;
protected:
// Virtual function in the superclass. Slow path of AllocateRaw.
HeapObject* SlowAllocateRaw(int size_in_bytes);
// Virtual function in the superclass. Allocate linearly at the start of
// the page after current_page (there is assumed to be one).
HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
private:
// The space's free list.
MapSpaceFreeList free_list_;
@ -1466,13 +1479,6 @@ class MapSpace : public PagedSpace {
// An array of page start address in a map space.
Address page_addresses_[kMaxMapPageIndex];
// Implementation of AllocateRaw. Allocates requested bytes using
// the given allocation information.
Object* AllocateRawInternal(int size_in_bytes, AllocationInfo* alloc_info);
// Slow path of AllocateRaw functions.
Object* SlowAllocateRaw(int size_in_bytes, AllocationInfo* alloc_info);
public:
TRACK_MEMORY("MapSpace")
};
@ -1495,7 +1501,9 @@ class LargeObjectChunk {
// object and possibly extra remembered set words) bytes after the object
// area start of that page. The allocated chunk size is set in the output
// parameter chunk_size.
static LargeObjectChunk* New(int size_in_bytes, size_t* chunk_size);
static LargeObjectChunk* New(int size_in_bytes,
size_t* chunk_size,
bool executable);
// Interpret a raw address as a large object chunk.
static LargeObjectChunk* FromAddress(Address address) {
@ -1542,10 +1550,10 @@ class LargeObjectChunk {
};
class LargeObjectSpace {
class LargeObjectSpace : public Space {
friend class LargeObjectIterator;
public:
LargeObjectSpace();
explicit LargeObjectSpace(AllocationSpace id, bool executable);
// Initializes internal data structures.
bool Setup();

View File

@ -56,6 +56,10 @@
// ECMA-262 section 15.5.4.5
%AddProperty($String.prototype, "charCodeAt", function(pos) {
var fast_answer = %_FastCharCodeAt(this, pos);
if (%_IsSmi(fast_answer)) {
return fast_answer;
}
var subject = ToString(this);
var index = TO_INTEGER(pos);
return %StringCharCodeAt(subject, index);

View File

@ -157,7 +157,7 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
HandleScope scope;
// Enter the JS frame but don't add additional arguments.
__ EnterJSFrame(0, 0);
__ EnterJSFrame(0);
// Push the function on the stack and call the runtime function.
__ Push(MemOperand(pp, 0));
@ -167,7 +167,7 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
__ mov(r1, Operand(r0));
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kArgsLengthOffset));
__ ExitJSFrame(DO_NOT_RETURN, 0);
__ ExitJSFrame(DO_NOT_RETURN);
// Do a tail-call of the compiled function.
__ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));

View File

@ -64,8 +64,8 @@ Code* StubCache::Set(String* name, Map* map, Code* code) {
// the stub cache only contains monomorphic stubs. Make sure that
// the bits are the least significant so they will be the ones
// masked out.
ASSERT(Code::ExtractStateFromFlags(flags) == MONOMORPHIC);
ASSERT(Code::kFlagsStateShift == 0);
ASSERT(Code::ExtractICStateFromFlags(flags) == MONOMORPHIC);
ASSERT(Code::kFlagsICStateShift == 0);
// Make sure that the code type is not included in the hash.
ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
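A hedged illustration of why these assertions matter; the field width and hash shape here are assumptions, not this file's actual code:

// Every cached stub is MONOMORPHIC, so with kFlagsICStateShift == 0 the
// IC state is the same constant in the low bits of every entry's flags.
// Masking those bits out of the hash therefore discards a constant and
// cannot make two distinct entries collide.
static uint32_t HashSketch(uint32_t name_hash, uint32_t flags) {
  const uint32_t kICStateMask = 0x7;  // assumed field width
  return (name_hash ^ flags) & ~kICStateMask;
}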

View File

@ -290,14 +290,10 @@ bool Top::check_break(int break_id) {
bool Top::is_break() {
ExecutionAccess access;
return is_break_no_lock();
}
bool Top::is_break_no_lock() {
return break_id_ != 0;
}
StackFrame::Id Top::break_frame_id() {
ExecutionAccess access;
return break_frame_id_;

View File

@ -179,7 +179,6 @@ class Top {
static void set_break(StackFrame::Id break_frame_id, int break_id);
static bool check_break(int break_id);
static bool is_break();
static bool is_break_no_lock();
static StackFrame::Id break_frame_id();
static int break_id();

View File

@ -231,4 +231,57 @@ int WriteChars(const char* filename,
}
StringBuilder::StringBuilder(int size) {
buffer_ = NewArray<char>(size);
size_ = size;
position_ = 0;
}
void StringBuilder::AddString(const char* s) {
AddSubstring(s, strlen(s));
}
void StringBuilder::AddSubstring(const char* s, int n) {
ASSERT(!is_finalized() && position_ + n < size_);
ASSERT(static_cast<size_t>(n) <= strlen(s));
memcpy(&buffer_[position_], s, n * kCharSize);
position_ += n;
}
void StringBuilder::AddFormatted(const char* format, ...) {
ASSERT(!is_finalized() && position_ < size_);
va_list args;
va_start(args, format);
int remaining = size_ - position_;
int n = OS::VSNPrintF(&buffer_[position_], remaining, format, args);
va_end(args);
if (n < 0 || n >= remaining) {
position_ = size_;
} else {
position_ += n;
}
}
void StringBuilder::AddPadding(char c, int count) {
for (int i = 0; i < count; i++) {
AddCharacter(c);
}
}
char* StringBuilder::Finalize() {
ASSERT(!is_finalized() && position_ < size_);
buffer_[position_] = '\0';
// Make sure nobody managed to add a 0-character to the
// buffer while building the string.
ASSERT(strlen(buffer_) == static_cast<size_t>(position_));
position_ = -1;
ASSERT(is_finalized());
return buffer_;
}
} } // namespace v8::internal

View File

@ -271,6 +271,7 @@ class Access {
template <typename T>
class Vector {
public:
Vector() : start_(NULL), length_(0) {}
Vector(T* data, int length) : start_(data), length_(length) {
ASSERT(length == 0 || (length > 0 && data != NULL));
}
@ -300,6 +301,7 @@ class Vector {
// Releases the array underlying this vector. Once disposed the
// vector is empty.
void Dispose() {
if (is_empty()) return;
DeleteArray(start_);
start_ = NULL;
length_ = 0;
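Together, the new default constructor and the empty-check make patterns like the following safe; this is an illustrative sketch (need_copy and n are hypothetical), not code from this commit:

// Sketch: a vector that may or may not receive a backing array.
Vector<char> buffer;  // start_ == NULL, length_ == 0
if (need_copy) buffer = Vector<char>(NewArray<char>(n), n);
// ... use buffer only when it is non-empty ...
buffer.Dispose();  // with the guard above, a no-op for the empty case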
@ -353,6 +355,69 @@ class AsciiStringAdapter: public v8::String::ExternalAsciiStringResource {
};
// Helper class for building result strings in a character buffer. The
// purpose of the class is to use safe operations that check the
// buffer bounds in debug mode.
class StringBuilder {
public:
// Create a string builder with a buffer of the given size. The
// buffer is allocated through NewArray<char> and must be
// deallocated by the caller of Finalize().
explicit StringBuilder(int size);
StringBuilder(char* buffer, int size)
: buffer_(buffer), size_(size), position_(0) { }
~StringBuilder() { if (!is_finalized()) Finalize(); }
int size() const { return size_; }
// Get the current position in the builder.
int position() const {
ASSERT(!is_finalized());
return position_;
}
// Reset the position.
void Reset() { position_ = 0; }
// Add a single character to the builder. It is not allowed to add
// 0-characters; use the Finalize() method to terminate the string
// instead.
void AddCharacter(char c) {
ASSERT(c != '\0');
ASSERT(!is_finalized() && position_ < size_);
buffer_[position_++] = c;
}
// Add an entire string to the builder. Uses strlen() internally to
// compute the length of the input string.
void AddString(const char* s);
// Add the first 'n' characters of the given string 's' to the
// builder. The input string must have enough characters.
void AddSubstring(const char* s, int n);
// Add formatted contents to the builder just like printf().
void AddFormatted(const char* format, ...);
// Add character padding to the builder. If count is non-positive,
// nothing is added to the builder.
void AddPadding(char c, int count);
// Finalize the string by 0-terminating it and returning the buffer.
char* Finalize();
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
char* buffer_;
int size_;
int position_;
bool is_finalized() const { return position_ < 0; }
};
} } // namespace v8::internal
#endif // V8_UTILS_H_
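An illustrative use of the class, assuming only the members declared above; the formatted values are made up:

// Sketch: build a small diagnostic string and hand back the buffer.
StringBuilder builder(64);
builder.AddString("count=");
builder.AddFormatted("%d", 42);      // printf-style formatting
builder.AddPadding(' ', 2);
builder.AddString("(ok)");
char* message = builder.Finalize();  // 0-terminates and returns buffer
// The int-size constructor allocates via NewArray<char>, so the caller
// frees the finalized buffer:
DeleteArray(message);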

View File

@ -1,4 +1,4 @@
# Copyright 2006-2008 Google Inc. All Rights Reserved.
# Copyright 2006 Google Inc. All Rights Reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
@ -48,7 +48,7 @@ def CompressScript(lines):
# Note that we could easily compress the scripts more but don't
# since we want it to remain readable.
lines = re.sub('//.*\n', '\n', lines) # end-of-line comments
lines = re.sub('\s+\n+', '\n', lines) # trailing whitespace
lines = re.sub('\s+\n+', '\n', lines) # trailing whitespace
return lines
@ -87,38 +87,12 @@ def ParseValue(string):
return string
def MakeVersion(source, target):
TEMPLATE = """
#include "v8.h"
void v8::V8::GetVersion(v8::VersionInfo *info) {
info->major = %(major)s;
info->minor = %(minor)s;
info->build_major = %(build_major)s;
info->build_minor = %(build_minor)s;
info->revision = %(revision)s;
}
"""
PATTERN = re.compile('\$[a-zA-Z]+:\s*([0-9]+)\s*\$')
def VersionToInt(str):
match = PATTERN.match(str)
if match: return match.group(1)
else: return str
config = LoadConfigFrom(source)
map = { }
for key, value in config.items('VERSION'):
map[key] = VersionToInt(value)
output = TEMPLATE % map
file = open(target, "w")
file.write(output)
file.close()
def ExpandConstants(lines, constants):
for key, value in constants.items():
lines = lines.replace(key, str(value))
return lines
def ExpandMacros(lines, macros):
for name, macro in macros.items():
start = lines.find(name, 0)
@ -298,11 +272,10 @@ def JS2C(source, target, env):
if delay: id = id[:-6]
if delay:
delay_ids.append((id, len(lines)))
source_lines_empty.append(SOURCE_DECLARATION % { 'id': id, 'data': data })
else:
ids.append((id, len(lines)))
source_lines_empty.append(SOURCE_DECLARATION % { 'id': id, 'data': 0 })
source_lines.append(SOURCE_DECLARATION % { 'id': id, 'data': data })
source_lines_empty.append(SOURCE_DECLARATION % { 'id': id, 'data': 0 })
# Build delay support functions
get_index_cases = [ ]