Add basic infrastructure for protecting V8's heap when leaving the VM

and unprotecting it when (re)entering.  The functionality is enabled
by the flag --protect-heap and requires V8 to be built with
ENABLE_HEAP_PROTECTION and ENABLE_LOGGING_AND_PROFILING defined.

Implemented on Linux and Windows but not yet for other platforms.

Review URL: http://codereview.chromium.org/53004

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@1595 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
This commit is contained in:
kmillikin@chromium.org 2009-03-24 12:47:53 +00:00
parent 1ba34bf86b
commit bc3fb11881
15 changed files with 337 additions and 50 deletions

View File

@ -337,8 +337,20 @@ DEFINE_bool(log_regexp, false, "Log regular expression execution.")
DEFINE_bool(sliding_state_window, false,
"Update sliding state window counters.")
DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
DEFINE_bool(oprofile, false,
"Enable JIT agent for OProfile.")
DEFINE_bool(oprofile, false, "Enable JIT agent for OProfile.")
//
// Heap protection flags
// Using heap protection requires ENABLE_LOGGING_AND_PROFILING as well.
//
#ifdef ENABLE_HEAP_PROTECTION
#undef FLAG
#define FLAG FLAG_FULL
DEFINE_bool(protect_heap, false,
            "Protect/unprotect V8's heap when leaving/entering the VM.")
#endif
//
// Disassembler only flags

View File

@ -359,7 +359,8 @@ struct AccessorDescriptor {
V(JS) \
V(GC) \
V(COMPILER) \
V(OTHER)
V(OTHER) \
V(EXTERNAL)
enum StateTag {
#define DEF_STATE_TAG(name) name,

View File

@ -2861,6 +2861,30 @@ void Heap::Shrink() {
}
#ifdef ENABLE_HEAP_PROTECTION
// Marks every heap space read-only by delegating to each space's
// Protect().  Invoked (via VMState) when execution leaves the VM for
// EXTERNAL code, so stray writes from the embedder fault instead of
// silently corrupting the heap.
void Heap::Protect() {
// new_space_ is held by value; the remaining spaces are pointers.
new_space_.Protect();
map_space_->Protect();
old_pointer_space_->Protect();
old_data_space_->Protect();
code_space_->Protect();
lo_space_->Protect();
}
// Restores write access (and executability, where applicable) to every
// heap space.  Invoked (via VMState) when execution (re)enters the VM
// from EXTERNAL code.  Mirrors Heap::Protect().
void Heap::Unprotect() {
new_space_.Unprotect();
map_space_->Unprotect();
old_pointer_space_->Unprotect();
old_data_space_->Unprotect();
code_space_->Unprotect();
lo_space_->Unprotect();
}
#endif
#ifdef DEBUG
class PrintHandleVisitor: public ObjectVisitor {

View File

@ -273,6 +273,12 @@ class Heap : public AllStatic {
return new_space_.allocation_limit_address();
}
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect the heap by marking all spaces read-only/writable.
static void Protect();
static void Unprotect();
#endif
// Allocates and initializes a new JavaScript object based on a
// constructor.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation

View File

@ -29,10 +29,12 @@
#include "v8.h"
#include "bootstrapper.h"
#include "log.h"
#include "platform.h"
#include "string-stream.h"
#include "macro-assembler.h"
#include "platform.h"
#include "serialize.h"
#include "string-stream.h"
namespace v8 { namespace internal {
@ -1115,10 +1117,23 @@ VMState::VMState(StateTag state) {
if (FLAG_log_state_changes) {
LOG(UncheckedStringEvent("Entering", StateToString(state_)));
if (previous_) {
if (previous_ != NULL) {
LOG(UncheckedStringEvent("From", StateToString(previous_->state_)));
}
}
#ifdef ENABLE_HEAP_PROTECTION
if (FLAG_protect_heap && previous_ != NULL) {
if (state_ == EXTERNAL) {
// We are leaving V8.
ASSERT(previous_ == NULL || previous_->state_ != EXTERNAL);
Heap::Protect();
} else {
// Are we entering V8?
if (previous_->state_ == EXTERNAL) Heap::Unprotect();
}
}
#endif
}
@ -1127,10 +1142,22 @@ VMState::~VMState() {
if (FLAG_log_state_changes) {
LOG(UncheckedStringEvent("Leaving", StateToString(state_)));
if (previous_) {
if (previous_ != NULL) {
LOG(UncheckedStringEvent("To", StateToString(previous_->state_)));
}
}
#ifdef ENABLE_HEAP_PROTECTION
if (FLAG_protect_heap && previous_ != NULL) {
if (state_ == EXTERNAL) {
// Are we (re)entering V8?
if (previous_->state_ != EXTERNAL) Heap::Unprotect();
} else {
// Are we leaving V8?
if (previous_->state_ == EXTERNAL) Heap::Protect();
}
}
#endif
}
#endif

View File

@ -257,6 +257,20 @@ void OS::Free(void* buf, const size_t length) {
}
#ifdef ENABLE_HEAP_PROTECTION
// Heap protection is not implemented on this platform yet; aborts if
// reached (requires --protect-heap to be set, so normal runs are safe).
void OS::Protect(void* address, size_t size) {
UNIMPLEMENTED();
}
// Not implemented on this platform yet.  When implemented,
// is_executable selects whether execute permission is restored.
void OS::Unprotect(void* address, size_t size, bool is_executable) {
UNIMPLEMENTED();
}
#endif
void OS::Sleep(int milliseconds) {
unsigned int ms = static_cast<unsigned int>(milliseconds);
usleep(1000 * ms);

View File

@ -234,9 +234,9 @@ size_t OS::AllocateAlignment() {
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool executable) {
bool is_executable) {
const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mbase == MAP_FAILED) {
LOG(StringEvent("OS::Allocate", "mmap failed"));
@ -248,12 +248,29 @@ void* OS::Allocate(const size_t requested,
}
void OS::Free(void* buf, const size_t length) {
void OS::Free(void* address, const size_t size) {
// TODO(1240712): munmap has a return value which is ignored here.
munmap(buf, length);
munmap(address, size);
}
#ifdef ENABLE_HEAP_PROTECTION
// Marks the region [address, address + size) read-only.
// address/size are assumed page-aligned as required by mprotect —
// callers pass whole chunks/pages allocated by this class.
void OS::Protect(void* address, size_t size) {
// TODO(1240712): mprotect has a return value which is ignored here.
mprotect(address, size, PROT_READ);
}
// Restores read/write access — plus execute access when is_executable
// is true — to the region [address, address + size).
void OS::Unprotect(void* address, size_t size, bool is_executable) {
// TODO(1240712): mprotect has a return value which is ignored here.
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
mprotect(address, size, prot);
}
#endif
void OS::Sleep(int milliseconds) {
unsigned int ms = static_cast<unsigned int>(milliseconds);
usleep(1000 * ms);
@ -267,7 +284,7 @@ void OS::Abort() {
void OS::DebugBreak() {
#if defined (__arm__) || defined(__thumb__)
#ifdef ARM
asm("bkpt 0");
#else
asm("int $3");
@ -418,8 +435,8 @@ bool VirtualMemory::IsReserved() {
}
bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
if (MAP_FAILED == mmap(address, size, prot,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
kMmapFd, kMmapFdOffset)) {

View File

@ -228,9 +228,9 @@ size_t OS::AllocateAlignment() {
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool executable) {
bool is_executable) {
const size_t msize = RoundUp(requested, getpagesize());
int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) {
LOG(StringEvent("OS::Allocate", "mmap failed"));
@ -242,12 +242,26 @@ void* OS::Allocate(const size_t requested,
}
void OS::Free(void* buf, const size_t length) {
void OS::Free(void* address, const size_t size) {
// TODO(1240712): munmap has a return value which is ignored here.
munmap(buf, length);
munmap(address, size);
}
#ifdef ENABLE_HEAP_PROTECTION
// Heap protection is not implemented on this platform yet.
void OS::Protect(void* address, size_t size) {
UNIMPLEMENTED();
}
// Not implemented on this platform yet.
void OS::Unprotect(void* address, size_t size, bool is_executable) {
UNIMPLEMENTED();
}
#endif
void OS::Sleep(int milliseconds) {
usleep(1000 * milliseconds);
}
@ -370,8 +384,8 @@ bool VirtualMemory::IsReserved() {
}
bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
if (MAP_FAILED == mmap(address, size, prot,
MAP_PRIVATE | MAP_ANON | MAP_FIXED,
kMmapFd, kMmapFdOffset)) {
@ -389,6 +403,7 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
kMmapFd, kMmapFdOffset) != MAP_FAILED;
}
class ThreadHandle::PlatformData : public Malloced {
public:
explicit PlatformData(ThreadHandle::Kind kind) {

View File

@ -173,6 +173,20 @@ void OS::Free(void* buf, const size_t length) {
}
#ifdef ENABLE_HEAP_PROTECTION
// Heap protection is not implemented on this platform yet.
void OS::Protect(void* address, size_t size) {
UNIMPLEMENTED();
}
// Not implemented on this platform yet.
void OS::Unprotect(void* address, size_t size, bool is_executable) {
UNIMPLEMENTED();
}
#endif
void OS::Sleep(int milliseconds) {
UNIMPLEMENTED();
}

View File

@ -801,12 +801,12 @@ size_t OS::AllocateAlignment() {
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool executable) {
bool is_executable) {
// VirtualAlloc rounds allocated size to page size automatically.
size_t msize = RoundUp(requested, GetPageSize());
// Windows XP SP2 allows Data Excution Prevention (DEP).
int prot = executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
LPVOID mbase = VirtualAlloc(NULL, msize, MEM_COMMIT | MEM_RESERVE, prot);
if (mbase == NULL) {
LOG(StringEvent("OS::Allocate", "VirtualAlloc failed"));
@ -821,13 +821,32 @@ void* OS::Allocate(const size_t requested,
}
void OS::Free(void* buf, const size_t length) {
void OS::Free(void* address, const size_t size) {
// TODO(1240712): VirtualFree has a return value which is ignored here.
VirtualFree(buf, 0, MEM_RELEASE);
USE(length);
VirtualFree(address, 0, MEM_RELEASE);
USE(size);
}
#ifdef ENABLE_HEAP_PROTECTION
// Marks the region [address, address + size) read-only.
void OS::Protect(void* address, size_t size) {
// TODO(1240712): VirtualProtect has a return value which is ignored here.
// VirtualProtect requires the old-protection out-parameter even though
// the value is not used.
DWORD old_protect;
VirtualProtect(address, size, PAGE_READONLY, &old_protect);
}
// Restores read/write access — with execute access when is_executable
// is true — to the region [address, address + size).
void OS::Unprotect(void* address, size_t size, bool is_executable) {
// TODO(1240712): VirtualProtect has a return value which is ignored here.
DWORD new_protect = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
DWORD old_protect;
VirtualProtect(address, size, new_protect, &old_protect);
}
#endif
void OS::Sleep(int milliseconds) {
::Sleep(milliseconds);
}
@ -1299,8 +1318,8 @@ VirtualMemory::~VirtualMemory() {
}
bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
int prot = executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
return false;
}

View File

@ -168,11 +168,17 @@ class OS {
// Returns the address of allocated memory, or NULL if failed.
static void* Allocate(const size_t requested,
size_t* allocated,
bool executable);
static void Free(void* buf, const size_t length);
bool is_executable);
static void Free(void* address, const size_t size);
// Get the Alignment guaranteed by Allocate().
static size_t AllocateAlignment();
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect a block of memory by marking it read-only/writable.
static void Protect(void* address, size_t size);
static void Unprotect(void* address, size_t size, bool is_executable);
#endif
// Returns an indication of whether a pointer is in a space that
// has been allocated by Allocate(). This method may conservatively
// always return false, but giving more accurate information may
@ -267,7 +273,7 @@ class VirtualMemory {
size_t size() { return size_; }
// Commits real memory. Returns whether the operation succeeded.
bool Commit(void* address, size_t size, bool executable);
bool Commit(void* address, size_t size, bool is_executable);
// Uncommit real memory. Returns whether the operation succeeded.
bool Uncommit(void* address, size_t size);

View File

@ -219,6 +219,43 @@ PagedSpace* MemoryAllocator::PageOwner(Page* page) {
}
// Returns true if the given address lies inside the initial chunk.
// Returns false when no initial chunk has been reserved.
bool MemoryAllocator::InInitialChunk(Address address) {
  if (initial_chunk_ == NULL) return false;

  Address base = static_cast<Address>(initial_chunk_->address());
  Address limit = base + initial_chunk_->size();
  return (address >= base) && (address < limit);
}
#ifdef ENABLE_HEAP_PROTECTION
// Thin wrapper: marks [start, start + size) read-only via the
// platform-specific primitive.
void MemoryAllocator::Protect(Address start, size_t size) {
OS::Protect(start, size);
}
// Thin wrapper: restores write (and possibly execute) access to
// [start, start + size).
// NOTE(review): OS::Unprotect takes a bool; this relies on the implicit
// Executability -> bool conversion (assumes NOT_EXECUTABLE == 0) — confirm.
void MemoryAllocator::Unprotect(Address start,
size_t size,
Executability executable) {
OS::Unprotect(start, size, executable);
}
// Marks the entire chunk containing the given page read-only.
void MemoryAllocator::ProtectChunkFromPage(Page* page) {
int id = GetChunkId(page);
OS::Protect(chunks_[id].address(), chunks_[id].size());
}
// Restores write access to the entire chunk containing the given page.
// Execute permission is re-granted when the chunk's owning space is
// EXECUTABLE (e.g. code space).
void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
int id = GetChunkId(page);
OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
chunks_[id].owner()->executable() == EXECUTABLE);
}
#endif
// --------------------------------------------------------------------------
// PagedSpace

View File

@ -302,9 +302,8 @@ Page* MemoryAllocator::CommitPages(Address start, size_t size,
*num_pages = PagesInChunk(start, size);
ASSERT(*num_pages > 0);
ASSERT(initial_chunk_ != NULL);
ASSERT(initial_chunk_->address() <= start);
ASSERT(start + size <= reinterpret_cast<Address>(initial_chunk_->address())
+ initial_chunk_->size());
ASSERT(InInitialChunk(start));
ASSERT(InInitialChunk(start + size - 1));
if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
return Page::FromAddress(NULL);
}
@ -325,9 +324,8 @@ bool MemoryAllocator::CommitBlock(Address start,
ASSERT(start != NULL);
ASSERT(size > 0);
ASSERT(initial_chunk_ != NULL);
ASSERT(initial_chunk_->address() <= start);
ASSERT(start + size <= reinterpret_cast<Address>(initial_chunk_->address())
+ initial_chunk_->size());
ASSERT(InInitialChunk(start));
ASSERT(InInitialChunk(start + size - 1));
if (!initial_chunk_->Commit(start, size, executable)) return false;
Counters::memory_allocated.Increment(size);
@ -407,14 +405,7 @@ void MemoryAllocator::DeleteChunk(int chunk_id) {
// We cannot free a chunk contained in the initial chunk because it was not
// allocated with AllocateRawMemory. Instead we uncommit the virtual
// memory.
bool in_initial_chunk = false;
if (initial_chunk_ != NULL) {
Address start = static_cast<Address>(initial_chunk_->address());
Address end = start + initial_chunk_->size();
in_initial_chunk = (start <= c.address()) && (c.address() < end);
}
if (in_initial_chunk) {
if (InInitialChunk(c.address())) {
// TODO(1240712): VirtualMemory::Uncommit has a return value which
// is ignored here.
initial_chunk_->Uncommit(c.address(), c.size());
@ -529,6 +520,28 @@ void PagedSpace::TearDown() {
}
#ifdef ENABLE_HEAP_PROTECTION
// Marks all pages of this space read-only.  Protection is applied one
// chunk at a time: protect the chunk containing the current page, then
// skip to the page following the last page in that chunk.
void PagedSpace::Protect() {
Page* page = first_page_;
while (page->is_valid()) {
MemoryAllocator::ProtectChunkFromPage(page);
page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
}
}
// Restores write access to all pages of this space, one chunk at a
// time.  Mirrors PagedSpace::Protect().
void PagedSpace::Unprotect() {
Page* page = first_page_;
while (page->is_valid()) {
MemoryAllocator::UnprotectChunkFromPage(page);
page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
}
}
#endif
void PagedSpace::ClearRSet() {
PageIterator it(this, PageIterator::ALL_PAGES);
while (it.has_next()) {
@ -834,6 +847,24 @@ void NewSpace::TearDown() {
}
#ifdef ENABLE_HEAP_PROTECTION
// Marks both semispaces read-only: Capacity() bytes starting from the
// low end of the to-space and of the from-space.
void NewSpace::Protect() {
MemoryAllocator::Protect(ToSpaceLow(), Capacity());
MemoryAllocator::Protect(FromSpaceLow(), Capacity());
}
// Restores write access to both semispaces, re-granting each
// semispace's own executability setting.
void NewSpace::Unprotect() {
MemoryAllocator::Unprotect(ToSpaceLow(), Capacity(),
to_space_.executable());
MemoryAllocator::Unprotect(FromSpaceLow(), Capacity(),
from_space_.executable());
}
#endif
void NewSpace::Flip() {
SemiSpace tmp = from_space_;
from_space_ = to_space_;
@ -2242,6 +2273,30 @@ void LargeObjectSpace::TearDown() {
}
#ifdef ENABLE_HEAP_PROTECTION
// Marks every large-object chunk read-only by walking the singly
// linked list of chunks.
void LargeObjectSpace::Protect() {
LargeObjectChunk* chunk = first_chunk_;
while (chunk != NULL) {
MemoryAllocator::Protect(chunk->address(), chunk->size());
chunk = chunk->next();
}
}
// Restores write access to every large-object chunk.  Each chunk holds
// a single object, so its required executability is derived from
// whether that object is code.
void LargeObjectSpace::Unprotect() {
LargeObjectChunk* chunk = first_chunk_;
while (chunk != NULL) {
bool is_code = chunk->GetObject()->IsCode();
MemoryAllocator::Unprotect(chunk->address(), chunk->size(),
is_code ? EXECUTABLE : NOT_EXECUTABLE);
chunk = chunk->next();
}
}
#endif
Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
int object_size,
Executability executable) {

View File

@ -277,16 +277,22 @@ class Space : public Malloced {
public:
Space(AllocationSpace id, Executability executable)
: id_(id), executable_(executable) {}
virtual ~Space() {}
// Does the space need executable memory?
Executability executable() { return executable_; }
// Identity used in error reporting.
AllocationSpace identity() { return id_; }
virtual int Size() = 0;
#ifdef DEBUG
virtual void Verify() = 0;
virtual void Print() = 0;
#endif
private:
AllocationSpace id_;
Executability executable_;
@ -396,6 +402,17 @@ class MemoryAllocator : public AllStatic {
static Page* FindFirstPageInSameChunk(Page* p);
static Page* FindLastPageInSameChunk(Page* p);
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect a block of memory by marking it read-only/writable.
static inline void Protect(Address start, size_t size);
static inline void Unprotect(Address start, size_t size,
Executability executable);
// Protect/unprotect a chunk given a page in the chunk.
static inline void ProtectChunkFromPage(Page* page);
static inline void UnprotectChunkFromPage(Page* page);
#endif
#ifdef DEBUG
// Reports statistic info of the space.
static void ReportStatistics();
@ -460,6 +477,9 @@ class MemoryAllocator : public AllStatic {
// Returns the chunk id that a page belongs to.
static inline int GetChunkId(Page* p);
// True if the address lies in the initial chunk.
static inline bool InInitialChunk(Address address);
// Initializes pages in a chunk. Returns the first page address.
// This function and GetChunkId() are provided for the mark-compact
// collector to rebuild page headers in the from space, which is
@ -669,7 +689,6 @@ class AllocationStats BASE_EMBEDDED {
class PagedSpace : public Space {
friend class PageIterator;
public:
// Creates a space with a maximum capacity, and an id.
PagedSpace(int max_capacity, AllocationSpace id, Executability executable);
@ -764,6 +783,12 @@ class PagedSpace : public Space {
// Ensures that the capacity is at least 'capacity'. Returns false on failure.
bool EnsureCapacity(int capacity);
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect the space by marking it read-only/writable.
void Protect();
void Unprotect();
#endif
#ifdef DEBUG
// Print meta info and objects in this space.
virtual void Print();
@ -834,6 +859,8 @@ class PagedSpace : public Space {
// Returns the number of total pages in this space.
int CountTotalPages();
#endif
friend class PageIterator;
};
@ -1117,6 +1144,12 @@ class NewSpace : public Space {
bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect the space by marking it read-only/writable.
virtual void Protect();
virtual void Unprotect();
#endif
#ifdef DEBUG
// Verify the active semispace.
virtual void Verify();
@ -1554,7 +1587,6 @@ class LargeObjectChunk {
class LargeObjectSpace : public Space {
friend class LargeObjectIterator;
public:
explicit LargeObjectSpace(AllocationSpace id);
virtual ~LargeObjectSpace() {}
@ -1606,6 +1638,12 @@ class LargeObjectSpace : public Space {
// Checks whether the space is empty.
bool IsEmpty() { return first_chunk_ == NULL; }
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect the space by marking it read-only/writable.
void Protect();
void Unprotect();
#endif
#ifdef DEBUG
virtual void Verify();
virtual void Print();
@ -1635,6 +1673,8 @@ class LargeObjectSpace : public Space {
// required for extra_object_bytes of extra pointers (in bytes).
static inline int ExtraRSetBytesFor(int extra_object_bytes);
friend class LargeObjectIterator;
public:
TRACK_MEMORY("LargeObjectSpace")
};

View File

@ -163,7 +163,7 @@ class SimpleProgressIndicator(ProgressIndicator):
print failed.output.stdout.strip()
print "Command: %s" % EscapeCommand(failed.command)
if failed.HasCrashed():
print "--- CRASHED ---"
print "--- CRASHED ---"
if len(self.failed) == 0:
print "==="
print "=== All tests succeeded"
@ -244,7 +244,7 @@ class CompactProgressIndicator(ProgressIndicator):
print self.templates['stderr'] % stderr
print "Command: %s" % EscapeCommand(output.command)
if output.HasCrashed():
print "--- CRASHED ---"
print "--- CRASHED ---"
def Truncate(self, str, length):
if length and (len(str) > (length - 3)):
@ -345,7 +345,7 @@ class TestCase(object):
def GetSource(self):
return "(no source available)"
def RunCommand(self, command):
full_command = self.context.processor(command)
output = Execute(full_command, self.context, self.context.timeout)
@ -411,7 +411,7 @@ def Win32SetErrorMode(mode):
except ImportError:
pass
return prev_error_mode
def RunProcess(context, timeout, args, **rest):
if context.verbose: print "#", " ".join(args)
popen_args = args