v8/src/platform-linux.cc

// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Platform specific code for Linux goes here. For the POSIX compatible parts
// the implementation is in platform-posix.cc.

#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <stdlib.h>
// Ubuntu Dapper requires memory pages to be marked as
// executable. Otherwise, OS raises an exception when executing code
// in that page.
#include <sys/types.h>  // mmap & munmap
#include <sys/mman.h>   // mmap & munmap
#include <sys/stat.h>   // open
#include <fcntl.h>      // open
#include <unistd.h>     // sysconf
#ifdef __GLIBC__
#include <execinfo.h>   // backtrace, backtrace_symbols
#endif  // def __GLIBC__
#include <strings.h>    // index
#include <errno.h>
#include <stdarg.h>
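
// MAP_TYPE is defined in <sys/mman.h> as a mask for the mapping type bits and
// clashes with V8's own MAP_TYPE instance type constant pulled in through
// v8.h, so it is undefined here before the V8 headers are included.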
#undef MAP_TYPE
#include "v8.h"
#include "platform.h"

namespace v8 {
namespace internal {

// 0 is never a valid thread id on Linux since tids and pids share a
// name space and pid 0 is reserved (see man 2 kill).
static const pthread_t kNoThread = (pthread_t) 0;

double ceiling(double x) {
  return ceil(x);
}

void OS::Setup() {
  // Seed the random number generator.
  // Convert the current time to a 64-bit integer first, before converting it
  // to an unsigned. Going directly can cause an overflow and the seed to be
  // set to all ones. The seed will be identical for different instances that
  // call this setup code within the same millisecond.
  uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
  srandom(static_cast<unsigned int>(seed));
}

double OS::nan_value() {
  return NAN;
}

int OS::ActivationFrameAlignment() {
#ifdef V8_TARGET_ARCH_ARM
  // On EABI ARM targets this is required for fp correctness in the
  // runtime system.
  return 8;
#else
  // With gcc 4.4 the tree vectorization optimiser can generate code
  // that requires 16 byte alignment such as movdqa on x86.
  return 16;
#endif
}

// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification). The estimate is conservative, i.e., not all addresses in
// 'allocated' space are actually allocated to our heap. The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);

static void UpdateAllocatedSpaceLimits(void* address, int size) {
  lowest_ever_allocated = Min(lowest_ever_allocated, address);
  highest_ever_allocated =
      Max(highest_ever_allocated,
          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
}

bool OS::IsOutsideAllocatedSpace(void* address) {
  return address < lowest_ever_allocated || address >= highest_ever_allocated;
}

size_t OS::AllocateAlignment() {
  return sysconf(_SC_PAGESIZE);
}
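
// Memory is requested from the OS with an anonymous, private mmap. The size
// is rounded up to whole pages (sysconf(_SC_PAGESIZE), typically 4 KB on
// Linux) and the actual amount obtained is reported back through *allocated.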
void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool is_executable) {
  const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mbase == MAP_FAILED) {
    LOG(StringEvent("OS::Allocate", "mmap failed"));
    return NULL;
  }
  *allocated = msize;
  UpdateAllocatedSpaceLimits(mbase, msize);
  return mbase;
}

void OS::Free(void* address, const size_t size) {
  // TODO(1240712): munmap has a return value which is ignored here.
  munmap(address, size);
}

#ifdef ENABLE_HEAP_PROTECTION

void OS::Protect(void* address, size_t size) {
  // TODO(1240712): mprotect has a return value which is ignored here.
  mprotect(address, size, PROT_READ);
}

void OS::Unprotect(void* address, size_t size, bool is_executable) {
  // TODO(1240712): mprotect has a return value which is ignored here.
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  mprotect(address, size, prot);
}

#endif

void OS::Sleep(int milliseconds) {
  unsigned int ms = static_cast<unsigned int>(milliseconds);
  usleep(1000 * ms);
}

void OS::Abort() {
  // Redirect to std abort to signal abnormal program termination.
  abort();
}

void OS::DebugBreak() {
// TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
// which is the architecture of generated code).
#if defined(__arm__) || defined(__thumb__)
  asm("bkpt 0");
#else
  asm("int $3");
#endif
}

class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
    : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
 private:
  FILE* file_;
  void* memory_;
  int size_;
};
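
// The file is created (or truncated), grown to |size| bytes by writing the
// initial contents, and then mapped MAP_SHARED so that updates to the memory
// are reflected in the file.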
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
    void* initial) {
  FILE* file = fopen(name, "w+");
  if (file == NULL) return NULL;
  int result = fwrite(initial, size, 1, file);
  if (result < 1) {
    fclose(file);
    return NULL;
  }
  void* memory =
      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
  return new PosixMemoryMappedFile(file, memory, size);
}

PosixMemoryMappedFile::~PosixMemoryMappedFile() {
  if (memory_) munmap(memory_, size_);
  fclose(file_);
}

#ifdef ENABLE_LOGGING_AND_PROFILING
static uintptr_t StringToULong(char* buffer) {
  return strtoul(buffer, NULL, 16);  // NOLINT
}
#endif
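
// Each line of /proc/self/maps has the form
//   08048000-08056000 r-xp 00000000 03:0c 64593   /usr/sbin/gpm
// i.e. an address range, the permission flags, and (for file mappings) the
// path of the mapped object. The parser below assumes 8 hex digit (32-bit)
// addresses and only logs mappings that are executable.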
void OS::LogSharedLibraryAddresses() {
#ifdef ENABLE_LOGGING_AND_PROFILING
  static const int MAP_LENGTH = 1024;
  int fd = open("/proc/self/maps", O_RDONLY);
  if (fd < 0) return;
  while (true) {
    char addr_buffer[11];
    addr_buffer[0] = '0';
    addr_buffer[1] = 'x';
    addr_buffer[10] = 0;
    int result = read(fd, addr_buffer + 2, 8);
    if (result < 8) break;
    uintptr_t start = StringToULong(addr_buffer);
    result = read(fd, addr_buffer + 2, 1);
    if (result < 1) break;
    if (addr_buffer[2] != '-') break;
    result = read(fd, addr_buffer + 2, 8);
    if (result < 8) break;
    uintptr_t end = StringToULong(addr_buffer);
    char buffer[MAP_LENGTH];
    int bytes_read = -1;
    do {
      bytes_read++;
      if (bytes_read >= MAP_LENGTH - 1)
        break;
      result = read(fd, buffer + bytes_read, 1);
      if (result < 1) break;
    } while (buffer[bytes_read] != '\n');
    buffer[bytes_read] = 0;
    // Ignore mappings that are not executable.
    if (buffer[3] != 'x') continue;
    char* start_of_path = index(buffer, '/');
    // If there is no filename for this line then log it as an anonymous
    // mapping and use the address as its name.
    if (start_of_path == NULL) {
      // 40 is enough to print a 64 bit address range.
      ASSERT(sizeof(buffer) > 40);
      snprintf(buffer,
               sizeof(buffer),
               "%08" V8PRIxPTR "-%08" V8PRIxPTR,
               start,
               end);
      LOG(SharedLibraryEvent(buffer, start, end));
    } else {
      buffer[bytes_read] = 0;
      LOG(SharedLibraryEvent(start_of_path, start, end));
    }
  }
  close(fd);
#endif
}

int OS::StackWalk(Vector<OS::StackFrame> frames) {
  // backtrace is a glibc extension.
#ifdef __GLIBC__
  int frames_size = frames.length();
  void** addresses = NewArray<void*>(frames_size);
  int frames_count = backtrace(addresses, frames_size);
  char** symbols;
  symbols = backtrace_symbols(addresses, frames_count);
  if (symbols == NULL) {
    DeleteArray(addresses);
    return kStackWalkError;
  }
  for (int i = 0; i < frames_count; i++) {
    frames[i].address = addresses[i];
    // Format a text representation of the frame based on the information
    // available.
    SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
             "%s",
             symbols[i]);
    // Make sure line termination is in place.
    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
  }
  DeleteArray(addresses);
  free(symbols);
  return frames_count;
#else  // ndef __GLIBC__
  return 0;
#endif  // ndef __GLIBC__
}

// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
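
// VirtualMemory reserves a range of addresses without committing any memory:
// the initial mapping is PROT_NONE and MAP_NORESERVE, so it consumes address
// space but no swap. Commit() later remaps parts of the range as readable and
// writable (and optionally executable) with MAP_FIXED.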
VirtualMemory::VirtualMemory(size_t size) {
  address_ = mmap(NULL, size, PROT_NONE,
                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                  kMmapFd, kMmapFdOffset);
  size_ = size;
}

VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
  }
}

bool VirtualMemory::IsReserved() {
  return address_ != MAP_FAILED;
}

bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(address, size, prot,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                         kMmapFd, kMmapFdOffset)) {
    return false;
  }
  UpdateAllocatedSpaceLimits(address, size);
  return true;
}

bool VirtualMemory::Uncommit(void* address, size_t size) {
  // MAP_FIXED is needed so that the existing committed pages are actually
  // replaced; without it the kernel treats the address only as a hint.
  return mmap(address, size, PROT_NONE,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
              kMmapFd, kMmapFdOffset) != MAP_FAILED;
}
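
// A minimal usage sketch (assuming a 1 MB reservation is wanted):
//
//   VirtualMemory reservation(1 * MB);
//   if (reservation.IsReserved()) {
//     // Commit the first page before touching it.
//     reservation.Commit(reservation.address(),
//                        OS::AllocateAlignment(), false);
//   }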

class ThreadHandle::PlatformData : public Malloced {
 public:
  explicit PlatformData(ThreadHandle::Kind kind) {
    Initialize(kind);
  }

  void Initialize(ThreadHandle::Kind kind) {
    switch (kind) {
      case ThreadHandle::SELF: thread_ = pthread_self(); break;
      case ThreadHandle::INVALID: thread_ = kNoThread; break;
    }
  }

  pthread_t thread_;  // Thread handle for pthread.
};

ThreadHandle::ThreadHandle(Kind kind) {
  data_ = new PlatformData(kind);
}

void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
  data_->Initialize(kind);
}

ThreadHandle::~ThreadHandle() {
  delete data_;
}

bool ThreadHandle::IsSelf() const {
  return pthread_equal(data_->thread_, pthread_self());
}

bool ThreadHandle::IsValid() const {
  return data_->thread_ != kNoThread;
}

Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
}

Thread::~Thread() {
}

static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // This is also initialized by the first argument to pthread_create() but we
  // don't know which thread will run first (the original thread or the new
  // one) so we initialize it here too.
  thread->thread_handle_data()->thread_ = pthread_self();
  ASSERT(thread->IsValid());
  thread->Run();
  return NULL;
}

void Thread::Start() {
  pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
  ASSERT(IsValid());
}

void Thread::Join() {
  pthread_join(thread_handle_data()->thread_, NULL);
}

Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
  pthread_key_t key;
  int result = pthread_key_create(&key, NULL);
  USE(result);
  ASSERT(result == 0);
  return static_cast<LocalStorageKey>(key);
}

void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  int result = pthread_key_delete(pthread_key);
  USE(result);
  ASSERT(result == 0);
}

void* Thread::GetThreadLocal(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  return pthread_getspecific(pthread_key);
}

void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  pthread_setspecific(pthread_key, value);
}

void Thread::YieldCPU() {
  sched_yield();
}
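
// The mutex created here is recursive (PTHREAD_MUTEX_RECURSIVE), so the same
// thread may lock it again while already holding it; every Lock() must be
// balanced by an Unlock().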
class LinuxMutex : public Mutex {
 public:
  LinuxMutex() {
    pthread_mutexattr_t attrs;
    int result = pthread_mutexattr_init(&attrs);
    ASSERT(result == 0);
    result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
    ASSERT(result == 0);
    result = pthread_mutex_init(&mutex_, &attrs);
    ASSERT(result == 0);
  }

  virtual ~LinuxMutex() { pthread_mutex_destroy(&mutex_); }

  virtual int Lock() {
    int result = pthread_mutex_lock(&mutex_);
    return result;
  }

  virtual int Unlock() {
    int result = pthread_mutex_unlock(&mutex_);
    return result;
  }

 private:
  pthread_mutex_t mutex_;  // Pthread mutex for POSIX platforms.
};

Mutex* OS::CreateMutex() {
  return new LinuxMutex();
}

class LinuxSemaphore : public Semaphore {
 public:
  explicit LinuxSemaphore(int count) { sem_init(&sem_, 0, count); }
  virtual ~LinuxSemaphore() { sem_destroy(&sem_); }

  virtual void Wait();
  virtual bool Wait(int timeout);
  virtual void Signal() { sem_post(&sem_); }
 private:
  sem_t sem_;
};

void LinuxSemaphore::Wait() {
  while (true) {
    int result = sem_wait(&sem_);
    if (result == 0) return;  // Successfully got semaphore.
    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
  }
}

#ifndef TIMEVAL_TO_TIMESPEC
#define TIMEVAL_TO_TIMESPEC(tv, ts) do {    \
    (ts)->tv_sec = (tv)->tv_sec;            \
    (ts)->tv_nsec = (tv)->tv_usec * 1000;   \
} while (false)
#endif
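
// The timeout is given in microseconds (see kOneSecondMicros below), while
// sem_timedwait() takes an absolute deadline, so the relative timeout is
// added to the current wall-clock time first.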
bool LinuxSemaphore::Wait(int timeout) {
  const long kOneSecondMicros = 1000000;  // NOLINT
  // Split timeout into second and microsecond parts.
  struct timeval delta;
  delta.tv_usec = timeout % kOneSecondMicros;
  delta.tv_sec = timeout / kOneSecondMicros;

  struct timeval current_time;
  // Get the current time.
  if (gettimeofday(&current_time, NULL) == -1) {
    return false;
  }

  // Calculate time for end of timeout.
  struct timeval end_time;
  timeradd(&current_time, &delta, &end_time);

  struct timespec ts;
  TIMEVAL_TO_TIMESPEC(&end_time, &ts);
  // Wait for semaphore signalled or timeout.
  while (true) {
    int result = sem_timedwait(&sem_, &ts);
    if (result == 0) return true;  // Successfully got semaphore.
    if (result > 0) {
      // For glibc prior to 2.3.4 sem_timedwait returns the error instead
      // of -1.
      errno = result;
      result = -1;
    }
    if (result == -1 && errno == ETIMEDOUT) return false;  // Timeout.
    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
  }
}

Semaphore* OS::CreateSemaphore(int count) {
  return new LinuxSemaphore(count);
}

#ifdef ENABLE_LOGGING_AND_PROFILING

static Sampler* active_sampler_ = NULL;

#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
// Android runs a fairly new Linux kernel, so signal info is there,
// but the C library doesn't have the structs defined.

struct sigcontext {
  uint32_t trap_no;
  uint32_t error_code;
  uint32_t oldmask;
  uint32_t gregs[16];
  uint32_t arm_cpsr;
  uint32_t fault_address;
};
typedef uint32_t __sigset_t;
typedef struct sigcontext mcontext_t;
typedef struct ucontext {
  uint32_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  __sigset_t uc_sigmask;
} ucontext_t;
enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11};
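// In the ARM register file r15 is the program counter, r13 the stack pointer
// and r11 the frame pointer, which is what the aliases above are used for in
// the signal handler below.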
#endif

static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
  USE(info);
  if (signal != SIGPROF) return;
  if (active_sampler_ == NULL) return;

  TickSample sample;

  // If profiling, we extract the current pc and sp.
  if (active_sampler_->IsProfiling()) {
    // Extracting the sample from the context is extremely machine dependent.
    ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
    mcontext_t& mcontext = ucontext->uc_mcontext;
#if V8_HOST_ARCH_IA32
    sample.pc = mcontext.gregs[REG_EIP];
    sample.sp = mcontext.gregs[REG_ESP];
    sample.fp = mcontext.gregs[REG_EBP];
#elif V8_HOST_ARCH_X64
    sample.pc = mcontext.gregs[REG_RIP];
    sample.sp = mcontext.gregs[REG_RSP];
    sample.fp = mcontext.gregs[REG_RBP];
#elif V8_HOST_ARCH_ARM
// An undefined macro evaluates to 0, so this applies to Android's Bionic also.
#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    sample.pc = mcontext.gregs[R15];
    sample.sp = mcontext.gregs[R13];
    sample.fp = mcontext.gregs[R11];
#else
    sample.pc = mcontext.arm_pc;
    sample.sp = mcontext.arm_sp;
    sample.fp = mcontext.arm_fp;
#endif
#endif
    active_sampler_->SampleStack(&sample);
  }

  // We always sample the VM state.
  sample.state = Logger::state();

  active_sampler_->Tick(&sample);
}

class Sampler::PlatformData : public Malloced {
 public:
  PlatformData() {
    signal_handler_installed_ = false;
  }

  bool signal_handler_installed_;
  struct sigaction old_signal_handler_;
  struct itimerval old_timer_value_;
};

Sampler::Sampler(int interval, bool profiling)
    : interval_(interval), profiling_(profiling), active_(false) {
  data_ = new PlatformData();
}

Sampler::~Sampler() {
  delete data_;
}

void Sampler::Start() {
  // There can only be one active sampler at the time on POSIX
  // platforms.
  if (active_sampler_ != NULL) return;

  // Request profiling signals.
  struct sigaction sa;
  sa.sa_sigaction = ProfilerSignalHandler;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_SIGINFO;
  if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
  data_->signal_handler_installed_ = true;

  // Set the itimer to generate a tick for each interval.
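  // The interval is in milliseconds; for example, interval_ == 10 yields
  // it_value = it_interval = {0 s, 10000 us}.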
  itimerval itimer;
  itimer.it_interval.tv_sec = interval_ / 1000;
  itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
  itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
  itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
  setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);

  // Set this sampler as the active sampler.
  active_sampler_ = this;

  active_ = true;
}

void Sampler::Stop() {
  // Restore old signal handler.
  if (data_->signal_handler_installed_) {
    setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
    sigaction(SIGPROF, &data_->old_signal_handler_, 0);
    data_->signal_handler_installed_ = false;
  }

  // This sampler is no longer the active sampler.
  active_sampler_ = NULL;

  active_ = false;
}

#endif  // ENABLE_LOGGING_AND_PROFILING

} }  // namespace v8::internal