// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

#ifndef V8_ASSEMBLER_H_
#define V8_ASSEMBLER_H_

#include <forward_list>
#include <iosfwd>
#include <map>

#include "src/allocation.h"
#include "src/code-reference.h"
#include "src/contexts.h"
#include "src/deoptimize-reason.h"
#include "src/double.h"
#include "src/external-reference.h"
#include "src/flags.h"
#include "src/globals.h"
#include "src/label.h"
#include "src/objects.h"
#include "src/register-configuration.h"
#include "src/reglist.h"

namespace v8 {

// Forward declarations.
class ApiFunction;

namespace internal {

// Forward declarations.
class EmbeddedData;
class InstructionStream;
class Isolate;
class SCTableReference;
class SourcePosition;
class StatsCounter;

// -----------------------------------------------------------------------------
// Optimization for far-jmp like instructions that can be replaced by shorter
// ones.

class JumpOptimizationInfo {
 public:
  bool is_collecting() const { return stage_ == kCollection; }
  bool is_optimizing() const { return stage_ == kOptimization; }
  void set_optimizing() { stage_ = kOptimization; }

  bool is_optimizable() const { return optimizable_; }
  void set_optimizable() { optimizable_ = true; }

  // Used to verify that the instruction sequence is the same in the two
  // stages.
  size_t hash_code() const { return hash_code_; }
  void set_hash_code(size_t hash_code) { hash_code_ = hash_code; }

  std::vector<uint32_t>& farjmp_bitmap() { return farjmp_bitmap_; }

 private:
  enum { kCollection, kOptimization } stage_ = kCollection;
  bool optimizable_ = false;
  std::vector<uint32_t> farjmp_bitmap_;
  size_t hash_code_ = 0u;
};
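
// A minimal two-pass usage sketch, assuming a caller that generates the same
// code twice (GenerateCode is a hypothetical placeholder for that codegen):
//
//   JumpOptimizationInfo jump_opt;
//   assembler->set_jump_optimization_info(&jump_opt);
//   GenerateCode(assembler);    // Collection stage: records far-jmp sites.
//   if (jump_opt.is_optimizable()) {
//     jump_opt.set_optimizing();
//     GenerateCode(assembler);  // Optimization stage: emits shorter jumps.
//   }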

class HeapObjectRequest {
 public:
  explicit HeapObjectRequest(double heap_number, int offset = -1);
  explicit HeapObjectRequest(CodeStub* code_stub, int offset = -1);

  enum Kind { kHeapNumber, kCodeStub };
  Kind kind() const { return kind_; }

  double heap_number() const {
    DCHECK_EQ(kind(), kHeapNumber);
    return value_.heap_number;
  }

  CodeStub* code_stub() const {
    DCHECK_EQ(kind(), kCodeStub);
    return value_.code_stub;
  }

  // The code buffer offset at the time of the request.
  int offset() const {
    DCHECK_GE(offset_, 0);
    return offset_;
  }
  void set_offset(int offset) {
    DCHECK_LT(offset_, 0);
    offset_ = offset;
    DCHECK_GE(offset_, 0);
  }

 private:
  Kind kind_;

  union {
    double heap_number;
    CodeStub* code_stub;
  } value_;

  int offset_;
};
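
// A sketch of the intended flow, from inside a platform assembler (the exact
// call sites live in the per-architecture assemblers and are assumed here):
//
//   // While emitting code that embeds a heap number not yet allocatable:
//   RequestHeapObject(HeapObjectRequest(1.5, pc_offset()));
//   // ... emit a placeholder in the instruction stream ...
//   // After assembly, Assembler::AllocateAndInstallRequestedHeapObjects()
//   // allocates each object and patches it in at request.offset().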

// -----------------------------------------------------------------------------
// Platform independent assembler base class.

enum class CodeObjectRequired { kNo, kYes };

class AssemblerBase : public Malloced {
 public:
  struct Options {
    // Recording reloc info for external references and off-heap targets is
    // needed whenever code is serialized, e.g. into the snapshot or as a WASM
    // module. This flag allows this reloc info to be disabled for code that
    // will not survive process destruction.
    bool record_reloc_info_for_serialization = true;
    // Enables access to external references by computing a delta from the
    // root array. Only valid if code will not survive the process.
    bool enable_root_array_delta_access = false;
    // Enables specific assembler sequences only used for the simulator.
    bool enable_simulator_code = false;
    // Enables use of isolate-independent constants, indirected through the
    // root array (macro assembler feature).
    bool isolate_independent_code = false;
    // Enables the use of isolate-independent builtins through an off-heap
    // trampoline (macro assembler feature).
    bool inline_offheap_trampolines = false;
    // On some platforms, all code is within a given range in the process,
    // and the start of this range is configured here.
    Address code_range_start = 0;
  };

  static Options DefaultOptions(Isolate* isolate,
                                bool explicitly_support_serialization = false);

  AssemblerBase(const Options& options, void* buffer, int buffer_size);
  virtual ~AssemblerBase();

  const Options& options() const { return options_; }

  bool emit_debug_code() const { return emit_debug_code_; }
  void set_emit_debug_code(bool value) { emit_debug_code_ = value; }

  bool predictable_code_size() const { return predictable_code_size_; }
  void set_predictable_code_size(bool value) { predictable_code_size_ = value; }

  uint64_t enabled_cpu_features() const { return enabled_cpu_features_; }
  void set_enabled_cpu_features(uint64_t features) {
    enabled_cpu_features_ = features;
  }
  // Features are usually enabled by CpuFeatureScope, which also asserts that
  // the features are supported before they are enabled.
  bool IsEnabled(CpuFeature f) {
    return (enabled_cpu_features_ & (static_cast<uint64_t>(1) << f)) != 0;
  }
  void EnableCpuFeature(CpuFeature f) {
    enabled_cpu_features_ |= (static_cast<uint64_t>(1) << f);
  }

  bool is_constant_pool_available() const {
    if (FLAG_enable_embedded_constant_pool) {
      return constant_pool_available_;
    } else {
      // Embedded constant pool not supported on this architecture.
      UNREACHABLE();
    }
  }

  JumpOptimizationInfo* jump_optimization_info() {
    return jump_optimization_info_;
  }
  void set_jump_optimization_info(JumpOptimizationInfo* jump_opt) {
    jump_optimization_info_ = jump_opt;
  }

  // Overwrite a host NaN with a quiet target NaN. Used by mksnapshot for
  // cross-snapshotting.
  static void QuietNaN(HeapObject* nan) {}

  int pc_offset() const { return static_cast<int>(pc_ - buffer_); }

  // This function is called when code generation is aborted, so that
  // the assembler could clean up internal data structures.
  virtual void AbortedCodeGeneration() {}
  // Debugging
  void Print(Isolate* isolate);

  static const int kMinimalBufferSize = 4 * KB;

  static void FlushICache(void* start, size_t size);
  static void FlushICache(Address start, size_t size) {
    return FlushICache(reinterpret_cast<void*>(start), size);
  }

  // Used to print the name of some special registers.
  static const char* GetSpecialRegisterName(int code) { return "UNKNOWN"; }

 protected:
  // The buffer into which code and relocation info are generated. It could
  // either be owned by the assembler or be provided externally.
  byte* buffer_;
  int buffer_size_;
  bool own_buffer_;
  std::forward_list<HeapObjectRequest> heap_object_requests_;
  // The program counter, which points into the buffer above and moves forward.
  // TODO(jkummerow): This should probably have type {Address}.
  byte* pc_;

  void set_constant_pool_available(bool available) {
    if (FLAG_enable_embedded_constant_pool) {
      constant_pool_available_ = available;
    } else {
      // Embedded constant pool not supported on this architecture.
      UNREACHABLE();
    }
  }

  // {RequestHeapObject} records the need for a future heap number allocation
  // or code stub generation. After code assembly, each platform's
  // {Assembler::AllocateAndInstallRequestedHeapObjects} will allocate these
  // objects and place them where they are expected (determined by the pc
  // offset associated with each request).
  void RequestHeapObject(HeapObjectRequest request);

 private:
  const Options options_;
  uint64_t enabled_cpu_features_;
  bool emit_debug_code_;
  bool predictable_code_size_;

  // Indicates whether the constant pool can be accessed, which is only
  // possible if the pp register points to the current code object's constant
  // pool.
  bool constant_pool_available_;

  JumpOptimizationInfo* jump_optimization_info_;

  // Constant pool.
  friend class FrameAndConstantPoolScope;
  friend class ConstantPoolUnavailableScope;
};
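
// A construction sketch, assuming a concrete platform Assembler derived from
// AssemblerBase with the same constructor signature:
//
//   AssemblerBase::Options options = AssemblerBase::DefaultOptions(isolate);
//   Assembler masm(options, nullptr, AssemblerBase::kMinimalBufferSize);
//   // nullptr asks the assembler to allocate (and own) its own buffer.
//   // Code that must survive serialization instead passes
//   // DefaultOptions(isolate, /*explicitly_support_serialization=*/true).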

// Avoids emitting debug code during the lifetime of this scope object.
class DontEmitDebugCodeScope BASE_EMBEDDED {
 public:
  explicit DontEmitDebugCodeScope(AssemblerBase* assembler)
      : assembler_(assembler), old_value_(assembler->emit_debug_code()) {
    assembler_->set_emit_debug_code(false);
  }
  ~DontEmitDebugCodeScope() {
    assembler_->set_emit_debug_code(old_value_);
  }

 private:
  AssemblerBase* assembler_;
  bool old_value_;
};
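
// Usage sketch: debug-code emission is disabled for exactly the lifetime of
// the scope, and the previous setting is restored on scope exit:
//
//   {
//     DontEmitDebugCodeScope no_debug_code(assembler);
//     // Code generated here omits emit_debug_code() assertions.
//   }  // emit_debug_code() reverts to its prior value.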

// Avoids using instructions that vary in size in unpredictable ways between
// the snapshot and the running VM.
class PredictableCodeSizeScope {
 public:
  PredictableCodeSizeScope(AssemblerBase* assembler, int expected_size);
  ~PredictableCodeSizeScope();

 private:
  AssemblerBase* const assembler_;
  int const expected_size_;
  int const start_offset_;
  bool const old_value_;
};
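
// Usage sketch (kExpectedSize is a hypothetical caller-supplied constant;
// the size check itself lives in the implementation, not shown here):
//
//   {
//     PredictableCodeSizeScope predictable(assembler, kExpectedSize);
//     // Code generated here must occupy exactly kExpectedSize bytes.
//   }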

// Enable a specified feature within a scope.
class CpuFeatureScope BASE_EMBEDDED {
 public:
  enum CheckPolicy {
    kCheckSupported,
    kDontCheckSupported,
  };

#ifdef DEBUG
  CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
                  CheckPolicy check = kCheckSupported);
  ~CpuFeatureScope();

 private:
  AssemblerBase* assembler_;
  uint64_t old_enabled_;
#else
  CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
                  CheckPolicy check = kCheckSupported) {}
  // Define a destructor to avoid unused variable warnings.
  ~CpuFeatureScope() {}
#endif
};

// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a CpuFeatureScope before use.
// Example:
//   if (assembler->IsSupported(SSE3)) {
//     CpuFeatureScope fscope(assembler, SSE3);
//     // Generate code containing SSE3 instructions.
//   } else {
//     // Generate alternative code.
//   }
class CpuFeatures : public AllStatic {
 public:
  static void Probe(bool cross_compile) {
    STATIC_ASSERT(NUMBER_OF_CPU_FEATURES <= kBitsPerInt);
    if (initialized_) return;
    initialized_ = true;
    ProbeImpl(cross_compile);
  }

  static unsigned SupportedFeatures() {
    Probe(false);
    return supported_;
  }

  static bool IsSupported(CpuFeature f) {
    return (supported_ & (1u << f)) != 0;
  }

  static inline bool SupportsOptimizer();

  static inline bool SupportsWasmSimd128();

  static inline unsigned icache_line_size() {
    DCHECK_NE(icache_line_size_, 0);
    return icache_line_size_;
  }

  static inline unsigned dcache_line_size() {
    DCHECK_NE(dcache_line_size_, 0);
    return dcache_line_size_;
  }

  static void PrintTarget();
  static void PrintFeatures();

 private:
  friend class ExternalReference;
  friend class AssemblerBase;
  // Flush instruction cache.
  static void FlushICache(void* start, size_t size);

  // Platform-dependent implementation.
  static void ProbeImpl(bool cross_compile);

  static unsigned supported_;
  static unsigned icache_line_size_;
  static unsigned dcache_line_size_;
  static bool initialized_;
  DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};

// Specifies whether to perform icache flush operations on RelocInfo updates.
// If FLUSH_ICACHE_IF_NEEDED, the icache will always be flushed if an
// instruction was modified. If SKIP_ICACHE_FLUSH the flush will always be
// skipped (only use this if you will flush the icache manually before it is
// executed).
enum ICacheFlushMode { FLUSH_ICACHE_IF_NEEDED, SKIP_ICACHE_FLUSH };

// -----------------------------------------------------------------------------
// Relocation information

// Relocation information consists of the address (pc) of the datum
// to which the relocation information applies, the relocation mode
// (rmode), and an optional data field. The relocation mode may be
// "descriptive" and not indicate a need for relocation, but simply
// describe a property of the datum. Such rmodes are useful for GC
// and nice disassembly output.

class RelocInfo {
 public:
  // This string is used to add padding comments to the reloc info in cases
  // where we are not sure to have enough space for patching in during
  // lazy deoptimization. This is the case if we have indirect calls for which
  // we do not normally record relocation info.
  static const char* const kFillerCommentString;

  // The minimum size of a comment is equal to two bytes for the extra tagged
  // pc and kPointerSize for the actual pointer to the comment.
  static const int kMinRelocCommentSize = 2 + kPointerSize;

  // The maximum size for a call instruction including pc-jump.
  static const int kMaxCallSize = 6;

  // The maximum pc delta that will use the short encoding.
  static const int kMaxSmallPCDelta;

  enum Mode : int8_t {
    // Please note the order is important (see IsRealRelocMode, IsGCRelocMode,
    // and IsShareableRelocMode predicates below).

    CODE_TARGET,
    EMBEDDED_OBJECT,  // LAST_GCED_ENUM

    JS_TO_WASM_CALL,
    WASM_CALL,  // FIRST_SHAREABLE_RELOC_MODE
    WASM_STUB_CALL,

    RUNTIME_ENTRY,
    COMMENT,

    EXTERNAL_REFERENCE,  // The address of an external C++ function.
    INTERNAL_REFERENCE,  // An address inside the same function.

    // Encoded internal reference, used only on MIPS, MIPS64 and PPC.
    INTERNAL_REFERENCE_ENCODED,

    // An off-heap instruction stream target. See http://goo.gl/Z2HUiM.
    OFF_HEAP_TARGET,

    // Marks constant and veneer pools. Only used on ARM and ARM64.
    // They use a custom noncompact encoding.
    CONST_POOL,
    VENEER_POOL,

    DEOPT_SCRIPT_OFFSET,  // Deoptimization source position.
    DEOPT_INLINING_ID,    // Deoptimization inlining id.
    DEOPT_REASON,         // Deoptimization reason index.
    DEOPT_ID,             // Deoptimization id.

    // This is not an actual reloc mode, but used to encode a long pc jump that
    // cannot be encoded as part of another record.
    PC_JUMP,

    // Pseudo-modes
    NUMBER_OF_MODES,
    NONE,  // never recorded value

    FIRST_REAL_RELOC_MODE = CODE_TARGET,
    LAST_REAL_RELOC_MODE = VENEER_POOL,
    LAST_GCED_ENUM = EMBEDDED_OBJECT,
    FIRST_SHAREABLE_RELOC_MODE = WASM_CALL,
  };

  STATIC_ASSERT(NUMBER_OF_MODES <= kBitsPerInt);

  RelocInfo() = default;

  RelocInfo(Address pc, Mode rmode, intptr_t data, Code* host,
            Address constant_pool = kNullAddress)
      : pc_(pc),
        rmode_(rmode),
        data_(data),
        host_(host),
        constant_pool_(constant_pool) {}

  static inline bool IsRealRelocMode(Mode mode) {
    return mode >= FIRST_REAL_RELOC_MODE && mode <= LAST_REAL_RELOC_MODE;
  }
  // Is the relocation mode affected by GC?
  static inline bool IsGCRelocMode(Mode mode) { return mode <= LAST_GCED_ENUM; }
  static inline bool IsShareableRelocMode(Mode mode) {
    return mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE;
  }
  static inline bool IsCodeTarget(Mode mode) { return mode == CODE_TARGET; }
  static inline bool IsEmbeddedObject(Mode mode) {
    return mode == EMBEDDED_OBJECT;
  }
  static inline bool IsRuntimeEntry(Mode mode) {
    return mode == RUNTIME_ENTRY;
  }
  static inline bool IsWasmCall(Mode mode) { return mode == WASM_CALL; }
  static inline bool IsWasmStubCall(Mode mode) {
    return mode == WASM_STUB_CALL;
  }
  static inline bool IsComment(Mode mode) {
    return mode == COMMENT;
  }
  static inline bool IsConstPool(Mode mode) {
    return mode == CONST_POOL;
  }
  static inline bool IsVeneerPool(Mode mode) {
    return mode == VENEER_POOL;
  }
  static inline bool IsDeoptPosition(Mode mode) {
    return mode == DEOPT_SCRIPT_OFFSET || mode == DEOPT_INLINING_ID;
  }
  static inline bool IsDeoptReason(Mode mode) {
    return mode == DEOPT_REASON;
  }
  static inline bool IsDeoptId(Mode mode) {
    return mode == DEOPT_ID;
  }
  static inline bool IsExternalReference(Mode mode) {
    return mode == EXTERNAL_REFERENCE;
  }
  static inline bool IsInternalReference(Mode mode) {
    return mode == INTERNAL_REFERENCE;
  }
  static inline bool IsInternalReferenceEncoded(Mode mode) {
    return mode == INTERNAL_REFERENCE_ENCODED;
  }
  static inline bool IsOffHeapTarget(Mode mode) {
    return mode == OFF_HEAP_TARGET;
  }
  static inline bool IsNone(Mode mode) { return mode == NONE; }
  static inline bool IsWasmReference(Mode mode) {
    return IsWasmPtrReference(mode);
  }
  static inline bool IsWasmPtrReference(Mode mode) {
    return mode == WASM_CALL || mode == JS_TO_WASM_CALL;
  }

  static inline bool IsOnlyForSerializer(Mode mode) {
    return mode == EXTERNAL_REFERENCE || mode == OFF_HEAP_TARGET;
  }

  static constexpr int ModeMask(Mode mode) { return 1 << mode; }

  // Accessors
  Address pc() const { return pc_; }
  Mode rmode() const { return rmode_; }
  intptr_t data() const { return data_; }
  Code* host() const { return host_; }
  Address constant_pool() const { return constant_pool_; }

  // Apply a relocation by delta bytes. When the code object is moved, PC
  // relative addresses have to be updated as well as absolute addresses
  // inside the code (internal references).
  // Do not forget to flush the icache afterwards!
  V8_INLINE void apply(intptr_t delta);

  // Is the pointer this relocation info refers to coded like a plain pointer
  // or is it strange in some way (e.g. relative or patched into a series of
  // instructions).
  bool IsCodedSpecially();

  // The static counterpart to IsCodedSpecially, just for off-heap targets.
  // Used during deserialization, when we don't actually have a RelocInfo
  // handy.
  static bool OffHeapTargetIsCodedSpecially();

  // If true, the pointer this relocation info refers to is an entry in the
  // constant pool, otherwise the pointer is embedded in the instruction
  // stream.
  bool IsInConstantPool();

  // Returns the deoptimization id for the entry associated with the reloc
  // info where {kind} is the deoptimization kind.
  // This is only used for printing RUNTIME_ENTRY relocation info.
  int GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind);

  Address wasm_call_address() const;
  Address wasm_stub_call_address() const;
  Address js_to_wasm_address() const;

  uint32_t wasm_stub_call_tag() const;

  void set_wasm_call_address(
      Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
  void set_wasm_stub_call_address(
      Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
  void set_js_to_wasm_address(
      Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);

  void set_target_address(
      Address target,
      WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);

  // Read/modify the code target in the branch/call instruction
  // this relocation applies to;
  // can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
  V8_INLINE Address target_address();
  V8_INLINE HeapObject* target_object();
  V8_INLINE Handle<HeapObject> target_object_handle(Assembler* origin);
  V8_INLINE void set_target_object(
      Heap* heap, HeapObject* target,
      WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
  V8_INLINE Address target_runtime_entry(Assembler* origin);
  V8_INLINE void set_target_runtime_entry(
      Address target,
      WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
  V8_INLINE Address target_off_heap_target();
  V8_INLINE Cell* target_cell();
  V8_INLINE Handle<Cell> target_cell_handle();
  V8_INLINE void set_target_cell(
      Cell* cell, WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
  V8_INLINE void set_target_external_reference(
      Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);

  // Returns the address of the constant pool entry where the target address
  // is held. This should only be called if IsInConstantPool returns true.
  V8_INLINE Address constant_pool_entry_address();

  // Read the address of the word containing the target_address in an
  // instruction stream. What this means exactly is architecture-independent.
  // The only architecture-independent user of this function is the serializer.
  // The serializer uses it to find out how many raw bytes of instruction to
  // output before the next target. Architecture-independent code shouldn't
  // dereference the pointer it gets back from this.
  V8_INLINE Address target_address_address();

  // This indicates how much space a target takes up when deserializing a code
  // stream. For most architectures this is just the size of a pointer. For
  // an instruction like movw/movt where the target bits are mixed into the
  // instruction bits the size of the target will be zero, indicating that the
  // serializer should not step forwards in memory after a target is resolved
  // and written. In this case the target_address_address function above
  // should return the end of the instructions to be patched, allowing the
  // deserializer to deserialize the instructions as raw bytes and put them in
  // place, ready to be patched with the target.
  V8_INLINE int target_address_size();

  // Read the reference in the instruction this relocation
  // applies to; can only be called if rmode_ is EXTERNAL_REFERENCE.
  V8_INLINE Address target_external_reference();

  // Read the reference in the instruction this relocation
  // applies to; can only be called if rmode_ is INTERNAL_REFERENCE.
  V8_INLINE Address target_internal_reference();

  // Return the reference address this relocation applies to;
  // can only be called if rmode_ is INTERNAL_REFERENCE.
  V8_INLINE Address target_internal_reference_address();

  // Wipe out a relocation to a fixed value, used for making snapshots
  // reproducible.
  V8_INLINE void WipeOut();

  template <typename ObjectVisitor>
  inline void Visit(ObjectVisitor* v);

#ifdef DEBUG
  // Check whether the given code contains relocation information that
  // either is position-relative or movable by the garbage collector.
  static bool RequiresRelocation(const CodeDesc& desc);
#endif

#ifdef ENABLE_DISASSEMBLER
  // Printing
  static const char* RelocModeName(Mode rmode);
  void Print(Isolate* isolate, std::ostream& os);  // NOLINT
#endif  // ENABLE_DISASSEMBLER
#ifdef VERIFY_HEAP
  void Verify(Isolate* isolate);
#endif

  static const int kApplyMask;  // Modes affected by apply. Depends on arch.

 private:
  // On ARM/ARM64, note that pc_ is the address of the instruction referencing
  // the constant pool and not the address of the constant pool entry.
  Address pc_;
  Mode rmode_;
  intptr_t data_ = 0;
  Code* host_;
  Address constant_pool_ = kNullAddress;
  friend class RelocIterator;
};

// RelocInfoWriter serializes a stream of relocation info. It writes towards
// lower addresses.
class RelocInfoWriter BASE_EMBEDDED {
 public:
  RelocInfoWriter() : pos_(nullptr), last_pc_(nullptr) {}

  byte* pos() const { return pos_; }
  byte* last_pc() const { return last_pc_; }

  void Write(const RelocInfo* rinfo);

  // Update the state of the stream after reloc info buffer
  // and/or code is moved while the stream is active.
  void Reposition(byte* pos, byte* pc) {
    pos_ = pos;
    last_pc_ = pc;
  }

  // Max size (bytes) of a written RelocInfo. Longest encoding is
  // ExtraTag, VariableLengthPCJump, ExtraTag, pc_delta, data_delta.
  static constexpr int kMaxSize = 1 + 4 + 1 + 1 + kPointerSize;

 private:
  inline uint32_t WriteLongPCJump(uint32_t pc_delta);

  inline void WriteShortTaggedPC(uint32_t pc_delta, int tag);
  inline void WriteShortData(intptr_t data_delta);

  inline void WriteMode(RelocInfo::Mode rmode);
  inline void WriteModeAndPC(uint32_t pc_delta, RelocInfo::Mode rmode);
  inline void WriteIntData(int data_delta);
  inline void WriteData(intptr_t data_delta);

  byte* pos_;
  byte* last_pc_;

  DISALLOW_COPY_AND_ASSIGN(RelocInfoWriter);
};

// A RelocIterator iterates over relocation information.
// Typical use:
//
//   for (RelocIterator it(code); !it.done(); it.next()) {
//     // do something with it.rinfo() here
//   }
//
// A mask can be specified to skip unwanted modes.
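//
// For example, to visit only code targets and embedded objects (modes chosen
// for illustration), compose a mask from RelocInfo::ModeMask:
//
//   int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
//              RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
//   for (RelocIterator it(code, mask); !it.done(); it.next()) {
//     // Only CODE_TARGET and EMBEDDED_OBJECT records are visited.
//   }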
class RelocIterator : public Malloced {
 public:
  // Create a new iterator positioned at
  // the beginning of the reloc info.
  // Relocation information with mode k is included in the
  // iteration iff bit k of mode_mask is set.
  explicit RelocIterator(Code* code, int mode_mask = -1);
#ifdef V8_EMBEDDED_BUILTINS
  explicit RelocIterator(EmbeddedData* embedded_data, Code* code,
                         int mode_mask);
#endif  // V8_EMBEDDED_BUILTINS
  explicit RelocIterator(const CodeDesc& desc, int mode_mask = -1);
  explicit RelocIterator(const CodeReference code_reference,
                         int mode_mask = -1);
  explicit RelocIterator(Vector<byte> instructions,
                         Vector<const byte> reloc_info, Address const_pool,
                         int mode_mask = -1);
  RelocIterator(RelocIterator&&) = default;
  RelocIterator& operator=(RelocIterator&&) = default;

  // Iteration
  bool done() const { return done_; }
  void next();

  // Return pointer valid until next next().
  RelocInfo* rinfo() {
    DCHECK(!done());
    return &rinfo_;
  }
|
|
|
|
|
|
|
|
private:
|
2018-05-15 14:35:47 +00:00
|
|
|
RelocIterator(Code* host, Address pc, Address constant_pool, const byte* pos,
|
|
|
|
const byte* end, int mode_mask);
|
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
// Advance* moves the position before/after reading.
|
|
|
|
// *Read* reads from current byte(s) into rinfo_.
|
|
|
|
// *Get* just reads and returns info on current byte.
|
|
|
|
void Advance(int bytes = 1) { pos_ -= bytes; }
|
|
|
|
int AdvanceGetTag();
|
2015-07-10 13:14:36 +00:00
|
|
|
RelocInfo::Mode GetMode();
|
|
|
|
|
|
|
|
void AdvanceReadLongPCJump();
|
|
|
|
|
|
|
|
void ReadShortTaggedPC();
|
2017-06-20 13:30:17 +00:00
|
|
|
void ReadShortData();
|
2015-07-10 13:14:36 +00:00
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
void AdvanceReadPC();
|
2015-07-10 08:49:14 +00:00
|
|
|
void AdvanceReadInt();
|
2008-07-03 15:10:15 +00:00
|
|
|
void AdvanceReadData();
|
|
|
|
|
|
|
|
// If the given mode is wanted, set it in rinfo_ and return true.
|
|
|
|
// Else return false. Used for efficiently skipping unwanted modes.
|
2008-09-22 13:57:03 +00:00
|
|
|
bool SetMode(RelocInfo::Mode mode) {
|
2010-11-30 10:55:24 +00:00
|
|
|
return (mode_mask_ & (1 << mode)) ? (rinfo_.rmode_ = mode, true) : false;
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
|
|
|
|
2017-11-10 22:18:12 +00:00
|
|
|
const byte* pos_;
|
|
|
|
const byte* end_;
|
2008-07-03 15:10:15 +00:00
|
|
|
RelocInfo rinfo_;
|
2018-01-19 08:14:09 +00:00
|
|
|
bool done_ = false;
|
|
|
|
const int mode_mask_;
|
|
|
|
|
2008-08-28 09:55:41 +00:00
|
|
|
DISALLOW_COPY_AND_ASSIGN(RelocIterator);
|
2008-07-03 15:10:15 +00:00
|
|
|
};
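
// Usage sketch (illustrative only): visiting the relocation entries of a
// Code object, restricted to a single mode. This assumes a
// RelocInfo::ModeMask helper that turns a mode into the bit mask consumed
// here; passing the default mask of -1 instead visits every mode.
//
//   int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
//   for (RelocIterator it(code, mask); !it.done(); it.next()) {
//     RelocInfo* info = it.rinfo();  // Valid only until the next next().
//     ProcessEntry(info);            // Hypothetical per-entry callback.
//   }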

// -----------------------------------------------------------------------------
// Utility functions

// Computes pow(x, y) with the special cases in the spec for Math.pow.
double power_helper(Isolate* isolate, double x, double y);
double power_double_int(double x, int y);
double power_double_double(double x, double y);
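//
// One such special case (per the spec): Math.pow(+/-1, +/-Infinity) is NaN,
// so power_double_double(1.0, V8_INFINITY) is expected to return NaN even
// though the C library's pow(1.0, INFINITY) returns 1.0. (V8_INFINITY is
// assumed here as the usual infinity constant.)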

// -----------------------------------------------------------------------------
// Constant pool support

class ConstantPoolEntry {
 public:
  ConstantPoolEntry() {}
  ConstantPoolEntry(int position, intptr_t value, bool sharing_ok,
                    RelocInfo::Mode rmode = RelocInfo::NONE)
      : position_(position),
        merged_index_(sharing_ok ? SHARING_ALLOWED : SHARING_PROHIBITED),
        value_(value),
        rmode_(rmode) {}
  ConstantPoolEntry(int position, Double value,
                    RelocInfo::Mode rmode = RelocInfo::NONE)
      : position_(position),
        merged_index_(SHARING_ALLOWED),
        value64_(value.AsUint64()),
        rmode_(rmode) {}

  int position() const { return position_; }
  bool sharing_ok() const { return merged_index_ != SHARING_PROHIBITED; }
  bool is_merged() const { return merged_index_ >= 0; }
  int merged_index() const {
    DCHECK(is_merged());
    return merged_index_;
  }
  void set_merged_index(int index) {
    DCHECK(sharing_ok());
    merged_index_ = index;
    DCHECK(is_merged());
  }
  int offset() const {
    DCHECK_GE(merged_index_, 0);
    return merged_index_;
  }
  void set_offset(int offset) {
    DCHECK_GE(offset, 0);
    merged_index_ = offset;
  }
  intptr_t value() const { return value_; }
  uint64_t value64() const { return value64_; }
  RelocInfo::Mode rmode() const { return rmode_; }

  enum Type { INTPTR, DOUBLE, NUMBER_OF_TYPES };

  static int size(Type type) {
    return (type == INTPTR) ? kPointerSize : kDoubleSize;
  }

  enum Access { REGULAR, OVERFLOWED };

 private:
  int position_;
  int merged_index_;
  union {
    intptr_t value_;
    uint64_t value64_;
  };
  // TODO(leszeks): The way we use this, it could probably be packed into
  // merged_index_ if size is a concern.
  RelocInfo::Mode rmode_;
  enum { SHARING_PROHIBITED = -2, SHARING_ALLOWED = -1 };
};
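
// Illustrative walk-through of the merged_index_ encoding (a sketch, not an
// API contract): an entry constructed with sharing_ok == true starts out as
// SHARING_ALLOWED (-1). If pool construction later finds an identical entry,
// set_merged_index() moves it to a non-negative index and is_merged()
// becomes true. Once the pool is laid out, the same field is reused to hold
// the entry's pool offset via set_offset()/offset().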

// -----------------------------------------------------------------------------
// Embedded constant pool support

class ConstantPoolBuilder BASE_EMBEDDED {
 public:
  ConstantPoolBuilder(int ptr_reach_bits, int double_reach_bits);

  // Add pointer-sized constant to the embedded constant pool.
  ConstantPoolEntry::Access AddEntry(int position, intptr_t value,
                                     bool sharing_ok) {
    ConstantPoolEntry entry(position, value, sharing_ok);
    return AddEntry(entry, ConstantPoolEntry::INTPTR);
  }

  // Add double constant to the embedded constant pool.
  ConstantPoolEntry::Access AddEntry(int position, Double value) {
    ConstantPoolEntry entry(position, value);
    return AddEntry(entry, ConstantPoolEntry::DOUBLE);
  }

  // Add double constant to the embedded constant pool.
  ConstantPoolEntry::Access AddEntry(int position, double value) {
    return AddEntry(position, Double(value));
  }

  // Previews the access type required for the next new entry to be added.
  ConstantPoolEntry::Access NextAccess(ConstantPoolEntry::Type type) const;

  bool IsEmpty() {
    return info_[ConstantPoolEntry::INTPTR].entries.empty() &&
           info_[ConstantPoolEntry::INTPTR].shared_entries.empty() &&
           info_[ConstantPoolEntry::DOUBLE].entries.empty() &&
           info_[ConstantPoolEntry::DOUBLE].shared_entries.empty();
  }

  // Emit the constant pool. Invoke only after all entries have been added
  // and all instructions have been emitted.
  // Returns the position of the emitted pool (zero implies no constant pool).
  int Emit(Assembler* assm);

  // Returns the label associated with the start of the constant pool.
  // Linking to this label in the function prologue may provide an efficient
  // means of constant pool pointer register initialization on some
  // architectures.
  inline Label* EmittedPosition() { return &emitted_label_; }

 private:
  ConstantPoolEntry::Access AddEntry(ConstantPoolEntry& entry,
                                     ConstantPoolEntry::Type type);
  void EmitSharedEntries(Assembler* assm, ConstantPoolEntry::Type type);
  void EmitGroup(Assembler* assm, ConstantPoolEntry::Access access,
                 ConstantPoolEntry::Type type);

  struct PerTypeEntryInfo {
    PerTypeEntryInfo() : regular_count(0), overflow_start(-1) {}
    bool overflow() const {
      return (overflow_start >= 0 &&
              overflow_start < static_cast<int>(entries.size()));
    }
    int regular_reach_bits;
    int regular_count;
    int overflow_start;
    std::vector<ConstantPoolEntry> entries;
    std::vector<ConstantPoolEntry> shared_entries;
  };

  Label emitted_label_;  // Records pc_offset of emitted pool.
  PerTypeEntryInfo info_[ConstantPoolEntry::NUMBER_OF_TYPES];
};
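
// Usage sketch (illustrative; the reach-bit values and the pc_offset()
// anchoring below are assumptions about the embedding assembler, not part
// of this interface):
//
//   ConstantPoolBuilder builder(/* ptr_reach_bits */ 16,
//                               /* double_reach_bits */ 16);
//   builder.AddEntry(assm->pc_offset(), 1.5);           // DOUBLE entry.
//   builder.AddEntry(assm->pc_offset(), imm, true);     // Shareable INTPTR.
//   // ... emit the remaining instructions ...
//   int pool_position = builder.Emit(assm);  // Only after code emission.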

// Base type for CPU Registers.
//
// 1) We would prefer to use an enum for registers, but enum values are
//    assignment-compatible with int, which has caused code-generation bugs.
//
// 2) By not using an enum, we are possibly preventing the compiler from
//    doing certain constant folds, which may significantly reduce the
//    code generated for some assembly instructions (because they boil down
//    to a few constants). If this is a problem, we could change the code
//    such that we use an enum in optimized mode, and the class in debug
//    mode. This way we get the compile-time error checking in debug mode
//    and best performance in optimized code.
template <typename SubType, int kAfterLastRegister>
class RegisterBase {
  // Internal enum class; used for calling constexpr methods, where we need to
  // pass an integral type as template parameter.
  enum class RegisterCode : int { kFirst = 0, kAfterLast = kAfterLastRegister };

 public:
  static constexpr int kCode_no_reg = -1;
  static constexpr int kNumRegisters = kAfterLastRegister;

  static constexpr SubType no_reg() { return SubType{kCode_no_reg}; }

  template <int code>
  static constexpr SubType from_code() {
    static_assert(code >= 0 && code < kNumRegisters, "must be valid reg code");
    return SubType{code};
  }

  constexpr operator RegisterCode() const {
    return static_cast<RegisterCode>(reg_code_);
  }

  template <RegisterCode reg_code>
  static constexpr int code() {
    static_assert(
        reg_code >= RegisterCode::kFirst && reg_code < RegisterCode::kAfterLast,
        "must be valid reg");
    return static_cast<int>(reg_code);
  }

  template <RegisterCode reg_code>
  static constexpr RegList bit() {
    return RegList{1} << code<reg_code>();
  }

  static SubType from_code(int code) {
    DCHECK_LE(0, code);
    DCHECK_GT(kNumRegisters, code);
    return SubType{code};
  }

  // Constexpr version (pass registers as template parameters).
  template <RegisterCode... reg_codes>
  static constexpr RegList ListOf() {
    return CombineRegLists(RegisterBase::bit<reg_codes>()...);
  }

  // Non-constexpr version (pass registers as method parameters).
  template <typename... Register>
  static RegList ListOf(Register... regs) {
    return CombineRegLists(regs.bit()...);
  }

  bool is_valid() const { return reg_code_ != kCode_no_reg; }

  int code() const {
    DCHECK(is_valid());
    return reg_code_;
  }

  RegList bit() const { return RegList{1} << code(); }

  inline constexpr bool operator==(SubType other) const {
    return reg_code_ == other.reg_code_;
  }
  inline constexpr bool operator!=(SubType other) const {
    return reg_code_ != other.reg_code_;
  }

 protected:
  explicit constexpr RegisterBase(int code) : reg_code_(code) {}
  int reg_code_;
};
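
// Illustrative sketch of an architecture port building on RegisterBase (all
// names here are hypothetical, not taken from any actual port):
//
//   enum DemoRegisterCode { kDemoCode_r0, kDemoCode_r1, kDemoAfterLast };
//   class DemoRegister : public RegisterBase<DemoRegister, kDemoAfterLast> {
//     friend class RegisterBase<DemoRegister, kDemoAfterLast>;
//     explicit constexpr DemoRegister(int code) : RegisterBase(code) {}
//   };
//   constexpr DemoRegister r0 = DemoRegister::from_code<kDemoCode_r0>();
//   constexpr DemoRegister no_reg = DemoRegister::no_reg();
//
// Every DemoRegister is then either one of the named registers or no_reg;
// no other register codes can be constructed.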

template <typename SubType, int kAfterLastRegister>
inline std::ostream& operator<<(std::ostream& os,
                                RegisterBase<SubType, kAfterLastRegister> reg) {
  return reg.is_valid() ? os << "r" << reg.code() : os << "<invalid reg>";
}
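// For example, streaming a register whose code is 3 prints "r3", while an
// invalid register prints "<invalid reg>".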

}  // namespace internal
}  // namespace v8

#endif  // V8_ASSEMBLER_H_