[wasm] Rename more "type" to "kind"
This renames more fields holding "ValueKind", and methods operating on
"ValueKind", to contain "kind" instead of "type". It also renames
"is_reference_type" to "is_reference" on both ValueType and ValueKind, as
the "_type" suffix is implicit from the argument.

R=manoskouk@chromium.org, jgruber@chromium.org

Bug: v8:11477
Change-Id: I7809f1af6e983aebca96e03fe1fbc6ccaa22db72
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2718063
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Manos Koukoutos <manoskouk@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#73070}
parent 98cbf95368
commit 52cc7ba9bc
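For orientation before the per-file hunks: the heart of this CL is a pair of constexpr predicates over ValueKind, plus thin ValueType wrappers that delegate to them (see the value-type hunks further down). A condensed, self-contained sketch of the renamed helpers, with the kind list abbreviated here since the real enum is generated from a macro list:

#include <cstdint>

// Abbreviated stand-in for V8's macro-generated ValueKind list.
enum ValueKind : uint8_t { kI32, kI64, kF32, kF64, kS128, kRef,
                           kOptRef, kRtt, kRttWithDepth, kStmt };

// The renamed predicates, matching the value-type hunk in this diff:
// any ref or RTT kind is a "reference"; only ref/optref are "object
// references".
constexpr bool is_reference(ValueKind kind) {
  return kind == kRef || kind == kOptRef || kind == kRtt ||
         kind == kRttWithDepth;
}

constexpr bool is_object_reference(ValueKind kind) {
  return kind == kRef || kind == kOptRef;
}

static_assert(is_reference(kOptRef) && !is_reference(kI32),
              "reference kinds are the ref/rtt family");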
@@ -1181,7 +1181,7 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
           static_cast<const JSToWasmFrameStateDescriptor*>(descriptor);
       translations_.BeginJSToWasmBuiltinContinuationFrame(
           bailout_id, shared_info_id, height,
-          js_to_wasm_descriptor->return_type());
+          js_to_wasm_descriptor->return_kind());
       break;
     }
     case FrameStateType::kJavaScriptBuiltinContinuation: {

@@ -1135,7 +1135,7 @@ JSToWasmFrameStateDescriptor::JSToWasmFrameStateDescriptor(
     : FrameStateDescriptor(zone, type, bailout_id, state_combine,
                            parameters_count, locals_count, stack_count,
                            shared_info, outer_state),
-      return_type_(wasm::WasmReturnTypeFromSignature(wasm_signature)) {}
+      return_kind_(wasm::WasmReturnTypeFromSignature(wasm_signature)) {}

 std::ostream& operator<<(std::ostream& os, const RpoNumber& rpo) {
   return os << rpo.ToSize();
@@ -1370,10 +1370,10 @@ class JSToWasmFrameStateDescriptor : public FrameStateDescriptor {
                                FrameStateDescriptor* outer_state,
                                const wasm::FunctionSig* wasm_signature);

-  base::Optional<wasm::ValueKind> return_type() const { return return_type_; }
+  base::Optional<wasm::ValueKind> return_kind() const { return return_kind_; }

  private:
-  base::Optional<wasm::ValueKind> return_type_;
+  base::Optional<wasm::ValueKind> return_kind_;
 };

 // A deoptimization entry is a pair of the reason why we deoptimize and the

@@ -180,10 +180,10 @@ FrameState CreateStubBuiltinContinuationFrameState(
 FrameState CreateJSWasmCallBuiltinContinuationFrameState(
     JSGraph* jsgraph, Node* context, Node* outer_frame_state,
     const wasm::FunctionSig* signature) {
-  base::Optional<wasm::ValueKind> wasm_return_type =
+  base::Optional<wasm::ValueKind> wasm_return_kind =
       wasm::WasmReturnTypeFromSignature(signature);
   Node* node_return_type =
-      jsgraph->SmiConstant(wasm_return_type ? wasm_return_type.value() : -1);
+      jsgraph->SmiConstant(wasm_return_kind ? wasm_return_kind.value() : -1);
   Node* lazy_deopt_parameters[] = {node_return_type};
   return CreateStubBuiltinContinuationFrameState(
       jsgraph, Builtins::kJSToWasmLazyDeoptContinuation, context,
@@ -378,7 +378,7 @@ class WasmGraphAssembler : public GraphAssembler {
       return StoreUnaligned(rep, base, offset, value);
     } else {
       WriteBarrierKind write_barrier =
-          type.is_reference_type() ? kPointerWriteBarrier : kNoWriteBarrier;
+          type.is_reference() ? kPointerWriteBarrier : kNoWriteBarrier;
       StoreRepresentation store_rep(rep, write_barrier);
       return Store(store_rep, base, offset, value);
     }

@@ -3595,7 +3595,7 @@ Node* WasmGraphBuilder::BuildCallToRuntime(Runtime::FunctionId f,

 Node* WasmGraphBuilder::GlobalGet(uint32_t index) {
   const wasm::WasmGlobal& global = env_->module->globals[index];
-  if (global.type.is_reference_type()) {
+  if (global.type.is_reference()) {
     if (global.mutability && global.imported) {
       Node* base = nullptr;
       Node* offset = nullptr;

@@ -3623,7 +3623,7 @@ Node* WasmGraphBuilder::GlobalGet(uint32_t index) {

 Node* WasmGraphBuilder::GlobalSet(uint32_t index, Node* val) {
   const wasm::WasmGlobal& global = env_->module->globals[index];
-  if (global.type.is_reference_type()) {
+  if (global.type.is_reference()) {
     if (global.mutability && global.imported) {
       Node* base = nullptr;
       Node* offset = nullptr;
@@ -1557,10 +1557,10 @@ Builtins::Name Deoptimizer::TrampolineForBuiltinContinuation(
   UNREACHABLE();
 }

-TranslatedValue Deoptimizer::TranslatedValueForWasmReturnType(
-    base::Optional<wasm::ValueKind> wasm_call_return_type) {
-  if (wasm_call_return_type) {
-    switch (wasm_call_return_type.value()) {
+TranslatedValue Deoptimizer::TranslatedValueForWasmReturnKind(
+    base::Optional<wasm::ValueKind> wasm_call_return_kind) {
+  if (wasm_call_return_kind) {
+    switch (wasm_call_return_kind.value()) {
       case wasm::kI32:
         return TranslatedValue::NewInt32(
             &translated_state_,

@@ -1658,8 +1658,8 @@ void Deoptimizer::DoComputeBuiltinContinuation(
     // This TranslatedValue will be written in the output frame in place of the
     // hole and we'll use ContinueToCodeStubBuiltin in place of
     // ContinueToCodeStubBuiltinWithResult.
-    TranslatedValue result = TranslatedValueForWasmReturnType(
-        translated_frame->wasm_call_return_type());
+    TranslatedValue result = TranslatedValueForWasmReturnKind(
+        translated_frame->wasm_call_return_kind());
     translated_frame->Add(result);
   }

@@ -164,8 +164,8 @@ class Deoptimizer : public Malloced {
   static Builtins::Name TrampolineForBuiltinContinuation(
       BuiltinContinuationMode mode, bool must_handle_result);

-  TranslatedValue TranslatedValueForWasmReturnType(
-      base::Optional<wasm::ValueKind> wasm_call_return_type);
+  TranslatedValue TranslatedValueForWasmReturnKind(
+      base::Optional<wasm::ValueKind> wasm_call_return_kind);

   void DoComputeBuiltinContinuation(TranslatedFrame* translated_frame,
                                     int frame_index,
@@ -268,9 +268,9 @@ void TranslationArrayPrintSingleFrame(std::ostream& os,
 namespace {

 // Decodes the return type of a Wasm function as the integer value of
-// wasm::ValueKind, or kNoWasmReturnType if the function returns void.
-base::Optional<wasm::ValueKind> DecodeWasmReturnType(int code) {
-  if (code != kNoWasmReturnType) {
+// wasm::ValueKind, or kNoWasmReturnKind if the function returns void.
+base::Optional<wasm::ValueKind> DecodeWasmReturnKind(int code) {
+  if (code != kNoWasmReturnKind) {
     return {static_cast<wasm::ValueKind>(code)};
   }
   return {};

@@ -658,10 +658,10 @@ TranslatedFrame TranslatedFrame::BuiltinContinuationFrame(

 TranslatedFrame TranslatedFrame::JSToWasmBuiltinContinuationFrame(
     BytecodeOffset bytecode_offset, SharedFunctionInfo shared_info, int height,
-    base::Optional<wasm::ValueKind> return_type) {
+    base::Optional<wasm::ValueKind> return_kind) {
   TranslatedFrame frame(kJSToWasmBuiltinContinuation, shared_info, height);
   frame.bytecode_offset_ = bytecode_offset;
-  frame.return_type_ = return_type;
+  frame.return_kind_ = return_kind;
   return frame;
 }

@@ -810,8 +810,8 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
       SharedFunctionInfo shared_info =
           SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
       int height = iterator->Next();
-      base::Optional<wasm::ValueKind> return_type =
-          DecodeWasmReturnType(iterator->Next());
+      base::Optional<wasm::ValueKind> return_kind =
+          DecodeWasmReturnKind(iterator->Next());
       if (trace_file != nullptr) {
         std::unique_ptr<char[]> name = shared_info.DebugNameCStr();
         PrintF(trace_file, " reading JS to Wasm builtin continuation frame %s",

@@ -819,10 +819,10 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
         PrintF(trace_file,
                " => bailout_id=%d, height=%d return_type=%d; inputs:\n",
                bailout_id.ToInt(), height,
-               return_type.has_value() ? return_type.value() : -1);
+               return_kind.has_value() ? return_kind.value() : -1);
       }
       return TranslatedFrame::JSToWasmBuiltinContinuationFrame(
-          bailout_id, shared_info, height, return_type);
+          bailout_id, shared_info, height, return_kind);
     }

     case TranslationOpcode::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME: {
@@ -251,9 +251,9 @@ class TranslatedFrame {
   const_reference front() const { return values_.front(); }

   // Only for Kind == kJSToWasmBuiltinContinuation
-  base::Optional<wasm::ValueKind> wasm_call_return_type() const {
+  base::Optional<wasm::ValueKind> wasm_call_return_kind() const {
     DCHECK_EQ(kind(), kJSToWasmBuiltinContinuation);
-    return return_type_;
+    return return_kind_;
   }

  private:

@@ -315,7 +315,7 @@ class TranslatedFrame {
   ValuesContainer values_;

   // Only for Kind == kJSToWasmBuiltinContinuation
-  base::Optional<wasm::ValueKind> return_type_;
+  base::Optional<wasm::ValueKind> return_kind_;
 };

 // Auxiliary class for translating deoptimization values.

@@ -442,8 +442,8 @@ class TranslatedState {
   FeedbackSlot feedback_slot_;
 };

-// Return type encoding for a Wasm function returning void.
-const int kNoWasmReturnType = -1;
+// Return kind encoding for a Wasm function returning void.
+const int kNoWasmReturnKind = -1;

 }  // namespace internal
 }  // namespace v8
@@ -23,10 +23,10 @@ constexpr int kCompressedDataOffset =
 constexpr int kTranslationArrayElementSize = kInt32Size;

 // Encodes the return type of a Wasm function as the integer value of
-// wasm::ValueKind, or kNoWasmReturnType if the function returns void.
-int EncodeWasmReturnType(base::Optional<wasm::ValueKind> return_type) {
-  return return_type ? static_cast<int>(return_type.value())
-                     : kNoWasmReturnType;
+// wasm::ValueKind, or kNoWasmReturnKind if the function returns void.
+int EncodeWasmReturnKind(base::Optional<wasm::ValueKind> return_kind) {
+  return return_kind ? static_cast<int>(return_kind.value())
+                     : kNoWasmReturnKind;
 }

 }  // namespace

@@ -131,7 +131,7 @@ void TranslationArrayBuilder::BeginJSToWasmBuiltinContinuationFrame(
   Add(bytecode_offset.ToInt());
   Add(literal_id);
   Add(height);
-  Add(EncodeWasmReturnType(return_type));
+  Add(EncodeWasmReturnKind(return_type));
   DCHECK_EQ(TranslationOpcodeOperandCount(opcode), 4);
 }

@@ -70,7 +70,7 @@ class TranslationArrayBuilder {
                                       unsigned height);
   void BeginJSToWasmBuiltinContinuationFrame(
       BytecodeOffset bailout_id, int literal_id, unsigned height,
-      base::Optional<wasm::ValueKind> return_type);
+      base::Optional<wasm::ValueKind> return_kind);
   void BeginJavaScriptBuiltinContinuationFrame(BytecodeOffset bailout_id,
                                                int literal_id, unsigned height);
   void BeginJavaScriptBuiltinContinuationWithCatchFrame(
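A note on the encoding above: a present ValueKind round-trips through its integer value, and an absent kind (a function returning void) maps to kNoWasmReturnKind, i.e. -1. A minimal sketch of the round trip, with std::optional standing in for base::Optional and an abbreviated kind list:

#include <cassert>
#include <cstdint>
#include <optional>

enum ValueKind : uint8_t { kI32, kI64, kF32, kF64 };  // abbreviated
constexpr int kNoWasmReturnKind = -1;

// Mirrors EncodeWasmReturnKind from the translation-array hunk above.
int EncodeWasmReturnKind(std::optional<ValueKind> return_kind) {
  return return_kind ? static_cast<int>(return_kind.value())
                     : kNoWasmReturnKind;
}

// Mirrors DecodeWasmReturnKind from the translated-state hunk earlier.
std::optional<ValueKind> DecodeWasmReturnKind(int code) {
  if (code != kNoWasmReturnKind) return {static_cast<ValueKind>(code)};
  return {};
}

int main() {
  // Round trip: a concrete kind survives, and "void" stays empty.
  assert(DecodeWasmReturnKind(EncodeWasmReturnKind(kF64)) == kF64);
  assert(!DecodeWasmReturnKind(EncodeWasmReturnKind(std::nullopt)).has_value());
}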
@@ -1970,7 +1970,7 @@ void WasmValueObject::WasmValueObjectPrint(std::ostream& os) {  // NOLINT

 void WasmGlobalObject::WasmGlobalObjectPrint(std::ostream& os) {  // NOLINT
   PrintHeader(os, "WasmGlobalObject");
-  if (type().is_reference_type()) {
+  if (type().is_reference()) {
     os << "\n - tagged_buffer: " << Brief(tagged_buffer());
   } else {
     os << "\n - untagged_buffer: " << Brief(untagged_buffer());

@@ -1978,7 +1978,7 @@ void WasmGlobalObject::WasmGlobalObjectPrint(std::ostream& os) {  // NOLINT
   os << "\n - offset: " << offset();
   os << "\n - raw_type: " << raw_type();
   os << "\n - is_mutable: " << is_mutable();
-  os << "\n - type: " << type().kind();
+  os << "\n - type: " << type();
   os << "\n - is_mutable: " << is_mutable();
   os << "\n";
 }

@@ -789,7 +789,7 @@ class WasmArray::BodyDescriptor final : public BodyDescriptorBase {
   template <typename ObjectVisitor>
   static inline void IterateBody(Map map, HeapObject obj, int object_size,
                                  ObjectVisitor* v) {
-    if (!WasmArray::GcSafeType(map)->element_type().is_reference_type()) return;
+    if (!WasmArray::GcSafeType(map)->element_type().is_reference()) return;
     IteratePointers(obj, WasmArray::kHeaderSize, object_size, v);
   }

@@ -813,7 +813,7 @@ class WasmStruct::BodyDescriptor final : public BodyDescriptorBase {
     WasmStruct wasm_struct = WasmStruct::cast(obj);
     wasm::StructType* type = WasmStruct::GcSafeType(map);
     for (uint32_t i = 0; i < type->field_count(); i++) {
-      if (!type->field(i).is_reference_type()) continue;
+      if (!type->field(i).is_reference()) continue;
       int offset = static_cast<int>(type->field_offset(i));
       v->VisitPointer(wasm_struct, wasm_struct.RawField(offset));
     }
@@ -544,7 +544,7 @@ int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
 }

 bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
-  return kind == kS128 || is_reference_type(kind);
+  return kind == kS128 || is_reference(kind);
 }

 void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,

@@ -1373,7 +1373,7 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,

 void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
   DCHECK_NE(dst, src);
-  DCHECK(kind == kI32 || is_reference_type(kind));
+  DCHECK(kind == kI32 || is_reference(kind));
   TurboAssembler::Move(dst, src);
 }

@@ -2193,9 +2193,8 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
     DCHECK_EQ(kind, kI32);
     cmp(lhs, Operand(0));
   } else {
-    DCHECK(kind == kI32 ||
-           (is_reference_type(kind) &&
-            (liftoff_cond == kEqual || liftoff_cond == kUnequal)));
+    DCHECK(kind == kI32 || (is_reference(kind) && (liftoff_cond == kEqual ||
+                                                   liftoff_cond == kUnequal)));
     cmp(lhs, rhs);
   }
   b(label, cond);

@@ -369,7 +369,7 @@ int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
 }

 bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
-  return kind == kS128 || is_reference_type(kind);
+  return kind == kS128 || is_reference(kind);
 }

 void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,

@@ -869,7 +869,7 @@ void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
   if (kind == kI32) {
     Mov(dst.W(), src.W());
   } else {
-    DCHECK(kI64 == kind || is_reference_type(kind));
+    DCHECK(kI64 == kind || is_reference(kind));
     Mov(dst.X(), src.X());
   }
 }
@@ -262,12 +262,11 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
 }

 int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
-  return is_reference_type(kind) ? kSystemPointerSize
-                                 : element_size_bytes(kind);
+  return is_reference(kind) ? kSystemPointerSize : element_size_bytes(kind);
 }

 bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
-  return is_reference_type(kind);
+  return is_reference(kind);
 }

 void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,

@@ -1120,7 +1119,7 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,

 void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
   DCHECK_NE(dst, src);
-  DCHECK(kI32 == kind || is_reference_type(kind));
+  DCHECK(kI32 == kind || is_reference(kind));
   mov(dst, src);
 }
@@ -93,11 +93,11 @@ class StackTransferRecipe {

 #if DEBUG
 bool CheckCompatibleStackSlotTypes(ValueKind dst, ValueKind src) {
-  if (is_object_reference_type(dst)) {
+  if (is_object_reference(dst)) {
     // Since Liftoff doesn't do accurate type tracking (e.g. on loop back
     // edges), we only care that pointer types stay amongst pointer types.
     // It's fine if ref/optref overwrite each other.
-    DCHECK(is_object_reference_type(src));
+    DCHECK(is_object_reference(src));
   } else {
     // All other types (primitive numbers, RTTs, bottom/stmt) must be equal.
     DCHECK_EQ(dst, src);

@@ -533,7 +533,7 @@ void LiftoffAssembler::CacheState::GetTaggedSlotsForOOLCode(
     ZoneVector<int>* slots, LiftoffRegList* spills,
     SpillLocation spill_location) {
   for (const auto& slot : stack_state) {
-    if (!is_reference_type(slot.kind())) continue;
+    if (!is_reference(slot.kind())) continue;

     if (spill_location == SpillLocation::kTopOfStack && slot.is_reg()) {
       // Registers get spilled just before the call to the runtime. In {spills}

@@ -552,7 +552,7 @@ void LiftoffAssembler::CacheState::DefineSafepoint(Safepoint& safepoint) {
   for (const auto& slot : stack_state) {
     DCHECK(!slot.is_reg());

-    if (is_reference_type(slot.kind())) {
+    if (is_reference(slot.kind())) {
       safepoint.DefinePointerSlot(GetSafepointIndexForStackSlot(slot));
     }
   }

@@ -582,8 +582,8 @@ LiftoffAssembler::LiftoffAssembler(std::unique_ptr<AssemblerBuffer> buffer)
 }

 LiftoffAssembler::~LiftoffAssembler() {
-  if (num_locals_ > kInlineLocalTypes) {
-    base::Free(more_local_types_);
+  if (num_locals_ > kInlineLocalKinds) {
+    base::Free(more_local_kinds_);
   }
 }

@@ -1203,10 +1203,10 @@ void LiftoffAssembler::SpillRegister(LiftoffRegister reg) {
 void LiftoffAssembler::set_num_locals(uint32_t num_locals) {
   DCHECK_EQ(0, num_locals_);  // only call this once.
   num_locals_ = num_locals;
-  if (num_locals > kInlineLocalTypes) {
-    more_local_types_ = reinterpret_cast<ValueKind*>(
+  if (num_locals > kInlineLocalKinds) {
+    more_local_kinds_ = reinterpret_cast<ValueKind*>(
        base::Malloc(num_locals * sizeof(ValueKind)));
-    DCHECK_NOT_NULL(more_local_types_);
+    DCHECK_NOT_NULL(more_local_kinds_);
   }
 }
@@ -1398,16 +1398,16 @@ class LiftoffAssembler : public TurboAssembler {

   int GetTotalFrameSize() const { return max_used_spill_offset_; }

-  ValueKind local_type(uint32_t index) {
+  ValueKind local_kind(uint32_t index) {
     DCHECK_GT(num_locals_, index);
     ValueKind* locals =
-        num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_;
+        num_locals_ <= kInlineLocalKinds ? local_kinds_ : more_local_kinds_;
     return locals[index];
   }

-  void set_local_type(uint32_t index, ValueKind kind) {
+  void set_local_kind(uint32_t index, ValueKind kind) {
     ValueKind* locals =
-        num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_;
+        num_locals_ <= kInlineLocalKinds ? local_kinds_ : more_local_kinds_;
     locals[index] = kind;
   }

@@ -1430,10 +1430,10 @@ class LiftoffAssembler : public TurboAssembler {
   LiftoffRegister LoadI64HalfIntoRegister(VarState slot, RegPairHalf half);

   uint32_t num_locals_ = 0;
-  static constexpr uint32_t kInlineLocalTypes = 16;
+  static constexpr uint32_t kInlineLocalKinds = 16;
   union {
-    ValueKind local_types_[kInlineLocalTypes];
-    ValueKind* more_local_types_;
+    ValueKind local_kinds_[kInlineLocalKinds];
+    ValueKind* more_local_kinds_;
   };
   static_assert(sizeof(ValueKind) == 1,
                 "Reconsider this inlining if ValueKind gets bigger");
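The local-kind storage renamed in the two hunks above is a small-buffer optimization: up to kInlineLocalKinds entries live inline in the assembler object, and only functions with more locals spill to a heap allocation, with a union overlaying the two representations (the static_assert keeps the inline array cheap by pinning ValueKind to one byte). A simplified standalone sketch of the same pattern, not the actual V8 class:

#include <cstdint>
#include <cstdlib>

using ValueKind = uint8_t;  // stand-in for the real enum

class LocalKinds {
 public:
  ~LocalKinds() {
    // Heap storage exists only when the inline buffer was too small.
    if (num_locals_ > kInlineLocalKinds) free(more_local_kinds_);
  }

  void set_num_locals(uint32_t num_locals) {
    num_locals_ = num_locals;
    if (num_locals > kInlineLocalKinds) {
      // Sketch only: the allocation is not checked for failure here.
      more_local_kinds_ =
          static_cast<ValueKind*>(malloc(num_locals * sizeof(ValueKind)));
    }
  }

  ValueKind local_kind(uint32_t index) const {
    // Pick inline or heap storage, like the getter in the hunk above.
    const ValueKind* locals =
        num_locals_ <= kInlineLocalKinds ? local_kinds_ : more_local_kinds_;
    return locals[index];
  }

  void set_local_kind(uint32_t index, ValueKind kind) {
    ValueKind* locals =
        num_locals_ <= kInlineLocalKinds ? local_kinds_ : more_local_kinds_;
    locals[index] = kind;
  }

 private:
  uint32_t num_locals_ = 0;
  static constexpr uint32_t kInlineLocalKinds = 16;
  union {
    ValueKind local_kinds_[kInlineLocalKinds];  // active when few locals
    ValueKind* more_local_kinds_;               // active when many locals
  };
};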
@@ -578,7 +578,7 @@ class LiftoffCompiler {
     __ set_num_locals(num_locals);
     for (int i = 0; i < num_locals; ++i) {
       ValueKind kind = decoder->local_type(i).kind();
-      __ set_local_type(i, kind);
+      __ set_local_kind(i, kind);
     }
   }

@@ -658,7 +658,7 @@ class LiftoffCompiler {
     // because other types cannot be initialized to constants.
     for (uint32_t param_idx = num_params; param_idx < __ num_locals();
          ++param_idx) {
-      ValueKind kind = __ local_type(param_idx);
+      ValueKind kind = __ local_kind(param_idx);
       if (kind != kI32 && kind != kI64) return true;
     }
     return false;

@@ -680,7 +680,7 @@ class LiftoffCompiler {

   void StartFunctionBody(FullDecoder* decoder, Control* block) {
     for (uint32_t i = 0; i < __ num_locals(); ++i) {
-      if (!CheckSupportedType(decoder, __ local_type(i), "param")) return;
+      if (!CheckSupportedType(decoder, __ local_kind(i), "param")) return;
     }

     // Parameter 0 is the instance parameter.

@@ -717,7 +717,7 @@ class LiftoffCompiler {
     // Input 0 is the code target, 1 is the instance. First parameter at 2.
     uint32_t input_idx = kInstanceParameterIndex + 1;
     for (uint32_t param_idx = 0; param_idx < num_params; ++param_idx) {
-      input_idx += ProcessParameter(__ local_type(param_idx), input_idx);
+      input_idx += ProcessParameter(__ local_kind(param_idx), input_idx);
     }
     int params_size = __ TopSpillOffset();
     DCHECK_EQ(input_idx, descriptor_->InputCount());

@@ -727,7 +727,7 @@ class LiftoffCompiler {
     if (SpillLocalsInitially(decoder, num_params)) {
       for (uint32_t param_idx = num_params; param_idx < __ num_locals();
            ++param_idx) {
-        ValueKind kind = __ local_type(param_idx);
+        ValueKind kind = __ local_kind(param_idx);
         __ PushStack(kind);
       }
       int spill_size = __ TopSpillOffset() - params_size;

@@ -735,7 +735,7 @@ class LiftoffCompiler {
     } else {
       for (uint32_t param_idx = num_params; param_idx < __ num_locals();
            ++param_idx) {
-        ValueKind kind = __ local_type(param_idx);
+        ValueKind kind = __ local_kind(param_idx);
         __ PushConstant(kind, int32_t{0});
       }
     }

@@ -745,8 +745,8 @@ class LiftoffCompiler {
     Register null_ref_reg = no_reg;
     for (uint32_t local_index = num_params; local_index < __ num_locals();
          ++local_index) {
-      ValueKind kind = __ local_type(local_index);
-      if (is_reference_type(kind)) {
+      ValueKind kind = __ local_kind(local_index);
+      if (is_reference(kind)) {
        if (null_ref_reg == no_reg) {
          null_ref_reg = __ GetUnusedRegister(kGpReg, {}).gp();
          LoadNullValue(null_ref_reg, {});
@@ -1214,20 +1214,20 @@ class LiftoffCompiler {
     auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) {
       if ((asm_.*emit_fn)(dst.fp(), src.fp())) return;
       ExternalReference ext_ref = fallback_fn();
-      ValueKind sig_reps[] = {kind};
-      ValueKindSig sig(0, 1, sig_reps);
+      ValueKind sig_kinds[] = {kind};
+      ValueKindSig sig(0, 1, sig_kinds);
       GenerateCCall(&dst, &sig, kind, &src, ext_ref);
     };
     EmitUnOp<kind, kind>(emit_with_c_fallback);
   }

   enum TypeConversionTrapping : bool { kCanTrap = true, kNoTrap = false };
-  template <ValueKind dst_type, ValueKind src_kind,
+  template <ValueKind dst_kind, ValueKind src_kind,
             TypeConversionTrapping can_trap>
   void EmitTypeConversion(FullDecoder* decoder, WasmOpcode opcode,
                           ExternalReference (*fallback_fn)()) {
     static constexpr RegClass src_rc = reg_class_for(src_kind);
-    static constexpr RegClass dst_rc = reg_class_for(dst_type);
+    static constexpr RegClass dst_rc = reg_class_for(dst_kind);
     LiftoffRegister src = __ PopToRegister();
     LiftoffRegister dst = src_rc == dst_rc
                               ? __ GetUnusedRegister(dst_rc, {src}, {})

@@ -1241,20 +1241,20 @@ class LiftoffCompiler {
       ExternalReference ext_ref = fallback_fn();
       if (can_trap) {
         // External references for potentially trapping conversions return int.
-        ValueKind sig_reps[] = {kI32, src_kind};
-        ValueKindSig sig(1, 1, sig_reps);
+        ValueKind sig_kinds[] = {kI32, src_kind};
+        ValueKindSig sig(1, 1, sig_kinds);
         LiftoffRegister ret_reg =
             __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst));
         LiftoffRegister dst_regs[] = {ret_reg, dst};
-        GenerateCCall(dst_regs, &sig, dst_type, &src, ext_ref);
+        GenerateCCall(dst_regs, &sig, dst_kind, &src, ext_ref);
         __ emit_cond_jump(kEqual, trap, kI32, ret_reg.gp());
       } else {
-        ValueKind sig_reps[] = {src_kind};
-        ValueKindSig sig(0, 1, sig_reps);
-        GenerateCCall(&dst, &sig, dst_type, &src, ext_ref);
+        ValueKind sig_kinds[] = {src_kind};
+        ValueKindSig sig(0, 1, sig_kinds);
+        GenerateCCall(&dst, &sig, dst_kind, &src, ext_ref);
       }
     }
-    __ PushRegister(dst_type, dst);
+    __ PushRegister(dst_kind, dst);
   }

   void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,

@@ -1272,9 +1272,9 @@ class LiftoffCompiler {
   case kExpr##opcode:                                                        \
     return EmitFloatUnOpWithCFallback<k##kind>(&LiftoffAssembler::emit_##fn, \
                                                &ExternalReference::wasm_##fn);
-#define CASE_TYPE_CONVERSION(opcode, dst_type, src_kind, ext_ref, can_trap) \
+#define CASE_TYPE_CONVERSION(opcode, dst_kind, src_kind, ext_ref, can_trap) \
   case kExpr##opcode:                                                       \
-    return EmitTypeConversion<k##dst_type, k##src_kind, can_trap>(          \
+    return EmitTypeConversion<k##dst_kind, k##src_kind, can_trap>(          \
        decoder, kExpr##opcode, ext_ref);
    switch (opcode) {
      CASE_I32_UNOP(I32Clz, i32_clz)
@@ -1505,9 +1505,9 @@ class LiftoffCompiler {
       [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {   \
         LiftoffRegister args[] = {lhs, rhs};                                 \
         auto ext_ref = ExternalReference::ext_ref_fn();                      \
-        ValueKind sig_reps[] = {k##kind, k##kind, k##kind};                  \
+        ValueKind sig_kinds[] = {k##kind, k##kind, k##kind};                 \
         const bool out_via_stack = k##kind == kI64;                          \
-        ValueKindSig sig(out_via_stack ? 0 : 1, 2, sig_reps);                \
+        ValueKindSig sig(out_via_stack ? 0 : 1, 2, sig_kinds);               \
         ValueKind out_arg_kind = out_via_stack ? kI64 : kStmt;               \
         GenerateCCall(&dst, &sig, out_arg_kind, args, ext_ref);              \
       });

@@ -1818,8 +1818,8 @@ class LiftoffCompiler {
     WasmCode::RuntimeStubId target = WasmCode::kWasmRefFunc;
     compiler::CallDescriptor* call_descriptor =
         GetBuiltinCallDescriptor<WasmRefFuncDescriptor>(compilation_zone_);
-    ValueKind sig_reps[] = {kRef, kI32};
-    ValueKindSig sig(1, 1, sig_reps);
+    ValueKind sig_kinds[] = {kRef, kI32};
+    ValueKindSig sig(1, 1, sig_kinds);
     LiftoffRegister func_index_reg = __ GetUnusedRegister(kGpReg, {});
     __ LoadConstant(func_index_reg, WasmValue(function_index));
     LiftoffAssembler::VarState func_index_var(kI32, func_index_reg, 0);

@@ -1919,7 +1919,7 @@ class LiftoffCompiler {
       state.dec_used(slot_reg);
       dst_slot->MakeStack();
     }
-    DCHECK_EQ(kind, __ local_type(local_index));
+    DCHECK_EQ(kind, __ local_kind(local_index));
     RegClass rc = reg_class_for(kind);
     LiftoffRegister dst_reg = __ GetUnusedRegister(rc, {});
     __ Fill(dst_reg, src_slot.offset(), kind);

@@ -2023,7 +2023,7 @@ class LiftoffCompiler {
       return;
     }

-    if (is_reference_type(kind)) {
+    if (is_reference(kind)) {
       if (global->mutability && global->imported) {
         LiftoffRegList pinned;
         Register base = no_reg;

@@ -2066,7 +2066,7 @@ class LiftoffCompiler {
       return;
     }

-    if (is_reference_type(kind)) {
+    if (is_reference(kind)) {
       if (global->mutability && global->imported) {
         LiftoffRegList pinned;
         LiftoffRegister value = pinned.set(__ PopToRegister(pinned));

@@ -2115,8 +2115,8 @@ class LiftoffCompiler {
         GetBuiltinCallDescriptor<WasmTableGetDescriptor>(compilation_zone_);

     ValueKind result_kind = env_->module->tables[imm.index].type.kind();
-    ValueKind sig_reps[] = {result_kind, kI32, kI32};
-    ValueKindSig sig(1, 2, sig_reps);
+    ValueKind sig_kinds[] = {result_kind, kI32, kI32};
+    ValueKindSig sig(1, 2, sig_kinds);

     __ PrepareBuiltinCall(&sig, call_descriptor, {table_index, index});
     __ CallRuntimeStub(target);
@@ -2148,8 +2148,8 @@ class LiftoffCompiler {
        GetBuiltinCallDescriptor<WasmTableSetDescriptor>(compilation_zone_);

    ValueKind table_kind = env_->module->tables[imm.index].type.kind();
-    ValueKind sig_reps[] = {kI32, kI32, table_kind};
-    ValueKindSig sig(0, 3, sig_reps);
+    ValueKind sig_kinds[] = {kI32, kI32, table_kind};
+    ValueKindSig sig(0, 3, sig_kinds);

    __ PrepareBuiltinCall(&sig, call_descriptor, {table_index, index, value});
    __ CallRuntimeStub(target);

@@ -3460,11 +3460,11 @@ class LiftoffCompiler {
    __ PushRegister(result_kind, dst);
  }

-  template <ValueKind src2_type, typename EmitFn>
+  template <ValueKind src2_kind, typename EmitFn>
  void EmitSimdReplaceLaneOp(EmitFn fn,
                             const SimdLaneImmediate<validate>& imm) {
    static constexpr RegClass src1_rc = reg_class_for(kS128);
-    static constexpr RegClass src2_rc = reg_class_for(src2_type);
+    static constexpr RegClass src2_rc = reg_class_for(src2_kind);
    static constexpr RegClass result_rc = reg_class_for(kS128);
    // On backends which need fp pair, src1_rc and result_rc end up being
    // kFpRegPair, which is != kFpReg, but we still want to pin src2 when it is

@@ -3925,8 +3925,8 @@ class LiftoffCompiler {
      }
    }

-    ValueKind sig_reps[] = {kPointerValueType, kind, kI64};
-    ValueKindSig sig(0, 3, sig_reps);
+    ValueKind sig_kinds[] = {kPointerValueType, kind, kI64};
+    ValueKindSig sig(0, 3, sig_kinds);

    __ PrepareBuiltinCall(&sig, call_descriptor,
                          {index, expected_value, timeout});

@@ -3961,8 +3961,8 @@ class LiftoffCompiler {
      __ emit_ptrsize_addi(index_plus_offset, index_plus_offset, offset);
    }

-    ValueKind sig_reps[] = {kI32, kPointerValueType, kI32};
-    ValueKindSig sig(1, 2, sig_reps);
+    ValueKind sig_kinds[] = {kI32, kPointerValueType, kI32};
+    ValueKindSig sig(1, 2, sig_kinds);
    auto call_descriptor =
        GetBuiltinCallDescriptor<WasmAtomicNotifyDescriptor>(compilation_zone_);

@@ -4124,8 +4124,8 @@ class LiftoffCompiler {
    __ LoadConstant(segment_index, WasmValue(imm.data_segment_index));

    ExternalReference ext_ref = ExternalReference::wasm_memory_init();
-    ValueKind sig_reps[] = {kI32, kPointerValueType, kI32, kI32, kI32, kI32};
-    ValueKindSig sig(1, 5, sig_reps);
+    ValueKind sig_kinds[] = {kI32, kPointerValueType, kI32, kI32, kI32, kI32};
+    ValueKindSig sig(1, 5, sig_kinds);
    LiftoffRegister args[] = {LiftoffRegister(instance), dst, src,
                              segment_index, size};
    // We don't need the instance anymore after the call. We can use the

@@ -4167,8 +4167,8 @@ class LiftoffCompiler {
    Register instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
    __ FillInstanceInto(instance);
    ExternalReference ext_ref = ExternalReference::wasm_memory_copy();
-    ValueKind sig_reps[] = {kI32, kPointerValueType, kI32, kI32, kI32};
-    ValueKindSig sig(1, 4, sig_reps);
+    ValueKind sig_kinds[] = {kI32, kPointerValueType, kI32, kI32, kI32};
+    ValueKindSig sig(1, 4, sig_kinds);
    LiftoffRegister args[] = {LiftoffRegister(instance), dst, src, size};
    // We don't need the instance anymore after the call. We can use the
    // register for the result.
@@ -4189,8 +4189,8 @@ class LiftoffCompiler {
    Register instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
    __ FillInstanceInto(instance);
    ExternalReference ext_ref = ExternalReference::wasm_memory_fill();
-    ValueKind sig_reps[] = {kI32, kPointerValueType, kI32, kI32, kI32};
-    ValueKindSig sig(1, 4, sig_reps);
+    ValueKind sig_kinds[] = {kI32, kPointerValueType, kI32, kI32, kI32};
+    ValueKindSig sig(1, 4, sig_kinds);
    LiftoffRegister args[] = {LiftoffRegister(instance), dst, value, size};
    // We don't need the instance anymore after the call. We can use the
    // register for the result.

@@ -4232,8 +4232,8 @@ class LiftoffCompiler {
    compiler::CallDescriptor* call_descriptor =
        GetBuiltinCallDescriptor<WasmTableInitDescriptor>(compilation_zone_);

-    ValueKind sig_reps[] = {kI32, kI32, kI32, kSmiValueType, kSmiValueType};
-    ValueKindSig sig(0, 5, sig_reps);
+    ValueKind sig_kinds[] = {kI32, kI32, kI32, kSmiValueType, kSmiValueType};
+    ValueKindSig sig(0, 5, sig_kinds);

    __ PrepareBuiltinCall(&sig, call_descriptor,
                          {dst, src, size, table_index, segment_index});

@@ -4289,8 +4289,8 @@ class LiftoffCompiler {
    compiler::CallDescriptor* call_descriptor =
        GetBuiltinCallDescriptor<WasmTableCopyDescriptor>(compilation_zone_);

-    ValueKind sig_reps[] = {kI32, kI32, kI32, kSmiValueType, kSmiValueType};
-    ValueKindSig sig(0, 5, sig_reps);
+    ValueKind sig_kinds[] = {kI32, kI32, kI32, kSmiValueType, kSmiValueType};
+    ValueKindSig sig(0, 5, sig_kinds);

    __ PrepareBuiltinCall(&sig, call_descriptor,
                          {dst, src, size, table_dst_index, table_src_index});

@@ -4325,8 +4325,8 @@ class LiftoffCompiler {
    compiler::CallDescriptor* call_descriptor =
        GetBuiltinCallDescriptor<WasmAllocateStructWithRttDescriptor>(
            compilation_zone_);
-    ValueKind sig_reps[] = {kRef, rtt.type.kind()};
-    ValueKindSig sig(1, 1, sig_reps);
+    ValueKind sig_kinds[] = {kRef, rtt.type.kind()};
+    ValueKindSig sig(1, 1, sig_kinds);
    LiftoffAssembler::VarState rtt_value =
        __ cache_state()->stack_state.end()[-1];
    __ PrepareBuiltinCall(&sig, call_descriptor, {rtt_value});

@@ -4398,7 +4398,7 @@ class LiftoffCompiler {
  }

  void ArrayNew(FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm,
-                ValueKind rtt_type, bool initial_value_on_stack) {
+                ValueKind rtt_kind, bool initial_value_on_stack) {
    // Max length check.
    {
      LiftoffRegister length =

@@ -4416,8 +4416,8 @@ class LiftoffCompiler {
    compiler::CallDescriptor* call_descriptor =
        GetBuiltinCallDescriptor<WasmAllocateArrayWithRttDescriptor>(
            compilation_zone_);
-    ValueKind sig_reps[] = {kRef, rtt_type, kI32, kI32};
-    ValueKindSig sig(1, 3, sig_reps);
+    ValueKind sig_kinds[] = {kRef, rtt_kind, kI32, kI32};
+    ValueKindSig sig(1, 3, sig_kinds);
    LiftoffAssembler::VarState rtt_var =
        __ cache_state()->stack_state.end()[-1];
    LiftoffAssembler::VarState length_var =
@@ -4583,12 +4583,12 @@ class LiftoffCompiler {
  void RttSub(FullDecoder* decoder, uint32_t type_index, const Value& parent,
              Value* result) {
    ValueKind parent_value_kind = parent.type.kind();
-    ValueKind rtt_value_type = kRttWithDepth;
+    ValueKind rtt_value_kind = kRttWithDepth;
    WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateRtt;
    compiler::CallDescriptor* call_descriptor =
        GetBuiltinCallDescriptor<WasmAllocateRttDescriptor>(compilation_zone_);
-    ValueKind sig_reps[] = {rtt_value_type, kI32, parent_value_kind};
-    ValueKindSig sig(1, 2, sig_reps);
+    ValueKind sig_kinds[] = {rtt_value_kind, kI32, parent_value_kind};
+    ValueKindSig sig(1, 2, sig_kinds);
    LiftoffAssembler::VarState parent_var =
        __ cache_state()->stack_state.end()[-1];
    LiftoffRegister type_reg = __ GetUnusedRegister(kGpReg, {});

@@ -4599,7 +4599,7 @@ class LiftoffCompiler {
    DefineSafepoint();
    // Drop the parent RTT.
    __ cache_state()->stack_state.pop_back(1);
-    __ PushRegister(rtt_value_type, LiftoffRegister(kReturnRegister0));
+    __ PushRegister(rtt_value_kind, LiftoffRegister(kReturnRegister0));
  }

  enum NullSucceeds : bool {  // --

@@ -4682,8 +4682,8 @@ class LiftoffCompiler {
    compiler::CallDescriptor* call_descriptor =
        GetBuiltinCallDescriptor<WasmSubtypeCheckDescriptor>(
            compilation_zone_);
-    ValueKind sig_reps[] = {kI32, kOptRef, rtt.type.kind()};
-    ValueKindSig sig(1, 2, sig_reps);
+    ValueKind sig_kinds[] = {kI32, kOptRef, rtt.type.kind()};
+    ValueKindSig sig(1, 2, sig_kinds);
    LiftoffAssembler::VarState rtt_state(kPointerValueType, rtt_reg, 0);
    LiftoffAssembler::VarState tmp1_state(kPointerValueType, tmp1, 0);
    __ PrepareBuiltinCall(&sig, call_descriptor, {tmp1_state, rtt_state});

@@ -5295,8 +5295,8 @@ class LiftoffCompiler {
    compiler::CallDescriptor* builtin_call_descriptor =
        GetBuiltinCallDescriptor<WasmAllocatePairDescriptor>(
            compilation_zone_);
-    ValueKind sig_reps[] = {kOptRef, kOptRef, kOptRef};
-    ValueKindSig builtin_sig(1, 2, sig_reps);
+    ValueKind sig_kinds[] = {kOptRef, kOptRef, kOptRef};
+    ValueKindSig builtin_sig(1, 2, sig_kinds);
    LiftoffRegister current_instance = instance;
    __ FillInstanceInto(current_instance.gp());
    LiftoffAssembler::VarState instance_var(kOptRef, current_instance, 0);

@@ -5384,7 +5384,7 @@ class LiftoffCompiler {
  void LoadObjectField(LiftoffRegister dst, Register src, Register offset_reg,
                       int offset, ValueKind kind, bool is_signed,
                       LiftoffRegList pinned) {
-    if (is_reference_type(kind)) {
+    if (is_reference(kind)) {
      __ LoadTaggedPointer(dst.gp(), src, offset_reg, offset, pinned);
    } else {
      // Primitive kind.

@@ -5396,7 +5396,7 @@ class LiftoffCompiler {
  void StoreObjectField(Register obj, Register offset_reg, int offset,
                        LiftoffRegister value, LiftoffRegList pinned,
                        ValueKind kind) {
-    if (is_reference_type(kind)) {
+    if (is_reference(kind)) {
      __ StoreTaggedPointer(obj, offset_reg, offset, value, pinned);
    } else {
      // Primitive kind.
@@ -244,12 +244,11 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
 }

 int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
-  return is_reference_type(kind) ? kSystemPointerSize
-                                 : element_size_bytes(kind);
+  return is_reference(kind) ? kSystemPointerSize : element_size_bytes(kind);
 }

 bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
-  return is_reference_type(kind);
+  return is_reference(kind);
 }

 void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,

@@ -816,7 +815,7 @@ void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
   if (kind == kI32) {
     movl(dst, src);
   } else {
-    DCHECK(kI64 == kind || is_reference_type(kind));
+    DCHECK(kI64 == kind || is_reference(kind));
     movq(dst, src);
   }
 }
@@ -2802,7 +2802,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
     Value fval = Pop(1);
     Value tval = Pop(0, fval.type);
     ValueType type = tval.type == kWasmBottom ? fval.type : tval.type;
-    if (!VALIDATE(!type.is_reference_type())) {
+    if (!VALIDATE(!type.is_reference())) {
       this->DecodeError(
           "select without type is only valid for value type inputs");
       return 0;

@@ -3214,8 +3214,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
       // We are in unreachable code, maintain the polymorphic stack.
       return 1;
     }
-    if (!VALIDATE(func_type.is_object_reference_type() &&
-                  func_type.has_index() &&
+    if (!VALIDATE(func_type.is_object_reference() && func_type.has_index() &&
                   this->module_->has_signature(func_type.ref_index()))) {
       PopTypeError(0, func_ref, "function reference");
       return 0;

@@ -3237,8 +3236,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
       // We are in unreachable code, maintain the polymorphic stack.
       return 1;
     }
-    if (!VALIDATE(func_type.is_object_reference_type() &&
-                  func_type.has_index() &&
+    if (!VALIDATE(func_type.is_object_reference() && func_type.has_index() &&
                   this->module_->has_signature(func_type.ref_index()))) {
       PopTypeError(0, func_ref, "function reference");
       return 0;
@@ -973,7 +973,7 @@ class WasmGraphBuildingInterface {
                                    const WasmModule* module) {
     StaticKnowledge result;
     result.object_can_be_null = object_type.is_nullable();
-    DCHECK(object_type.is_object_reference_type());  // Checked by validation.
+    DCHECK(object_type.is_object_reference());  // Checked by validation.
     // In the bottom case, the result is irrelevant.
     result.reference_kind =
         rtt_type != kWasmBottom && module->has_signature(rtt_type.ref_index())

@@ -1436,7 +1436,7 @@ class ModuleDecoderImpl : public Decoder {
     for (WasmGlobal& global : module->globals) {
       if (global.mutability && global.imported) {
         global.index = num_imported_mutable_globals++;
-      } else if (global.type.is_reference_type()) {
+      } else if (global.type.is_reference()) {
         global.offset = tagged_offset;
         // All entries in the tagged_globals_buffer have size 1.
         tagged_offset++;

@@ -1892,7 +1892,7 @@ class ModuleDecoderImpl : public Decoder {
     } else {
       const byte* position = pc();
       ValueType result = consume_value_type();
-      if (!result.is_reference_type()) {
+      if (!result.is_reference()) {
         error(position, "expected reference type");
       }
       return result;
@@ -1268,7 +1268,7 @@ bool InstanceBuilder::ProcessImportedWasmGlobalObject(
   DCHECK_LT(global.index, module_->num_imported_mutable_globals);
   Handle<Object> buffer;
   Address address_or_offset;
-  if (global.type.is_reference_type()) {
+  if (global.type.is_reference()) {
     static_assert(sizeof(global_object->offset()) <= sizeof(Address),
                   "The offset into the globals buffer does not fit into "
                   "the imported_mutable_globals array");

@@ -1347,7 +1347,7 @@ bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
     return false;
   }

-  if (global.type.is_reference_type()) {
+  if (global.type.is_reference()) {
     const char* error_message;
     if (!wasm::TypecheckJSObject(isolate_, module_, value, global.type,
                                  &error_message)) {

@@ -1605,7 +1605,7 @@ void InstanceBuilder::InitGlobals(Handle<WasmInstanceObject> instance) {
      uint32_t old_offset =
          module_->globals[global.init.immediate().index].offset;
      TRACE("init [globals+%u] = [globals+%d]\n", global.offset, old_offset);
-      if (global.type.is_reference_type()) {
+      if (global.type.is_reference()) {
        DCHECK(enabled_.has_reftypes());
        tagged_globals_->set(new_offset, tagged_globals_->get(old_offset));
      } else {

@@ -1758,7 +1758,7 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
        if (global.mutability && global.imported) {
          Handle<FixedArray> buffers_array(
              instance->imported_mutable_globals_buffers(), isolate_);
-          if (global.type.is_reference_type()) {
+          if (global.type.is_reference()) {
            tagged_buffer = handle(
                FixedArray::cast(buffers_array->get(global.index)), isolate_);
            // For externref globals we store the relative offset in the

@@ -1782,7 +1782,7 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
            offset = static_cast<uint32_t>(global_addr - backing_store);
          }
        } else {
-          if (global.type.is_reference_type()) {
+          if (global.type.is_reference()) {
            tagged_buffer = handle(instance->tagged_globals_buffer(), isolate_);
          } else {
            untagged_buffer =
@@ -179,12 +179,12 @@ enum ValueKind : uint8_t {
 #undef DEF_ENUM
 };

-constexpr bool is_reference_type(ValueKind kind) {
+constexpr bool is_reference(ValueKind kind) {
   return kind == kRef || kind == kOptRef || kind == kRtt ||
          kind == kRttWithDepth;
 }

-constexpr bool is_object_reference_type(ValueKind kind) {
+constexpr bool is_object_reference(ValueKind kind) {
   return kind == kRef || kind == kOptRef;
 }

@@ -305,12 +305,10 @@ class ValueType {
   }

   /******************************** Type checks *******************************/
-  constexpr bool is_reference_type() const {
-    return wasm::is_reference_type(kind());
-  }
+  constexpr bool is_reference() const { return wasm::is_reference(kind()); }

-  constexpr bool is_object_reference_type() const {
-    return wasm::is_object_reference_type(kind());
+  constexpr bool is_object_reference() const {
+    return wasm::is_object_reference(kind());
   }

   constexpr bool is_nullable() const { return kind() == kOptRef; }

@@ -324,7 +322,7 @@ class ValueType {
   constexpr bool has_depth() const { return kind() == kRttWithDepth; }

   constexpr bool has_index() const {
-    return is_rtt() || (is_object_reference_type() && heap_type().is_index());
+    return is_rtt() || (is_object_reference() && heap_type().is_index());
   }

   constexpr bool is_defaultable() const { return wasm::is_defaultable(kind()); }

@@ -340,12 +338,12 @@ class ValueType {
   /***************************** Field Accessors ******************************/
   constexpr ValueKind kind() const { return KindField::decode(bit_field_); }
   constexpr HeapType::Representation heap_representation() const {
-    CONSTEXPR_DCHECK(is_object_reference_type());
+    CONSTEXPR_DCHECK(is_object_reference());
     return static_cast<HeapType::Representation>(
         HeapTypeField::decode(bit_field_));
   }
   constexpr HeapType heap_type() const {
-    CONSTEXPR_DCHECK(is_object_reference_type());
+    CONSTEXPR_DCHECK(is_object_reference());
     return HeapType(heap_representation());
   }
   constexpr uint8_t depth() const {

@@ -357,7 +355,7 @@ class ValueType {
     return HeapTypeField::decode(bit_field_);
   }
   constexpr Nullability nullability() const {
-    CONSTEXPR_DCHECK(is_object_reference_type());
+    CONSTEXPR_DCHECK(is_object_reference());
     return kind() == kOptRef ? kNullable : kNonNullable;
   }
@@ -167,7 +167,7 @@ double WasmGlobalObject::GetF64() {

 Handle<Object> WasmGlobalObject::GetRef() {
   // We use this getter for externref and funcref.
-  DCHECK(type().is_reference_type());
+  DCHECK(type().is_reference());
   return handle(tagged_buffer().get(offset()), GetIsolate());
 }

@@ -1030,7 +1030,7 @@ MaybeHandle<WasmGlobalObject> WasmGlobalObject::New(
     global_obj->set_is_mutable(is_mutable);
   }

-  if (type.is_reference_type()) {
+  if (type.is_reference()) {
     DCHECK(maybe_untagged_buffer.is_null());
     Handle<FixedArray> tagged_buffer;
     if (!maybe_tagged_buffer.ToHandle(&tagged_buffer)) {

@@ -1566,7 +1566,7 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
 // static
 uint8_t* WasmInstanceObject::GetGlobalStorage(
     Handle<WasmInstanceObject> instance, const wasm::WasmGlobal& global) {
-  DCHECK(!global.type.is_reference_type());
+  DCHECK(!global.type.is_reference());
   if (global.mutability && global.imported) {
     return reinterpret_cast<byte*>(
         instance->imported_mutable_globals()[global.index]);

@@ -1579,7 +1579,7 @@ uint8_t* WasmInstanceObject::GetGlobalStorage(
 std::pair<Handle<FixedArray>, uint32_t>
 WasmInstanceObject::GetGlobalBufferAndIndex(Handle<WasmInstanceObject> instance,
                                             const wasm::WasmGlobal& global) {
-  DCHECK(global.type.is_reference_type());
+  DCHECK(global.type.is_reference());
   Isolate* isolate = instance->GetIsolate();
   if (global.mutability && global.imported) {
     Handle<FixedArray> buffer(

@@ -1597,7 +1597,7 @@ WasmInstanceObject::GetGlobalBufferAndIndex(Handle<WasmInstanceObject> instance,
 wasm::WasmValue WasmInstanceObject::GetGlobalValue(
     Handle<WasmInstanceObject> instance, const wasm::WasmGlobal& global) {
   Isolate* isolate = instance->GetIsolate();
-  if (global.type.is_reference_type()) {
+  if (global.type.is_reference()) {
     Handle<FixedArray> global_buffer;  // The buffer of the global.
     uint32_t global_index = 0;         // The index into the buffer.
     std::tie(global_buffer, global_index) =

@@ -2115,7 +2115,7 @@ namespace wasm {
 bool TypecheckJSObject(Isolate* isolate, const WasmModule* module,
                        Handle<Object> value, ValueType expected,
                        const char** error_message) {
-  DCHECK(expected.is_reference_type());
+  DCHECK(expected.is_reference());
   switch (expected.kind()) {
     case kOptRef:
       if (value->IsNull(isolate)) return true;
@@ -303,14 +303,14 @@ V8_NOINLINE V8_EXPORT_PRIVATE bool IsSubtypeOfImpl(
       break;
   }

-  DCHECK(subtype.is_object_reference_type());
+  DCHECK(subtype.is_object_reference());

   bool compatible_references = subtype.is_nullable()
                                    ? supertype.is_nullable()
-                                   : supertype.is_object_reference_type();
+                                   : supertype.is_object_reference();
   if (!compatible_references) return false;

-  DCHECK(supertype.is_object_reference_type());
+  DCHECK(supertype.is_object_reference());

   // Now check that sub_heap and super_heap are subtype-related.
@@ -293,7 +293,7 @@ class WasmGenerator {
         kExprBr, static_cast<uint32_t>(blocks_.size()) - 1 - target_block);
   }

-  template <ValueKind wanted_type>
+  template <ValueKind wanted_kind>
   void br_if(DataRange* data) {
     // There is always at least the block representing the function body.
     DCHECK(!blocks_.empty());

@@ -305,9 +305,9 @@ class WasmGenerator {
     builder_->EmitWithI32V(
         kExprBrIf, static_cast<uint32_t>(blocks_.size()) - 1 - target_block);
     ConsumeAndGenerate(break_types,
-                       wanted_type == kStmt
+                       wanted_kind == kStmt
                            ? Vector<ValueType>{}
-                           : VectorOf({ValueType::Primitive(wanted_type)}),
+                           : VectorOf({ValueType::Primitive(wanted_kind)}),
                        data);
   }

@@ -424,13 +424,13 @@ class WasmGenerator {
     }
   }

-  template <WasmOpcode memory_op, ValueKind... arg_types>
+  template <WasmOpcode memory_op, ValueKind... arg_kinds>
   void memop(DataRange* data) {
     const uint8_t align = data->get<uint8_t>() % (max_alignment(memory_op) + 1);
     const uint32_t offset = data->get<uint32_t>();

     // Generate the index and the arguments, if any.
-    Generate<kI32, arg_types...>(data);
+    Generate<kI32, arg_kinds...>(data);

     if (WasmOpcodes::IsPrefixOpcode(static_cast<WasmOpcode>(memory_op >> 8))) {
       DCHECK(memory_op >> 8 == kAtomicPrefix || memory_op >> 8 == kSimdPrefix);

@@ -496,14 +496,14 @@ class WasmGenerator {

   enum CallDirect : bool { kCallDirect = true, kCallIndirect = false };

-  template <ValueKind wanted_type>
+  template <ValueKind wanted_kind>
   void call(DataRange* data) {
-    call(data, ValueType::Primitive(wanted_type), kCallDirect);
+    call(data, ValueType::Primitive(wanted_kind), kCallDirect);
   }

-  template <ValueKind wanted_type>
+  template <ValueKind wanted_kind>
   void call_indirect(DataRange* data) {
-    call(data, ValueType::Primitive(wanted_type), kCallIndirect);
+    call(data, ValueType::Primitive(wanted_kind), kCallIndirect);
   }

   void Convert(ValueType src, ValueType dst) {

@@ -545,7 +545,7 @@ class WasmGenerator {
     }
   }

-  void call(DataRange* data, ValueType wanted_type, CallDirect call_direct) {
+  void call(DataRange* data, ValueType wanted_kind, CallDirect call_direct) {
     uint8_t random_byte = data->get<uint8_t>();
     int func_index = random_byte % functions_.size();
     uint32_t sig_index = functions_[func_index];

@@ -579,12 +579,12 @@ class WasmGenerator {
         builder_->EmitByte(0);  // Table index.
       }
     }
-    if (sig->return_count() == 0 && wanted_type != kWasmStmt) {
+    if (sig->return_count() == 0 && wanted_kind != kWasmStmt) {
       // The call did not generate a value. Thus just generate it here.
-      Generate(wanted_type, data);
+      Generate(wanted_kind, data);
       return;
     }
-    if (wanted_type == kWasmStmt) {
+    if (wanted_kind == kWasmStmt) {
       // The call did generate values, but we did not want one.
       for (size_t i = 0; i < sig->return_count(); ++i) {
         builder_->Emit(kExprDrop);
@@ -593,7 +593,7 @@ class WasmGenerator {
     }
     auto return_types = VectorOf(sig->returns().begin(), sig->return_count());
     auto wanted_types =
-        VectorOf(&wanted_type, wanted_type == kWasmStmt ? 0 : 1);
+        VectorOf(&wanted_kind, wanted_kind == kWasmStmt ? 0 : 1);
     ConsumeAndGenerate(return_types, wanted_types, data);
   }

@@ -616,34 +616,34 @@ class WasmGenerator {
     return {index, type};
   }

-  template <ValueKind wanted_type>
+  template <ValueKind wanted_kind>
   void local_op(DataRange* data, WasmOpcode opcode) {
     Var local = GetRandomLocal(data);
     // If there are no locals and no parameters, just generate any value (if a
     // value is needed), or do nothing.
     if (!local.is_valid()) {
-      if (wanted_type == kStmt) return;
-      return Generate<wanted_type>(data);
+      if (wanted_kind == kStmt) return;
+      return Generate<wanted_kind>(data);
     }

     if (opcode != kExprLocalGet) Generate(local.type, data);
     builder_->EmitWithU32V(opcode, local.index);
-    if (wanted_type != kStmt && local.type.kind() != wanted_type) {
-      Convert(local.type, ValueType::Primitive(wanted_type));
+    if (wanted_kind != kStmt && local.type.kind() != wanted_kind) {
+      Convert(local.type, ValueType::Primitive(wanted_kind));
     }
   }

-  template <ValueKind wanted_type>
+  template <ValueKind wanted_kind>
   void get_local(DataRange* data) {
-    static_assert(wanted_type != kStmt, "illegal type");
-    local_op<wanted_type>(data, kExprLocalGet);
+    static_assert(wanted_kind != kStmt, "illegal type");
+    local_op<wanted_kind>(data, kExprLocalGet);
   }

   void set_local(DataRange* data) { local_op<kStmt>(data, kExprLocalSet); }

-  template <ValueKind wanted_type>
+  template <ValueKind wanted_kind>
   void tee_local(DataRange* data) {
-    local_op<wanted_type>(data, kExprLocalTee);
+    local_op<wanted_kind>(data, kExprLocalTee);
   }

   template <size_t num_bytes>
@@ -669,39 +669,39 @@ class WasmGenerator {
     return {index, type};
   }

-  template <ValueKind wanted_type>
+  template <ValueKind wanted_kind>
   void global_op(DataRange* data) {
-    constexpr bool is_set = wanted_type == kStmt;
+    constexpr bool is_set = wanted_kind == kStmt;
     Var global = GetRandomGlobal(data, is_set);
     // If there are no globals, just generate any value (if a value is needed),
     // or do nothing.
     if (!global.is_valid()) {
-      if (wanted_type == kStmt) return;
-      return Generate<wanted_type>(data);
+      if (wanted_kind == kStmt) return;
+      return Generate<wanted_kind>(data);
     }

     if (is_set) Generate(global.type, data);
     builder_->EmitWithU32V(is_set ? kExprGlobalSet : kExprGlobalGet,
                            global.index);
-    if (!is_set && global.type.kind() != wanted_type) {
-      Convert(global.type, ValueType::Primitive(wanted_type));
+    if (!is_set && global.type.kind() != wanted_kind) {
+      Convert(global.type, ValueType::Primitive(wanted_kind));
     }
   }

-  template <ValueKind wanted_type>
+  template <ValueKind wanted_kind>
   void get_global(DataRange* data) {
-    static_assert(wanted_type != kStmt, "illegal type");
-    global_op<wanted_type>(data);
+    static_assert(wanted_kind != kStmt, "illegal type");
+    global_op<wanted_kind>(data);
   }

-  template <ValueKind select_type>
+  template <ValueKind select_kind>
   void select_with_type(DataRange* data) {
-    static_assert(select_type != kStmt, "illegal type for select");
-    Generate<select_type, select_type, kI32>(data);
+    static_assert(select_kind != kStmt, "illegal kind for select");
+    Generate<select_kind, select_kind, kI32>(data);
     // num_types is always 1.
     uint8_t num_types = 1;
     builder_->EmitWithU8U8(kExprSelectWithType, num_types,
-                           ValueType::Primitive(select_type).value_type_code());
+                           ValueType::Primitive(select_kind).value_type_code());
   }

   void set_global(DataRange* data) { global_op<kStmt>(data); }