[torque] Load and store bitfields
This change implements support for reading and writing bitfields from Torque code, and adds a couple of unit tests for this functionality. As Tobias suggested, the LocationReference for a bitfield access contains a nested LocationReference to where the bitfield struct is stored, so that store operations can read the original value, update part of it, and write it back.

Bug: v8:7793
Change-Id: I1004a5c7fcb6cf58df5ad50109b114bf89c80efc
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1957841
Commit-Queue: Seth Brenith <seth.brenith@microsoft.com>
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65487}
Parent: b297fcc50d
Commit: d5f180b70c
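For illustration, the Torque-level usage this change enables can be sketched as follows (a minimal example adapted from the test added in this change; the macro name and comments are illustrative only):

  bitfield struct TestBitFieldStruct extends uint8 {
    a: bool: 1 bit;
    b: uint16: 3 bit;
    c: uint32: 3 bit;
    d: bool: 1 bit;
  }

  @export
  macro ExampleBitFieldReadWrite(val: TestBitFieldStruct) {
    let copy: TestBitFieldStruct = val;  // Get a mutable local copy.
    const a: bool = copy.a;              // Load: decode the bit out of the struct.
    copy.a = !a;                         // Store: read the struct, update the bit, write it back.
    check(copy.a == !a);
  }

Each field store lowers to a fetch of the containing bitfield struct, an update of the relevant bits (via the CodeStubAssembler UpdateWord/UpdateWord32 helpers below), and an assignment of the whole struct back to its original location.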
@@ -7437,24 +7437,38 @@ TNode<Number> CodeStubAssembler::ToInteger(SloppyTNode<Context> context,

TNode<Uint32T> CodeStubAssembler::DecodeWord32(SloppyTNode<Word32T> word32,
                                               uint32_t shift, uint32_t mask) {
  return UncheckedCast<Uint32T>(Word32Shr(
      Word32And(word32, Int32Constant(mask)), static_cast<int>(shift)));
  DCHECK_EQ((mask >> shift) << shift, mask);
  return Unsigned(Word32And(Word32Shr(word32, static_cast<int>(shift)),
                            Int32Constant(mask >> shift)));
}

TNode<UintPtrT> CodeStubAssembler::DecodeWord(SloppyTNode<WordT> word,
                                              uint32_t shift, uint32_t mask) {
  return Unsigned(
      WordShr(WordAnd(word, IntPtrConstant(mask)), static_cast<int>(shift)));
  DCHECK_EQ((mask >> shift) << shift, mask);
  return Unsigned(WordAnd(WordShr(word, static_cast<int>(shift)),
                          IntPtrConstant(mask >> shift)));
}

TNode<Word32T> CodeStubAssembler::UpdateWord32(TNode<Word32T> word,
                                               TNode<Uint32T> value,
                                               uint32_t shift, uint32_t mask) {
  DCHECK_EQ((mask >> shift) << shift, mask);
  // Ensure the {value} fits fully in the mask.
  CSA_ASSERT(this, Uint32LessThanOrEqual(value, Uint32Constant(mask >> shift)));
  TNode<Word32T> encoded_value = Word32Shl(value, Int32Constant(shift));
  TNode<Word32T> inverted_mask = Int32Constant(~mask);
  return Word32Or(Word32And(word, inverted_mask), encoded_value);
}

TNode<WordT> CodeStubAssembler::UpdateWord(TNode<WordT> word,
                                           TNode<WordT> value, uint32_t shift,
                                           uint32_t mask) {
                                           TNode<UintPtrT> value,
                                           uint32_t shift, uint32_t mask) {
  DCHECK_EQ((mask >> shift) << shift, mask);
  // Ensure the {value} fits fully in the mask.
  CSA_ASSERT(this,
             UintPtrLessThanOrEqual(value, UintPtrConstant(mask >> shift)));
  TNode<WordT> encoded_value = WordShl(value, static_cast<int>(shift));
  TNode<IntPtrT> inverted_mask = IntPtrConstant(~static_cast<intptr_t>(mask));
  // Ensure the {value} fits fully in the mask.
  CSA_ASSERT(this, WordEqual(WordAnd(encoded_value, inverted_mask),
                             IntPtrConstant(0)));
  return WordOr(WordAnd(word, inverted_mask), encoded_value);
}

@@ -10320,7 +10334,7 @@ TNode<AllocationSite> CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
  StoreMapNoWriteBarrier(site, RootIndex::kAllocationSiteWithWeakNextMap);
  // Should match AllocationSite::Initialize.
  TNode<WordT> field = UpdateWord<AllocationSite::ElementsKindBits>(
      IntPtrConstant(0), IntPtrConstant(GetInitialFastElementsKind()));
      IntPtrConstant(0), UintPtrConstant(GetInitialFastElementsKind()));
  StoreObjectFieldNoWriteBarrier(
      site, AllocationSite::kTransitionInfoOrBoilerplateOffset,
      SmiTag(Signed(field)));

@@ -2744,14 +2744,39 @@ class V8_EXPORT_PRIVATE CodeStubAssembler

  // Returns a node that contains the updated values of a |BitField|.
  template <typename BitField>
  TNode<WordT> UpdateWord(TNode<WordT> word, TNode<WordT> value) {
  TNode<Word32T> UpdateWord32(TNode<Word32T> word, TNode<Uint32T> value) {
    return UpdateWord32(word, value, BitField::kShift, BitField::kMask);
  }

  // Returns a node that contains the updated values of a |BitField|.
  template <typename BitField>
  TNode<WordT> UpdateWord(TNode<WordT> word, TNode<UintPtrT> value) {
    return UpdateWord(word, value, BitField::kShift, BitField::kMask);
  }

  // Returns a node that contains the updated values of a |BitField|.
  template <typename BitField>
  TNode<Word32T> UpdateWordInWord32(TNode<Word32T> word,
                                    TNode<UintPtrT> value) {
    return UncheckedCast<Uint32T>(TruncateIntPtrToInt32(
        Signed(UpdateWord<BitField>(ChangeUint32ToWord(word), value))));
  }

  // Returns a node that contains the updated values of a |BitField|.
  template <typename BitField>
  TNode<WordT> UpdateWord32InWord(TNode<WordT> word, TNode<Uint32T> value) {
    return UpdateWord<BitField>(word, ChangeUint32ToWord(value));
  }

  // Returns a node that contains the updated {value} inside {word} starting
  // at {shift} and fitting in {mask}.
  TNode<WordT> UpdateWord(TNode<WordT> word, TNode<WordT> value, uint32_t shift,
                          uint32_t mask);
  TNode<Word32T> UpdateWord32(TNode<Word32T> word, TNode<Uint32T> value,
                              uint32_t shift, uint32_t mask);

  // Returns a node that contains the updated {value} inside {word} starting
  // at {shift} and fitting in {mask}.
  TNode<WordT> UpdateWord(TNode<WordT> word, TNode<UintPtrT> value,
                          uint32_t shift, uint32_t mask);

  // Returns true if any of the |T|'s bits in given |word32| are set.
  template <typename T>

@@ -30,7 +30,7 @@ bitfield struct SharedFunctionInfoFlags extends uint32 {
  allow_lazy_compilation: bool: 1 bit;
  needs_home_object: bool: 1 bit;
  is_asm_wasm_broken: bool: 1 bit;
  function_map_index: int32: 5 bit;
  function_map_index: uint32: 5 bit;
  disabled_optimization_reason: BailoutReason: 4 bit;
  requires_instance_members_initializer: bool: 1 bit;
  construct_as_builtin: bool: 1 bit;

@@ -721,6 +721,67 @@ void CSAGenerator::EmitInstruction(const StoreReferenceInstruction& instruction,
       << object << ", " << offset << "}, " << value << ");\n";
}

namespace {
std::string GetBitFieldSpecialization(const BitFieldStructType* container,
                                      const BitField& field) {
  std::string suffix = field.num_bits == 1 ? "Bit" : "Bits";
  return "TorqueGenerated" + container->name() +
         "Fields::" + CamelifyString(field.name_and_type.name) + suffix;
}
}  // namespace

void CSAGenerator::EmitInstruction(const LoadBitFieldInstruction& instruction,
                                   Stack<std::string>* stack) {
  std::string result_name = FreshNodeName();

  std::string bit_field_struct = stack->Pop();
  stack->Push(result_name);

  const BitFieldStructType* source_type = instruction.bit_field_struct_type;
  const Type* result_type = instruction.bit_field.name_and_type.type;
  bool source_uintptr = source_type->IsSubtypeOf(TypeOracle::GetUIntPtrType());
  bool result_uintptr = result_type->IsSubtypeOf(TypeOracle::GetUIntPtrType());
  std::string source_word_type = source_uintptr ? "WordT" : "Word32T";
  std::string decoder =
      source_uintptr
          ? (result_uintptr ? "DecodeWord" : "DecodeWord32FromWord")
          : (result_uintptr ? "DecodeWordFromWord32" : "DecodeWord32");

  out_ << " " << result_type->GetGeneratedTypeName() << result_name
       << " = ca_.UncheckedCast<" << result_type->GetGeneratedTNodeTypeName()
       << ">(CodeStubAssembler(state_)." << decoder << "<"
       << GetBitFieldSpecialization(source_type, instruction.bit_field)
       << ">(ca_.UncheckedCast<" << source_word_type << ">(" << bit_field_struct
       << ")));\n";
}

void CSAGenerator::EmitInstruction(const StoreBitFieldInstruction& instruction,
                                   Stack<std::string>* stack) {
  std::string result_name = FreshNodeName();

  std::string value = stack->Pop();
  std::string bit_field_struct = stack->Pop();
  stack->Push(result_name);

  const BitFieldStructType* struct_type = instruction.bit_field_struct_type;
  const Type* field_type = instruction.bit_field.name_and_type.type;
  bool struct_uintptr = struct_type->IsSubtypeOf(TypeOracle::GetUIntPtrType());
  bool field_uintptr = field_type->IsSubtypeOf(TypeOracle::GetUIntPtrType());
  std::string struct_word_type = struct_uintptr ? "WordT" : "Word32T";
  std::string field_word_type = field_uintptr ? "UintPtrT" : "Uint32T";
  std::string encoder =
      struct_uintptr ? (field_uintptr ? "UpdateWord" : "UpdateWord32InWord")
                     : (field_uintptr ? "UpdateWordInWord32" : "UpdateWord32");

  out_ << " " << struct_type->GetGeneratedTypeName() << result_name
       << " = ca_.UncheckedCast<" << struct_type->GetGeneratedTNodeTypeName()
       << ">(CodeStubAssembler(state_)." << encoder << "<"
       << GetBitFieldSpecialization(struct_type, instruction.bit_field)
       << ">(ca_.UncheckedCast<" << struct_word_type << ">(" << bit_field_struct
       << "), ca_.UncheckedCast<" << field_word_type << ">(" << value
       << ")));\n";
}

// static
void CSAGenerator::EmitCSAValue(VisitResult result,
                                const Stack<std::string>& values,

@@ -1931,6 +1931,12 @@ LocationReference ImplementationVisitor::GetLocationReference(
        ProjectStructField(reference.temporary(), fieldname),
        reference.temporary_description());
  }
  if (reference.ReferencedType()->IsBitFieldStructType()) {
    const BitFieldStructType* bitfield_struct =
        BitFieldStructType::cast(reference.ReferencedType());
    const BitField& field = bitfield_struct->LookupField(fieldname);
    return LocationReference::BitFieldAccess(reference, field);
  }
  if (reference.IsHeapReference()) {
    VisitResult ref = reference.heap_reference();
    auto generic_type = StructType::MatchUnaryGeneric(

@@ -2114,6 +2120,14 @@ VisitResult ImplementationVisitor::GenerateFetchFromLocation(
    assembler().Emit(LoadReferenceInstruction{reference.ReferencedType()});
    DCHECK_EQ(1, LoweredSlotCount(reference.ReferencedType()));
    return VisitResult(reference.ReferencedType(), assembler().TopRange(1));
  } else if (reference.IsBitFieldAccess()) {
    // First fetch the bitfield struct, then get the bits out of it.
    VisitResult bit_field_struct =
        GenerateFetchFromLocation(reference.bit_field_struct_location());
    assembler().Emit(LoadBitFieldInstruction{
        BitFieldStructType::cast(bit_field_struct.type()),
        reference.bit_field()});
    return VisitResult(reference.ReferencedType(), assembler().TopRange(1));
  } else {
    if (reference.IsHeapSlice()) {
      ReportError(

@@ -2157,6 +2171,21 @@ void ImplementationVisitor::GenerateAssignToLocation(
          silenced_float_value.stack_range(), referenced_type);
    }
    assembler().Emit(StoreReferenceInstruction{referenced_type});
  } else if (reference.IsBitFieldAccess()) {
    // First fetch the bitfield struct, then set the updated bits, then store it
    // back to where we found it.
    VisitResult bit_field_struct =
        GenerateFetchFromLocation(reference.bit_field_struct_location());
    VisitResult converted_value =
        GenerateImplicitConvert(reference.ReferencedType(), assignment_value);
    GenerateCopy(bit_field_struct);
    GenerateCopy(converted_value);
    assembler().Emit(StoreBitFieldInstruction{
        BitFieldStructType::cast(bit_field_struct.type()),
        reference.bit_field()});
    GenerateAssignToLocation(
        reference.bit_field_struct_location(),
        VisitResult(bit_field_struct.type(), assembler().TopRange(1)));
  } else {
    DCHECK(reference.IsTemporary());
    ReportError("cannot assign to temporary ",

@@ -83,6 +83,13 @@ class LocationReference {
    result.call_arguments_ = {object};
    return result;
  }
  static LocationReference BitFieldAccess(const LocationReference& object,
                                          BitField field) {
    LocationReference result;
    result.bit_field_struct_ = std::make_shared<LocationReference>(object);
    result.bit_field_ = std::move(field);
    return result;
  }

  bool IsConst() const { return temporary_.has_value(); }

@@ -106,15 +113,32 @@ class LocationReference {
    DCHECK(IsHeapSlice());
    return *heap_slice_;
  }
  bool IsBitFieldAccess() const {
    bool is_bitfield_access = bit_field_struct_ != nullptr;
    DCHECK_EQ(is_bitfield_access, bit_field_.has_value());
    return is_bitfield_access;
  }
  const LocationReference& bit_field_struct_location() const {
    DCHECK(IsBitFieldAccess());
    return *bit_field_struct_;
  }
  const BitField& bit_field() const {
    DCHECK(IsBitFieldAccess());
    return *bit_field_;
  }

  const Type* ReferencedType() const {
    if (IsHeapReference()) {
      return *Type::MatchUnaryGeneric(heap_reference().type(),
                                      TypeOracle::GetReferenceGeneric());
    } else if (IsHeapSlice()) {
    }
    if (IsHeapSlice()) {
      return *Type::MatchUnaryGeneric(heap_slice().type(),
                                      TypeOracle::GetSliceGeneric());
    }
    if (IsBitFieldAccess()) {
      return bit_field_->name_and_type.type;
    }
    return GetVisitResult().type();
  }

@@ -164,6 +188,13 @@ class LocationReference {
  VisitResultVector call_arguments_;
  base::Optional<Binding<LocalValue>*> binding_;

  // The location of the bitfield struct that contains this bitfield, if this
  // reference is a bitfield access. Uses a shared_ptr so that LocationReference
  // is copyable, allowing us to set this field equal to a copy of a
  // stack-allocated LocationReference.
  std::shared_ptr<const LocationReference> bit_field_struct_;
  base::Optional<BitField> bit_field_;

  LocationReference() = default;
};

@@ -313,6 +313,19 @@ void StoreReferenceInstruction::TypeInstruction(Stack<const Type*>* stack,
  ExpectSubtype(stack->Pop(), TypeOracle::GetHeapObjectType());
}

void LoadBitFieldInstruction::TypeInstruction(Stack<const Type*>* stack,
                                              ControlFlowGraph* cfg) const {
  ExpectType(bit_field_struct_type, stack->Pop());
  stack->Push(bit_field.name_and_type.type);
}

void StoreBitFieldInstruction::TypeInstruction(Stack<const Type*>* stack,
                                               ControlFlowGraph* cfg) const {
  ExpectSubtype(bit_field.name_and_type.type, stack->Pop());
  ExpectType(bit_field_struct_type, stack->Pop());
  stack->Push(bit_field_struct_type);
}

bool CallRuntimeInstruction::IsBlockTerminator() const {
  return is_tailcall || runtime_function->signature().return_type ==
                            TypeOracle::GetNeverType();

@@ -33,6 +33,8 @@ class RuntimeFunction;
  V(CreateFieldReferenceInstruction) \
  V(LoadReferenceInstruction)        \
  V(StoreReferenceInstruction)       \
  V(LoadBitFieldInstruction)         \
  V(StoreBitFieldInstruction)        \
  V(CallCsaMacroInstruction)         \
  V(CallIntrinsicInstruction)        \
  V(NamespaceConstantInstruction)    \

@@ -227,6 +229,29 @@ struct StoreReferenceInstruction : InstructionBase {
  const Type* type;
};

// Pops a bitfield struct; pushes a bitfield value extracted from it.
struct LoadBitFieldInstruction : InstructionBase {
  TORQUE_INSTRUCTION_BOILERPLATE()
  LoadBitFieldInstruction(const BitFieldStructType* bit_field_struct_type,
                          BitField bit_field)
      : bit_field_struct_type(bit_field_struct_type),
        bit_field(std::move(bit_field)) {}
  const BitFieldStructType* bit_field_struct_type;
  BitField bit_field;
};

// Pops a bitfield value and a bitfield struct; pushes a new bitfield struct
// containing the updated value.
struct StoreBitFieldInstruction : InstructionBase {
  TORQUE_INSTRUCTION_BOILERPLATE()
  StoreBitFieldInstruction(const BitFieldStructType* bit_field_struct_type,
                           BitField bit_field)
      : bit_field_struct_type(bit_field_struct_type),
        bit_field(std::move(bit_field)) {}
  const BitFieldStructType* bit_field_struct_type;
  BitField bit_field;
};

struct CallIntrinsicInstruction : InstructionBase {
  TORQUE_INSTRUCTION_BOILERPLATE()
  CallIntrinsicInstruction(Intrinsic* intrinsic,

@@ -258,6 +258,15 @@ std::string BitFieldStructType::ToExplicitString() const {
  return "bitfield struct " + name();
}

const BitField& BitFieldStructType::LookupField(const std::string& name) const {
  for (const BitField& field : fields_) {
    if (field.name_and_type.name == name) {
      return field;
    }
  }
  ReportError("Couldn't find bitfield ", name);
}

void AggregateType::CheckForDuplicateFields() const {
  // Check the aggregate hierarchy and currently defined class for duplicate
  // field declarations.

@@ -499,8 +508,7 @@ std::vector<Field> ClassType::ComputeAllFields() const {

void ClassType::GenerateAccessors() {
  // For each field, construct AST snippets that implement a CSA accessor
  // function and define a corresponding '.field' operator. The
  // implementation iterator will turn the snippets into code.
  // function. The implementation iterator will turn the snippets into code.
  for (auto& field : fields_) {
    if (field.index || field.name_and_type.type == TypeOracle::GetVoidType()) {
      continue;

@@ -791,12 +799,11 @@ bool IsAllowedAsBitField(const Type* type) {
    // compelling use case.
    return false;
  }
  // Any integer-ish type, including bools and enums which inherit from integer
  // types, are allowed.
  // Any unsigned integer-ish type, including bools and enums which inherit from
  // unsigned integer types, are allowed. Currently decoding signed integers is
  // not supported.
  return type->IsSubtypeOf(TypeOracle::GetUint32Type()) ||
         type->IsSubtypeOf(TypeOracle::GetUIntPtrType()) ||
         type->IsSubtypeOf(TypeOracle::GetInt32Type()) ||
         type->IsSubtypeOf(TypeOracle::GetIntPtrType()) ||
         type->IsSubtypeOf(TypeOracle::GetBoolType());
}

@@ -481,6 +481,8 @@ class V8_EXPORT_PRIVATE BitFieldStructType final : public Type {
  const std::string& name() const { return decl_->name->value; }
  const std::vector<BitField>& fields() const { return fields_; }

  const BitField& LookupField(const std::string& name) const;

 private:
  friend class TypeOracle;
  BitFieldStructType(Namespace* nspace, const Type* parent,

@@ -631,6 +631,95 @@ TEST(TestBranchOnBoolOptimization) {
  asm_tester.GenerateCode();
}

TEST(TestBitFieldLoad) {
  CcTest::InitializeVM();
  Isolate* isolate(CcTest::i_isolate());
  i::HandleScope scope(isolate);
  const int kNumParams = 5;
  CodeAssemblerTester asm_tester(isolate, kNumParams);
  TestTorqueAssembler m(asm_tester.state());
  {
    // Untag all of the parameters to get plain integer values.
    TNode<Uint8T> val =
        m.UncheckedCast<Uint8T>(m.Unsigned(m.SmiToInt32(m.Parameter(0))));
    TNode<BoolT> expected_a =
        m.UncheckedCast<BoolT>(m.Unsigned(m.SmiToInt32(m.Parameter(1))));
    TNode<Uint16T> expected_b =
        m.UncheckedCast<Uint16T>(m.Unsigned(m.SmiToInt32(m.Parameter(2))));
    TNode<Uint32T> expected_c =
        m.UncheckedCast<Uint32T>(m.Unsigned(m.SmiToInt32(m.Parameter(3))));
    TNode<BoolT> expected_d =
        m.UncheckedCast<BoolT>(m.Unsigned(m.SmiToInt32(m.Parameter(4))));

    // Call the Torque-defined macro, which verifies that reading each bitfield
    // out of val yields the correct result.
    m.TestBitFieldLoad(val, expected_a, expected_b, expected_c, expected_d);
    m.Return(m.UndefinedConstant());
  }
  FunctionTester ft(asm_tester.GenerateCode(), kNumParams);

  // Test every possible bit combination for this 8-bit value.
  for (int a = 0; a <= 1; ++a) {
    for (int b = 0; b <= 7; ++b) {
      for (int c = 0; c <= 7; ++c) {
        for (int d = 0; d <= 1; ++d) {
          int val = a | ((b & 7) << 1) | (c << 4) | (d << 7);
          ft.Call(ft.Val(val), ft.Val(a), ft.Val(b), ft.Val(c), ft.Val(d));
        }
      }
    }
  }
}

TEST(TestBitFieldStore) {
  CcTest::InitializeVM();
  Isolate* isolate(CcTest::i_isolate());
  i::HandleScope scope(isolate);
  const int kNumParams = 1;
  CodeAssemblerTester asm_tester(isolate, kNumParams);
  TestTorqueAssembler m(asm_tester.state());
  {
    // Untag the parameters to get a plain integer value.
    TNode<Uint8T> val =
        m.UncheckedCast<Uint8T>(m.Unsigned(m.SmiToInt32(m.Parameter(0))));

    m.TestBitFieldStore(val);
    m.Return(m.UndefinedConstant());
  }
  FunctionTester ft(asm_tester.GenerateCode(), kNumParams);

  // Test every possible bit combination for this 8-bit value.
  for (int i = 0; i < 256; ++i) {
    ft.Call(ft.Val(i));
  }
}

TEST(TestBitFieldUintptrOps) {
  CcTest::InitializeVM();
  Isolate* isolate(CcTest::i_isolate());
  i::HandleScope scope(isolate);
  const int kNumParams = 2;
  CodeAssemblerTester asm_tester(isolate, kNumParams);
  TestTorqueAssembler m(asm_tester.state());
  {
    // Untag the parameters to get a plain integer value.
    TNode<Uint32T> val2 =
        m.UncheckedCast<Uint32T>(m.Unsigned(m.SmiToInt32(m.Parameter(0))));
    TNode<UintPtrT> val3 = m.UncheckedCast<UintPtrT>(
        m.ChangeUint32ToWord(m.Unsigned(m.SmiToInt32(m.Parameter(1)))));

    m.TestBitFieldUintptrOps(val2, val3);
    m.Return(m.UndefinedConstant());
  }
  FunctionTester ft(asm_tester.GenerateCode(), kNumParams);

  // Construct the expected test values.
  int val2 = 3 | (61 << 5);
  int val3 = 1 | (500 << 1) | (0x1cc << 10);

  ft.Call(ft.Val(val2), ft.Val(val3));
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8

@@ -1028,4 +1028,80 @@ namespace test {
  }
  }

  bitfield struct TestBitFieldStruct extends uint8 {
    a: bool: 1 bit;
    b: uint16: 3 bit;
    c: uint32: 3 bit;
    d: bool: 1 bit;
  }

  @export
  macro TestBitFieldLoad(
      val: TestBitFieldStruct, expectedA: bool, expectedB: uint16,
      expectedC: uint32, expectedD: bool) {
    check(val.a == expectedA);
    check(val.b == expectedB);
    check(val.c == expectedC);
    check(val.d == expectedD);
  }

  @export
  macro TestBitFieldStore(val: TestBitFieldStruct) {
    let val: TestBitFieldStruct = val;  // Get a mutable local copy.
    const a: bool = val.a;
    const b: uint16 = val.b;
    let c: uint32 = val.c;
    const d: bool = val.d;

    val.a = !a;
    TestBitFieldLoad(val, !a, b, c, d);

    c = Unsigned(7 - Signed(val.c));
    val.c = c;
    TestBitFieldLoad(val, !a, b, c, d);

    val.d = val.b == val.c;
    TestBitFieldLoad(val, !a, b, c, b == c);
  }

  // Some other bitfield structs, to verify getting uintptr values out of word32
  // structs and vice versa.
  bitfield struct TestBitFieldStruct2 extends uint32 {
    a: uintptr: 5 bit;
    b: uintptr: 6 bit;
  }
  bitfield struct TestBitFieldStruct3 extends uintptr {
    c: bool: 1 bit;
    d: uint32: 9 bit;
    e: uintptr: 17 bit;
  }

  @export
  macro TestBitFieldUintptrOps(
      val2: TestBitFieldStruct2, val3: TestBitFieldStruct3) {
    let val2: TestBitFieldStruct2 = val2;  // Get a mutable local copy.
    let val3: TestBitFieldStruct3 = val3;  // Get a mutable local copy.

    // Caller is expected to provide these exact values, so we can verify
    // reading values before starting to write anything.
    check(val2.a == 3);
    check(val2.b == 61);
    check(val3.c);
    check(val3.d == 500);
    check(val3.e == 0x1cc);

    val2.b = 16;
    check(val2.a == 3);
    check(val2.b == 16);

    val2.b++;
    check(val2.a == 3);
    check(val2.b == 17);

    val3.d = 99;
    val3.e = 1234;
    check(val3.c);
    check(val3.d == 99);
    check(val3.e == 1234);
  }
}