Merge pull request #10148 from jorgbrown/sync-stage

Integrate from Piper for C++, Java, and Python

* Modernize conformance_cpp.cc.
* Don't request 64-byte alignment unless the toolchain supports it.
Commit cecbb0a70e by Jorg Brown, 2022-06-18 19:32:48 -07:00 (committed by GitHub)
33 changed files with 1053 additions and 744 deletions

View File

@ -3,6 +3,9 @@ Unreleased version
* Handle reflection for message splitting.
* make metadata fields lazy.
* Extend visibility of plugin library to upb
* Modernize conformance_cpp.cc.
* Don't request 64-byte alignment unless the toolchain supports it.
2022-05-27 version 21.1 (C++/Java/Python/PHP/Objective-C/C#/Ruby)

View File

@ -32,165 +32,176 @@
#include <stdarg.h>
#include <unistd.h>
#include <memory>
#include <string>
#include <utility>
#include <google/protobuf/stubs/logging.h>
#include <google/protobuf/stubs/common.h>
#include <google/protobuf/message.h>
#include <google/protobuf/text_format.h>
#include <google/protobuf/util/json_util.h>
#include <google/protobuf/util/type_resolver_util.h>
#include <google/protobuf/stubs/status.h>
#include <google/protobuf/stubs/statusor.h>
#include "conformance.pb.h"
#include "conformance.pb.h"
#include <google/protobuf/test_messages_proto2.pb.h>
#include <google/protobuf/test_messages_proto3.pb.h>
#include <google/protobuf/test_messages_proto3.pb.h>
#include <google/protobuf/util/type_resolver.h>
#include <google/protobuf/stubs/status.h>
#include <google/protobuf/stubs/status_macros.h>
using conformance::ConformanceRequest;
using conformance::ConformanceResponse;
using google::protobuf::Descriptor;
using google::protobuf::DescriptorPool;
using google::protobuf::Message;
using google::protobuf::MessageFactory;
using google::protobuf::TextFormat;
using google::protobuf::util::BinaryToJsonString;
using google::protobuf::util::JsonParseOptions;
using google::protobuf::util::JsonToBinaryString;
using google::protobuf::util::NewTypeResolverForDescriptorPool;
using google::protobuf::util::TypeResolver;
using protobuf_test_messages::proto3::TestAllTypesProto3;
using protobuf_test_messages::proto2::TestAllTypesProto2;
using std::string;
static const char kTypeUrlPrefix[] = "type.googleapis.com";
const char* kFailures[] = {
};
static string GetTypeUrl(const Descriptor* message) {
return string(kTypeUrlPrefix) + "/" + message->full_name();
}
int test_count = 0;
bool verbose = false;
TypeResolver* type_resolver;
string* type_url;
// Must be included last.
#include <google/protobuf/port_def.inc>
namespace google {
namespace protobuf {
namespace {
using ::conformance::ConformanceRequest;
using ::conformance::ConformanceResponse;
using ::google::protobuf::util::BinaryToJsonString;
using ::google::protobuf::util::JsonParseOptions;
using ::google::protobuf::util::JsonToBinaryString;
using ::google::protobuf::util::NewTypeResolverForDescriptorPool;
using ::google::protobuf::util::TypeResolver;
using ::protobuf_test_messages::proto2::TestAllTypesProto2;
using ::protobuf_test_messages::proto3::TestAllTypesProto3;
using util::Status;
bool CheckedRead(int fd, void *buf, size_t len) {
size_t ofs = 0;
util::Status ReadFd(int fd, char* buf, size_t len) {
while (len > 0) {
ssize_t bytes_read = read(fd, (char*)buf + ofs, len);
ssize_t bytes_read = read(fd, buf, len);
if (bytes_read == 0) return false;
if (bytes_read == 0) {
return util::DataLossError("unexpected EOF");
}
if (bytes_read < 0) {
GOOGLE_LOG(FATAL) << "Error reading from test runner: " << strerror(errno);
return util::ErrnoToStatus(errno, "error reading from test runner");
}
len -= bytes_read;
ofs += bytes_read;
buf += bytes_read;
}
return true;
return util::OkStatus();
}
void CheckedWrite(int fd, const void *buf, size_t len) {
util::Status WriteFd(int fd, const void* buf, size_t len) {
if (write(fd, buf, len) != len) {
GOOGLE_LOG(FATAL) << "Error writing to test runner: " << strerror(errno);
return util::ErrnoToStatus(errno, "error writing to test runner");
}
return util::OkStatus();
}
void DoTest(const ConformanceRequest& request, ConformanceResponse* response) {
Message *test_message;
google::protobuf::LinkMessageReflection<TestAllTypesProto2>();
google::protobuf::LinkMessageReflection<TestAllTypesProto3>();
const Descriptor *descriptor = DescriptorPool::generated_pool()->FindMessageTypeByName(
request.message_type());
if (!descriptor) {
GOOGLE_LOG(FATAL) << "No such message type: " << request.message_type();
class Harness {
public:
Harness() {
google::protobuf::LinkMessageReflection<TestAllTypesProto2>();
google::protobuf::LinkMessageReflection<TestAllTypesProto3>();
resolver_.reset(NewTypeResolverForDescriptorPool(
"type.googleapis.com", DescriptorPool::generated_pool()));
type_url_ = StrCat("type.googleapis.com/",
TestAllTypesProto3::GetDescriptor()->full_name());
}
test_message = MessageFactory::generated_factory()->GetPrototype(descriptor)->New();
util::StatusOr<ConformanceResponse> RunTest(
const ConformanceRequest& request);
// Returns Ok(true) if we're done processing requests.
util::StatusOr<bool> ServeConformanceRequest();
private:
bool verbose_ = false;
std::unique_ptr<TypeResolver> resolver_;
std::string type_url_;
};
util::StatusOr<ConformanceResponse> Harness::RunTest(
const ConformanceRequest& request) {
const Descriptor* descriptor =
DescriptorPool::generated_pool()->FindMessageTypeByName(
request.message_type());
if (descriptor == nullptr) {
return util::NotFoundError(
StrCat("No such message type: ", request.message_type()));
}
std::unique_ptr<Message> test_message(
MessageFactory::generated_factory()->GetPrototype(descriptor)->New());
ConformanceResponse response;
switch (request.payload_case()) {
case ConformanceRequest::kProtobufPayload: {
if (!test_message->ParseFromString(request.protobuf_payload())) {
// Getting parse details would involve something like:
// http://stackoverflow.com/questions/22121922/how-can-i-get-more-details-about-errors-generated-during-protobuf-parsing-c
response->set_parse_error("Parse error (no more details available).");
return;
response.set_parse_error("parse error (no more details available)");
return response;
}
break;
}
case ConformanceRequest::kJsonPayload: {
string proto_binary;
JsonParseOptions options;
options.ignore_unknown_fields =
(request.test_category() ==
conformance::JSON_IGNORE_UNKNOWN_PARSING_TEST);
conformance::JSON_IGNORE_UNKNOWN_PARSING_TEST);
std::string proto_binary;
util::Status status =
JsonToBinaryString(type_resolver, *type_url, request.json_payload(),
JsonToBinaryString(resolver_.get(), type_url_, request.json_payload(),
&proto_binary, options);
if (!status.ok()) {
response->set_parse_error(string("Parse error: ") +
std::string(status.message()));
return;
response.set_parse_error(
StrCat("parse error: ", status.message()));
return response;
}
if (!test_message->ParseFromString(proto_binary)) {
response->set_runtime_error(
"Parsing JSON generates invalid proto output.");
return;
response.set_runtime_error(
"parsing JSON generated invalid proto output");
return response;
}
break;
}
case ConformanceRequest::kTextPayload: {
if (!TextFormat::ParseFromString(request.text_payload(), test_message)) {
response->set_parse_error("Parse error");
return;
if (!TextFormat::ParseFromString(request.text_payload(),
test_message.get())) {
response.set_parse_error("parse error (no more details available)");
return response;
}
break;
}
case ConformanceRequest::PAYLOAD_NOT_SET:
GOOGLE_LOG(FATAL) << "Request didn't have payload.";
break;
return util::InvalidArgumentError("request didn't have payload");
default:
GOOGLE_LOG(FATAL) << "unknown payload type: " << request.payload_case();
break;
}
conformance::FailureSet failures;
if (descriptor == failures.GetDescriptor()) {
for (const char* s : kFailures) failures.add_failure(s);
test_message = &failures;
return util::InvalidArgumentError(
StrCat("unknown payload type", request.payload_case()));
}
switch (request.requested_output_format()) {
case conformance::UNSPECIFIED:
GOOGLE_LOG(FATAL) << "Unspecified output format";
break;
return util::InvalidArgumentError("unspecified output format");
case conformance::PROTOBUF: {
GOOGLE_CHECK(test_message->SerializeToString(
response->mutable_protobuf_payload()));
GOOGLE_CHECK(
test_message->SerializeToString(response.mutable_protobuf_payload()));
break;
}
case conformance::JSON: {
string proto_binary;
std::string proto_binary;
GOOGLE_CHECK(test_message->SerializeToString(&proto_binary));
util::Status status =
BinaryToJsonString(type_resolver, *type_url, proto_binary,
response->mutable_json_payload());
BinaryToJsonString(resolver_.get(), type_url_, proto_binary,
response.mutable_json_payload());
if (!status.ok()) {
response->set_serialize_error(
string("Failed to serialize JSON output: ") +
std::string(status.message()));
return;
response.set_serialize_error(StrCat(
"failed to serialize JSON output: ", status.message()));
}
break;
}
@ -199,70 +210,66 @@ void DoTest(const ConformanceRequest& request, ConformanceResponse* response) {
TextFormat::Printer printer;
printer.SetHideUnknownFields(!request.print_unknown_fields());
GOOGLE_CHECK(printer.PrintToString(*test_message,
response->mutable_text_payload()));
response.mutable_text_payload()));
break;
}
default:
GOOGLE_LOG(FATAL) << "Unknown output format: "
<< request.requested_output_format();
return util::InvalidArgumentError(StrCat(
"unknown output format", request.requested_output_format()));
}
return response;
}
bool DoTestIo() {
string serialized_input;
string serialized_output;
util::StatusOr<bool> Harness::ServeConformanceRequest() {
uint32_t in_len;
if (!ReadFd(STDIN_FILENO, reinterpret_cast<char*>(&in_len), sizeof(in_len))
.ok()) {
// EOF means we're done.
return true;
}
std::string serialized_input;
serialized_input.resize(in_len);
RETURN_IF_ERROR(ReadFd(STDIN_FILENO, serialized_input.data(), in_len));
ConformanceRequest request;
ConformanceResponse response;
uint32_t bytes;
GOOGLE_CHECK(request.ParseFromString(serialized_input));
if (!CheckedRead(STDIN_FILENO, &bytes, sizeof(uint32_t))) {
// EOF.
return false;
util::StatusOr<ConformanceResponse> response = RunTest(request);
RETURN_IF_ERROR(response.status());
std::string serialized_output;
response->SerializeToString(&serialized_output);
uint32_t out_len = static_cast<uint32_t>(serialized_output.size());
RETURN_IF_ERROR(WriteFd(STDOUT_FILENO, &out_len, sizeof(out_len)));
RETURN_IF_ERROR(WriteFd(STDOUT_FILENO, serialized_output.data(), out_len));
if (verbose_) {
GOOGLE_LOG(INFO) << "conformance-cpp: request=" << request.ShortDebugString()
<< ", response=" << response->ShortDebugString();
}
serialized_input.resize(bytes);
if (!CheckedRead(STDIN_FILENO, (char*)serialized_input.c_str(), bytes)) {
GOOGLE_LOG(ERROR) << "Unexpected EOF on stdin. " << strerror(errno);
}
if (!request.ParseFromString(serialized_input)) {
GOOGLE_LOG(FATAL) << "Parse of ConformanceRequest proto failed.";
return false;
}
DoTest(request, &response);
response.SerializeToString(&serialized_output);
bytes = serialized_output.size();
CheckedWrite(STDOUT_FILENO, &bytes, sizeof(uint32_t));
CheckedWrite(STDOUT_FILENO, serialized_output.c_str(), bytes);
if (verbose) {
fprintf(stderr, "conformance-cpp: request=%s, response=%s\n",
request.ShortDebugString().c_str(),
response.ShortDebugString().c_str());
}
test_count++;
return true;
return false;
}
} // namespace
} // namespace protobuf
} // namespace google
int main() {
type_resolver = NewTypeResolverForDescriptorPool(
kTypeUrlPrefix, DescriptorPool::generated_pool());
type_url = new string(GetTypeUrl(TestAllTypesProto3::descriptor()));
while (1) {
if (!google::protobuf::DoTestIo()) {
fprintf(stderr, "conformance-cpp: received EOF from test runner "
"after %d tests, exiting\n", test_count);
return 0;
google::protobuf::Harness harness;
int total_runs = 0;
while (true) {
auto is_done = harness.ServeConformanceRequest();
if (!is_done.ok()) {
GOOGLE_LOG(FATAL) << is_done.status();
}
if (*is_done) {
break;
}
total_runs++;
}
GOOGLE_LOG(INFO) << "conformance-cpp: received EOF from test runner after "
<< total_runs << " tests";
}
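
The rewritten harness above still speaks the conformance runner's framing on stdin/stdout: a raw 4-byte length (host byte order, as ReadFd/WriteFd read and write it) followed by that many bytes of serialized ConformanceRequest or ConformanceResponse. A minimal standalone sketch of that framing follows; WriteFrame/ReadFrame are illustrative names, not part of this PR.

```cpp
// Illustrative only -- mirrors the framing ServeConformanceRequest expects.
#include <unistd.h>

#include <cstddef>
#include <cstdint>
#include <string>

// Writes one length-prefixed frame to `fd`. Returns false on a short write.
bool WriteFrame(int fd, const std::string& payload) {
  uint32_t len = static_cast<uint32_t>(payload.size());
  if (write(fd, &len, sizeof(len)) != static_cast<ssize_t>(sizeof(len))) {
    return false;
  }
  return write(fd, payload.data(), len) == static_cast<ssize_t>(len);
}

// Reads one length-prefixed frame from `fd`. Returns false on EOF or error.
bool ReadFrame(int fd, std::string* payload) {
  uint32_t len = 0;
  char* header = reinterpret_cast<char*>(&len);
  for (size_t got = 0; got < sizeof(len);) {
    ssize_t n = read(fd, header + got, sizeof(len) - got);
    if (n <= 0) return false;  // EOF before a full header: no more requests.
    got += static_cast<size_t>(n);
  }
  payload->resize(len);
  for (size_t got = 0; got < len;) {
    ssize_t n = read(fd, &(*payload)[0] + got, len - got);
    if (n <= 0) return false;  // Truncated frame.
    got += static_cast<size_t>(n);
  }
  return true;
}
```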

View File

@ -463,15 +463,15 @@ public class DescriptorsTest {
/** Tests that parsing an unknown enum throws an exception */
@Test
public void testParseUnknownEnum() {
FieldDescriptorProto.Builder field = FieldDescriptorProto.newBuilder()
.setLabel(FieldDescriptorProto.Label.LABEL_OPTIONAL)
.setTypeName("UnknownEnum")
.setType(FieldDescriptorProto.Type.TYPE_ENUM)
.setName("bar")
.setNumber(1);
DescriptorProto.Builder messageType = DescriptorProto.newBuilder()
.setName("Foo")
.addField(field);
FieldDescriptorProto.Builder field =
FieldDescriptorProto.newBuilder()
.setLabel(FieldDescriptorProto.Label.LABEL_OPTIONAL)
.setTypeName("UnknownEnum")
.setType(FieldDescriptorProto.Type.TYPE_ENUM)
.setName("bar")
.setNumber(1);
DescriptorProto.Builder messageType =
DescriptorProto.newBuilder().setName("Foo").addField(field);
FileDescriptorProto fooProto =
FileDescriptorProto.newBuilder()
.setName("foo.proto")
@ -486,7 +486,6 @@ public class DescriptorsTest {
}
}
/**
* Tests the translate/crosslink for an example where a message field's name and type name are the
* same.

View File

@ -1943,7 +1943,7 @@ public class GeneratedMessageTest {
@Test
public void
extendableBuilder_extensionFieldContainingBuilder_setRepeatedFieldOverwritesElement() {
extendableBuilder_extensionFieldContainingBuilder_setRepeatedFieldOverwritesElement() {
TestAllExtensions.Builder builder = TestAllExtensions.newBuilder();
builder.addRepeatedField(REPEATED_NESTED_MESSAGE_EXTENSION, NestedMessage.getDefaultInstance());
// Calling getRepeatedFieldBuilder and ignoring the returned Builder should have no

View File

@ -429,6 +429,8 @@ libprotoc_la_SOURCES = \
google/protobuf/compiler/java/message_field_lite.h \
google/protobuf/compiler/java/message_lite.cc \
google/protobuf/compiler/java/message_lite.h \
google/protobuf/compiler/java/message_serialization.cc \
google/protobuf/compiler/java/message_serialization.h \
google/protobuf/compiler/java/name_resolver.cc \
google/protobuf/compiler/java/name_resolver.h \
google/protobuf/compiler/java/options.h \

View File

@ -369,6 +369,7 @@ set(libprotoc_srcs
${protobuf_SOURCE_DIR}/src/google/protobuf/compiler/java/message_field.cc
${protobuf_SOURCE_DIR}/src/google/protobuf/compiler/java/message_field_lite.cc
${protobuf_SOURCE_DIR}/src/google/protobuf/compiler/java/message_lite.cc
${protobuf_SOURCE_DIR}/src/google/protobuf/compiler/java/message_serialization.cc
${protobuf_SOURCE_DIR}/src/google/protobuf/compiler/java/name_resolver.cc
${protobuf_SOURCE_DIR}/src/google/protobuf/compiler/java/primitive_field.cc
${protobuf_SOURCE_DIR}/src/google/protobuf/compiler/java/primitive_field_lite.cc
@ -461,6 +462,7 @@ set(libprotoc_hdrs
${protobuf_SOURCE_DIR}/src/google/protobuf/compiler/java/message_field.h
${protobuf_SOURCE_DIR}/src/google/protobuf/compiler/java/message_field_lite.h
${protobuf_SOURCE_DIR}/src/google/protobuf/compiler/java/message_lite.h
${protobuf_SOURCE_DIR}/src/google/protobuf/compiler/java/message_serialization.h
${protobuf_SOURCE_DIR}/src/google/protobuf/compiler/java/name_resolver.h
${protobuf_SOURCE_DIR}/src/google/protobuf/compiler/java/names.h
${protobuf_SOURCE_DIR}/src/google/protobuf/compiler/java/options.h

View File

@ -55,6 +55,13 @@ namespace google {
namespace protobuf {
namespace internal {
// To prevent sharing cache lines between threads
#ifdef __cpp_aligned_new
enum { kCacheAlignment = 64 };
#else
enum { kCacheAlignment = alignof(max_align_t) }; // do the best we can
#endif
inline constexpr size_t AlignUpTo8(size_t n) {
// Align n to next multiple of 8 (from Hacker's Delight, Chapter 3.)
return (n + 7) & static_cast<size_t>(-8);
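
The new kCacheAlignment constant above only asks for 64-byte alignment when __cpp_aligned_new is available and falls back to alignof(max_align_t) otherwise, which is the "Don't request 64-byte alignment unless the toolchain supports it" item from the PR description: pre-C++17 toolchains are not guaranteed to honor over-aligned types in heap allocations. A small standalone sketch of the same guard; kCacheAlign, PaddedCounter, and the 64-byte figure (a common x86 cache-line size) are assumptions for the example.

```cpp
#include <cstddef>
#include <cstdio>

#ifdef __cpp_aligned_new
constexpr std::size_t kCacheAlign = 64;  // aligned operator new is available
#else
constexpr std::size_t kCacheAlign = alignof(std::max_align_t);  // best effort
#endif

// Two of these updated by different threads will not share a cache line
// when kCacheAlign really is the cache-line size, avoiding false sharing.
struct alignas(kCacheAlign) PaddedCounter {
  long value = 0;
};

int main() {
  PaddedCounter counters[2];
  counters[0].value = 1;
  std::printf("alignof=%zu sizeof=%zu\n", alignof(PaddedCounter),
              sizeof(counters[0]));
}
```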
@ -497,10 +504,10 @@ class PROTOBUF_EXPORT ThreadSafeArena {
// have fallback function calls in tail position. This substantially improves
// code for the happy path.
PROTOBUF_NDEBUG_INLINE bool MaybeAllocateAligned(size_t n, void** out) {
SerialArena* a;
SerialArena* arena;
if (PROTOBUF_PREDICT_TRUE(!alloc_policy_.should_record_allocs() &&
GetSerialArenaFromThreadCache(&a))) {
return a->MaybeAllocateAligned(n, out);
GetSerialArenaFromThreadCache(&arena))) {
return arena->MaybeAllocateAligned(n, out);
}
return false;
}
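
The comment above notes that the allocation fast paths keep their fallback calls in tail position so the happy path stays small and inlinable. A rough sketch of that shape, with __builtin_expect standing in for PROTOBUF_PREDICT_TRUE (GCC/Clang only); this is an illustration, not protobuf's allocator.

```cpp
#include <cstddef>
#include <cstdlib>

void* AllocateSlow(std::size_t n);  // out-of-line fallback

inline void* Allocate(std::size_t n, char*& ptr, char* limit) {
  if (__builtin_expect(n <= static_cast<std::size_t>(limit - ptr), 1)) {
    void* result = ptr;  // happy path: bump-pointer allocation
    ptr += n;
    return result;
  }
  return AllocateSlow(n);  // fallback call in tail position
}

void* AllocateSlow(std::size_t n) { return std::malloc(n); }  // toy fallback

int main() {
  char buffer[64];
  char* ptr = buffer;
  char* limit = buffer + sizeof(buffer);
  void* in_buffer = Allocate(16, ptr, limit);   // served from the buffer
  void* from_heap = Allocate(128, ptr, limit);  // too large: goes slow
  std::free(from_heap);
  (void)in_buffer;
}
```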
@ -564,7 +571,7 @@ class PROTOBUF_EXPORT ThreadSafeArena {
// fast path optimizes the case where a single thread uses multiple arenas.
ThreadCache* tc = &thread_cache();
SerialArena* serial = hint_.load(std::memory_order_acquire);
if (PROTOBUF_PREDICT_TRUE(serial != NULL && serial->owner() == tc)) {
if (PROTOBUF_PREDICT_TRUE(serial != nullptr && serial->owner() == tc)) {
*arena = serial;
return true;
}
@ -602,7 +609,7 @@ class PROTOBUF_EXPORT ThreadSafeArena {
#ifdef _MSC_VER
#pragma warning(disable : 4324)
#endif
struct alignas(64) ThreadCache {
struct alignas(kCacheAlignment) ThreadCache {
#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
// If we are using the ThreadLocalStorage class to store the ThreadCache,
// then the ThreadCache's default constructor has to be responsible for
@ -610,7 +617,7 @@ class PROTOBUF_EXPORT ThreadSafeArena {
ThreadCache()
: next_lifecycle_id(0),
last_lifecycle_id_seen(-1),
last_serial_arena(NULL) {}
last_serial_arena(nullptr) {}
#endif
// Number of per-thread lifecycle IDs to reserve. Must be power of two.
@ -633,7 +640,7 @@ class PROTOBUF_EXPORT ThreadSafeArena {
#ifdef _MSC_VER
#pragma warning(disable : 4324)
#endif
struct alignas(64) CacheAlignedLifecycleIdGenerator {
struct alignas(kCacheAlignment) CacheAlignedLifecycleIdGenerator {
std::atomic<LifecycleIdAtomic> id;
};
static CacheAlignedLifecycleIdGenerator lifecycle_id_generator_;

View File

@ -200,7 +200,7 @@ std::string* ArenaStringPtr::Release() {
if (IsDefault()) return nullptr;
std::string* released = tagged_ptr_.Get();
if (!tagged_ptr_.IsAllocated()) {
if (tagged_ptr_.IsArena()) {
released = tagged_ptr_.IsMutable() ? new std::string(std::move(*released))
: new std::string(*released);
}
@ -229,9 +229,7 @@ void ArenaStringPtr::SetAllocated(std::string* value, Arena* arena) {
}
void ArenaStringPtr::Destroy() {
if (tagged_ptr_.IsAllocated()) {
delete tagged_ptr_.Get();
}
delete tagged_ptr_.GetIfAllocated();
}
void ArenaStringPtr::ClearToEmpty() {

View File

@ -96,13 +96,12 @@ class PROTOBUF_EXPORT LazyString {
class TaggedStringPtr {
public:
// Bit flags qualifying string properties. We can use up to 3 bits as
// ptr_ is guaranteed and enforced to be aligned on 8 byte boundaries.
// Bit flags qualifying string properties. We can use 2 bits as
// ptr_ is guaranteed and enforced to be aligned on 4 byte boundaries.
enum Flags {
kArenaBit = 0x1, // ptr is arena allocated
kAllocatedBit = 0x2, // ptr is heap allocated
kMutableBit = 0x4, // ptr contents are fully mutable
kMask = 0x7 // Bit mask
kMutableBit = 0x2, // ptr contents are fully mutable
kMask = 0x3 // Bit mask
};
// Composed logical types
@ -112,7 +111,7 @@ class TaggedStringPtr {
// Allocated strings are mutable and (as the name implies) owned.
// A heap allocated string must be deleted.
kAllocated = kAllocatedBit | kMutableBit,
kAllocated = kMutableBit,
// Mutable arena strings are strings where the string instance is owned
// by the arena, but the string contents itself are owned by the string
@ -166,8 +165,16 @@ class TaggedStringPtr {
// Returns true if the current string is an immutable default value.
inline bool IsDefault() const { return (as_int() & kMask) == kDefault; }
// Returns true if the current string is a heap allocated mutable value.
inline bool IsAllocated() const { return as_int() & kAllocatedBit; }
// If the current string is a heap-allocated mutable value, returns a pointer
// to it. Returns nullptr otherwise.
inline std::string *GetIfAllocated() const {
auto allocated = as_int() ^ kAllocated;
if (allocated & kMask) return nullptr;
auto ptr = reinterpret_cast<std::string*>(allocated);
PROTOBUF_ASSUME(ptr != nullptr);
return ptr;
}
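
GetIfAllocated above relies on the string pointer being stored with its type flags in the low bits: XOR-ing with the expected kAllocated pattern leaves zero tag bits only when the flags match exactly, and clears them at the same time, so one mask test both classifies the pointer and untags it. A standalone illustration of the trick; TaggedPtr here is a hypothetical type, not protobuf's TaggedStringPtr.

```cpp
#include <cassert>
#include <cstdint>
#include <string>

class TaggedPtr {
 public:
  enum Flags : uintptr_t { kArenaBit = 0x1, kMutableBit = 0x2, kMask = 0x3 };
  enum Type : uintptr_t {
    kAllocated = kMutableBit,                // heap-owned, mutable
    kMutableArena = kArenaBit | kMutableBit  // arena-owned, mutable contents
  };

  void Set(std::string* p, Type t) {
    assert((reinterpret_cast<uintptr_t>(p) & kMask) == 0);  // alignment holds
    bits_ = reinterpret_cast<uintptr_t>(p) | t;
  }
  // Returns the pointer only if the tag is exactly kAllocated, else nullptr.
  std::string* GetIfAllocated() const {
    uintptr_t x = bits_ ^ kAllocated;  // low bits zero iff tag == kAllocated
    if (x & kMask) return nullptr;
    return reinterpret_cast<std::string*>(x);
  }

 private:
  uintptr_t bits_ = 0;
};

int main() {
  TaggedPtr tp;
  tp.Set(new std::string("heap"), TaggedPtr::kAllocated);
  delete tp.GetIfAllocated();  // non-null, so the heap string is freed
}
```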
// Returns true if the current string is an arena allocated value.
// This means it's either a mutable or fixed size arena string.
@ -224,8 +231,8 @@ static_assert(std::is_trivial<TaggedStringPtr>::value,
// Because ArenaStringPtr is used in oneof unions, its constructor is a NOP and
// the field is always manually initialized via method calls.
//
// See TaggedPtr for more information about the types of string values being
// held, and the mutable and ownership invariants for each type.
// See TaggedStringPtr for more information about the types of string values
// being held, and the mutable and ownership invariants for each type.
struct PROTOBUF_EXPORT ArenaStringPtr {
ArenaStringPtr() = default;
constexpr ArenaStringPtr(ExplicitlyConstructedArenaString* default_value,

View File

@ -61,12 +61,13 @@ PROTOBUF_THREAD_LOCAL absl::profiling_internal::ExponentialBiased
} // namespace
PROTOBUF_THREAD_LOCAL int64_t global_next_sample = 1LL << 10;
PROTOBUF_THREAD_LOCAL SamplingState global_sampling_state = {
.next_sample = int64_t{1} << 10, .sample_stride = int64_t{1} << 10};
ThreadSafeArenaStats::ThreadSafeArenaStats() { PrepareForSampling(); }
ThreadSafeArenaStats::ThreadSafeArenaStats() { PrepareForSampling(0); }
ThreadSafeArenaStats::~ThreadSafeArenaStats() = default;
void ThreadSafeArenaStats::PrepareForSampling() {
void ThreadSafeArenaStats::PrepareForSampling(int64_t stride) {
num_allocations.store(0, std::memory_order_relaxed);
num_resets.store(0, std::memory_order_relaxed);
bytes_requested.store(0, std::memory_order_relaxed);
@ -74,6 +75,7 @@ void ThreadSafeArenaStats::PrepareForSampling() {
bytes_wasted.store(0, std::memory_order_relaxed);
max_bytes_allocated.store(0, std::memory_order_relaxed);
thread_ids.store(0, std::memory_order_relaxed);
weight = stride;
// The inliner makes hardcoded skip_count difficult (especially when combined
// with LTO). We use the ability to exclude stacks by regex when encoding
// instead.
@ -105,12 +107,15 @@ void RecordAllocateSlow(ThreadSafeArenaStats* info, size_t requested,
info->thread_ids.fetch_or(tid, std::memory_order_relaxed);
}
ThreadSafeArenaStats* SampleSlow(int64_t* next_sample) {
bool first = *next_sample < 0;
*next_sample = g_exponential_biased_generator.GetStride(
ThreadSafeArenaStats* SampleSlow(SamplingState& sampling_state) {
bool first = sampling_state.next_sample < 0;
const int64_t next_stride = g_exponential_biased_generator.GetStride(
g_arenaz_sample_parameter.load(std::memory_order_relaxed));
// Small values of interval are equivalent to just sampling next time.
ABSL_ASSERT(*next_sample >= 1);
ABSL_ASSERT(next_stride >= 1);
sampling_state.next_sample = next_stride;
const int64_t old_stride =
absl::exchange(sampling_state.sample_stride, next_stride);
// g_arenaz_enabled can be dynamically flipped, we need to set a threshold low
// enough that we will start sampling in a reasonable time, so we just use the
@ -119,11 +124,11 @@ ThreadSafeArenaStats* SampleSlow(int64_t* next_sample) {
// We will only be negative on our first count, so we should just retry in
// that case.
if (first) {
if (PROTOBUF_PREDICT_TRUE(--*next_sample > 0)) return nullptr;
return SampleSlow(next_sample);
if (PROTOBUF_PREDICT_TRUE(--sampling_state.next_sample > 0)) return nullptr;
return SampleSlow(sampling_state);
}
return GlobalThreadSafeArenazSampler().Register();
return GlobalThreadSafeArenazSampler().Register(old_stride);
}
void SetThreadSafeArenazEnabled(bool enabled) {
@ -150,7 +155,8 @@ void SetThreadSafeArenazMaxSamples(int32_t max) {
void SetThreadSafeArenazGlobalNextSample(int64_t next_sample) {
if (next_sample >= 0) {
global_next_sample = next_sample;
global_sampling_state.next_sample = next_sample;
global_sampling_state.sample_stride = next_sample;
} else {
ABSL_RAW_LOG(ERROR, "Invalid thread safe arenaz next sample: %lld",
static_cast<long long>(next_sample)); // NOLINT(runtime/int)

View File

@ -58,8 +58,11 @@ struct ThreadSafeArenaStats
~ThreadSafeArenaStats();
// Puts the object into a clean state, fills in the logically `const` members,
// blocking for any readers that are currently sampling the object.
void PrepareForSampling() ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
// blocking for any readers that are currently sampling the object. The
// 'stride' parameter is the number of ThreadSafeArenas that were instantiated
// between this sample and the previous one.
void PrepareForSampling(int64_t stride)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
// These fields are mutated by the various Record* APIs and need to be
// thread-safe.
@ -91,7 +94,18 @@ struct ThreadSafeArenaStats
}
};
ThreadSafeArenaStats* SampleSlow(int64_t* next_sample);
struct SamplingState {
// Number of ThreadSafeArenas that should be instantiated before the next
// ThreadSafeArena is sampled. This variable is decremented with each
// instantiation.
int64_t next_sample;
// When we make a sampling decision, we record the distance from the
// previous sample so we can weight each sample. 'distance' here is the
// number of instantiations of ThreadSafeArena.
int64_t sample_stride;
};
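
Recording sample_stride alongside each sample is what lets consumers weight the data: a record taken after `stride` arena instantiations stands in for roughly that many arenas. A simplified arithmetic sketch of that weighting (hypothetical numbers, not the sampler API):

```cpp
#include <cstdio>
#include <vector>

struct SampledArena {
  long bytes_allocated;
  long weight;  // the sample_stride captured when this arena was sampled
};

int main() {
  std::vector<SampledArena> samples = {
      {4096, 1024}, {256, 1024}, {65536, 2048}};

  long estimated_arenas = 0;
  double estimated_bytes = 0;
  for (const SampledArena& s : samples) {
    estimated_arenas += s.weight;  // each sample represents ~weight arenas
    estimated_bytes += static_cast<double>(s.weight) * s.bytes_allocated;
  }
  std::printf("~%ld arenas, ~%.0f bytes allocated overall\n", estimated_arenas,
              estimated_bytes);
}
```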
ThreadSafeArenaStats* SampleSlow(SamplingState& sampling_state);
void UnsampleSlow(ThreadSafeArenaStats* info);
class ThreadSafeArenaStatsHandle {
@ -138,24 +152,27 @@ class ThreadSafeArenaStatsHandle {
using ThreadSafeArenazSampler =
::absl::profiling_internal::SampleRecorder<ThreadSafeArenaStats>;
extern PROTOBUF_THREAD_LOCAL int64_t global_next_sample;
extern PROTOBUF_THREAD_LOCAL SamplingState global_sampling_state;
// Returns an RAII sampling handle that manages registration and unregistration
// with the global sampler.
inline ThreadSafeArenaStatsHandle Sample() {
if (PROTOBUF_PREDICT_TRUE(--global_next_sample > 0)) {
if (PROTOBUF_PREDICT_TRUE(--global_sampling_state.next_sample > 0)) {
return ThreadSafeArenaStatsHandle(nullptr);
}
return ThreadSafeArenaStatsHandle(SampleSlow(&global_next_sample));
return ThreadSafeArenaStatsHandle(SampleSlow(global_sampling_state));
}
#else
using SamplingState = int64_t;
struct ThreadSafeArenaStats {
static void RecordAllocateStats(ThreadSafeArenaStats*, size_t /*requested*/,
size_t /*allocated*/, size_t /*wasted*/) {}
};
ThreadSafeArenaStats* SampleSlow(int64_t* next_sample);
ThreadSafeArenaStats* SampleSlow(SamplingState& next_sample);
void UnsampleSlow(ThreadSafeArenaStats* info);
class ThreadSafeArenaStatsHandle {

View File

@ -64,8 +64,9 @@ std::vector<size_t> GetBytesAllocated(ThreadSafeArenazSampler* s) {
return res;
}
ThreadSafeArenaStats* Register(ThreadSafeArenazSampler* s, size_t size) {
auto* info = s->Register();
ThreadSafeArenaStats* Register(ThreadSafeArenazSampler* s, size_t size,
int64_t stride) {
auto* info = s->Register(stride);
assert(info != nullptr);
info->bytes_allocated.store(size);
return info;
@ -79,8 +80,9 @@ namespace {
TEST(ThreadSafeArenaStatsTest, PrepareForSampling) {
ThreadSafeArenaStats info;
constexpr int64_t kTestStride = 107;
MutexLock l(&info.init_mu);
info.PrepareForSampling();
info.PrepareForSampling(kTestStride);
EXPECT_EQ(info.num_allocations.load(), 0);
EXPECT_EQ(info.num_resets.load(), 0);
@ -88,6 +90,7 @@ TEST(ThreadSafeArenaStatsTest, PrepareForSampling) {
EXPECT_EQ(info.bytes_allocated.load(), 0);
EXPECT_EQ(info.bytes_wasted.load(), 0);
EXPECT_EQ(info.max_bytes_allocated.load(), 0);
EXPECT_EQ(info.weight, kTestStride);
info.num_allocations.store(1, std::memory_order_relaxed);
info.num_resets.store(1, std::memory_order_relaxed);
@ -96,19 +99,21 @@ TEST(ThreadSafeArenaStatsTest, PrepareForSampling) {
info.bytes_wasted.store(1, std::memory_order_relaxed);
info.max_bytes_allocated.store(1, std::memory_order_relaxed);
info.PrepareForSampling();
info.PrepareForSampling(2 * kTestStride);
EXPECT_EQ(info.num_allocations.load(), 0);
EXPECT_EQ(info.num_resets.load(), 0);
EXPECT_EQ(info.bytes_requested.load(), 0);
EXPECT_EQ(info.bytes_allocated.load(), 0);
EXPECT_EQ(info.bytes_wasted.load(), 0);
EXPECT_EQ(info.max_bytes_allocated.load(), 0);
EXPECT_EQ(info.weight, 2 * kTestStride);
}
TEST(ThreadSafeArenaStatsTest, RecordAllocateSlow) {
ThreadSafeArenaStats info;
constexpr int64_t kTestStride = 458;
MutexLock l(&info.init_mu);
info.PrepareForSampling();
info.PrepareForSampling(kTestStride);
RecordAllocateSlow(&info, /*requested=*/100, /*allocated=*/128, /*wasted=*/0);
EXPECT_EQ(info.num_allocations.load(), 1);
EXPECT_EQ(info.num_resets.load(), 0);
@ -128,8 +133,9 @@ TEST(ThreadSafeArenaStatsTest, RecordAllocateSlow) {
TEST(ThreadSafeArenaStatsTest, RecordResetSlow) {
ThreadSafeArenaStats info;
constexpr int64_t kTestStride = 584;
MutexLock l(&info.init_mu);
info.PrepareForSampling();
info.PrepareForSampling(kTestStride);
EXPECT_EQ(info.num_resets.load(), 0);
EXPECT_EQ(info.bytes_allocated.load(), 0);
RecordAllocateSlow(&info, /*requested=*/100, /*allocated=*/128, /*wasted=*/0);
@ -143,11 +149,12 @@ TEST(ThreadSafeArenaStatsTest, RecordResetSlow) {
TEST(ThreadSafeArenazSamplerTest, SmallSampleParameter) {
SetThreadSafeArenazEnabled(true);
SetThreadSafeArenazSampleParameter(100);
constexpr int64_t kTestStride = 0;
for (int i = 0; i < 1000; ++i) {
int64_t next_sample = 0;
ThreadSafeArenaStats* sample = SampleSlow(&next_sample);
EXPECT_GT(next_sample, 0);
SamplingState sampling_state = {kTestStride, kTestStride};
ThreadSafeArenaStats* sample = SampleSlow(sampling_state);
EXPECT_GT(sampling_state.next_sample, 0);
EXPECT_NE(sample, nullptr);
UnsampleSlow(sample);
}
@ -156,11 +163,12 @@ TEST(ThreadSafeArenazSamplerTest, SmallSampleParameter) {
TEST(ThreadSafeArenazSamplerTest, LargeSampleParameter) {
SetThreadSafeArenazEnabled(true);
SetThreadSafeArenazSampleParameter(std::numeric_limits<int32_t>::max());
constexpr int64_t kTestStride = 0;
for (int i = 0; i < 1000; ++i) {
int64_t next_sample = 0;
ThreadSafeArenaStats* sample = SampleSlow(&next_sample);
EXPECT_GT(next_sample, 0);
SamplingState sampling_state = {kTestStride, kTestStride};
ThreadSafeArenaStats* sample = SampleSlow(sampling_state);
EXPECT_GT(sampling_state.next_sample, 0);
EXPECT_NE(sample, nullptr);
UnsampleSlow(sample);
}
@ -187,7 +195,8 @@ TEST(ThreadSafeArenazSamplerTest, Sample) {
TEST(ThreadSafeArenazSamplerTest, Handle) {
auto& sampler = GlobalThreadSafeArenazSampler();
ThreadSafeArenaStatsHandle h(sampler.Register());
constexpr int64_t kTestStride = 17;
ThreadSafeArenaStatsHandle h(sampler.Register(kTestStride));
auto* info = ThreadSafeArenaStatsHandlePeer::GetInfo(&h);
info->bytes_allocated.store(0x12345678, std::memory_order_relaxed);
@ -195,6 +204,7 @@ TEST(ThreadSafeArenazSamplerTest, Handle) {
sampler.Iterate([&](const ThreadSafeArenaStats& h) {
if (&h == info) {
EXPECT_EQ(h.bytes_allocated.load(), 0x12345678);
EXPECT_EQ(h.weight, kTestStride);
found = true;
}
});
@ -216,10 +226,11 @@ TEST(ThreadSafeArenazSamplerTest, Handle) {
TEST(ThreadSafeArenazSamplerTest, Registration) {
ThreadSafeArenazSampler sampler;
auto* info1 = Register(&sampler, 1);
constexpr int64_t kTestStride = 100;
auto* info1 = Register(&sampler, 1, kTestStride);
EXPECT_THAT(GetBytesAllocated(&sampler), UnorderedElementsAre(1));
auto* info2 = Register(&sampler, 2);
auto* info2 = Register(&sampler, 2, kTestStride);
EXPECT_THAT(GetBytesAllocated(&sampler), UnorderedElementsAre(1, 2));
info1->bytes_allocated.store(3);
EXPECT_THAT(GetBytesAllocated(&sampler), UnorderedElementsAre(3, 2));
@ -231,16 +242,17 @@ TEST(ThreadSafeArenazSamplerTest, Registration) {
TEST(ThreadSafeArenazSamplerTest, Unregistration) {
ThreadSafeArenazSampler sampler;
std::vector<ThreadSafeArenaStats*> infos;
constexpr int64_t kTestStride = 200;
for (size_t i = 0; i < 3; ++i) {
infos.push_back(Register(&sampler, i));
infos.push_back(Register(&sampler, i, kTestStride));
}
EXPECT_THAT(GetBytesAllocated(&sampler), UnorderedElementsAre(0, 1, 2));
sampler.Unregister(infos[1]);
EXPECT_THAT(GetBytesAllocated(&sampler), UnorderedElementsAre(0, 2));
infos.push_back(Register(&sampler, 3));
infos.push_back(Register(&sampler, 4));
infos.push_back(Register(&sampler, 3, kTestStride));
infos.push_back(Register(&sampler, 4, kTestStride));
EXPECT_THAT(GetBytesAllocated(&sampler), UnorderedElementsAre(0, 2, 3, 4));
sampler.Unregister(infos[3]);
EXPECT_THAT(GetBytesAllocated(&sampler), UnorderedElementsAre(0, 2, 4));
@ -257,18 +269,19 @@ TEST(ThreadSafeArenazSamplerTest, MultiThreaded) {
ThreadPool pool(10);
for (int i = 0; i < 10; ++i) {
pool.Schedule([&sampler, &stop]() {
const int64_t sampling_stride = 11 + i % 3;
pool.Schedule([&sampler, &stop, sampling_stride]() {
std::random_device rd;
std::mt19937 gen(rd());
std::vector<ThreadSafeArenaStats*> infoz;
while (!stop.HasBeenNotified()) {
if (infoz.empty()) {
infoz.push_back(sampler.Register());
infoz.push_back(sampler.Register(sampling_stride));
}
switch (std::uniform_int_distribution<>(0, 1)(gen)) {
case 0: {
infoz.push_back(sampler.Register());
infoz.push_back(sampler.Register(sampling_stride));
break;
}
case 1: {
@ -277,6 +290,7 @@ TEST(ThreadSafeArenazSamplerTest, MultiThreaded) {
ThreadSafeArenaStats* info = infoz[p];
infoz[p] = infoz.back();
infoz.pop_back();
EXPECT_EQ(info->weight, sampling_stride);
sampler.Unregister(info);
break;
}
@ -292,9 +306,10 @@ TEST(ThreadSafeArenazSamplerTest, MultiThreaded) {
TEST(ThreadSafeArenazSamplerTest, Callback) {
ThreadSafeArenazSampler sampler;
constexpr int64_t kTestStride = 203;
auto* info1 = Register(&sampler, 1);
auto* info2 = Register(&sampler, 2);
auto* info1 = Register(&sampler, 1, kTestStride);
auto* info2 = Register(&sampler, 2, kTestStride);
static const ThreadSafeArenaStats* expected;

View File

@ -252,7 +252,9 @@ void RepeatedEnumFieldGenerator::GeneratePrivateMembers(
format("::$proto_ns$::RepeatedField<int> $name$_;\n");
if (descriptor_->is_packed() &&
HasGeneratedMethods(descriptor_->file(), options_)) {
format("mutable std::atomic<int> $cached_byte_size_name$;\n");
format(
"mutable ::$proto_ns$::internal::CachedSize "
"$cached_byte_size_name$;\n");
}
}
@ -364,7 +366,7 @@ void RepeatedEnumFieldGenerator::GenerateSerializeWithCachedSizesToArray(
format(
"{\n"
" int byte_size = "
"$cached_byte_size_field$.load(std::memory_order_relaxed);\n"
"$cached_byte_size_field$.Get();\n"
" if (byte_size > 0) {\n"
" target = stream->WriteEnumPacked(\n"
" $number$, $field$, byte_size, target);\n"
@ -402,8 +404,7 @@ void RepeatedEnumFieldGenerator::GenerateByteSize(io::Printer* printer) const {
"::_pbi::WireFormatLite::Int32Size(static_cast<$int32$>(data_size));\n"
"}\n"
"int cached_size = ::_pbi::ToCachedSize(data_size);\n"
"$cached_byte_size_field$.store(cached_size,\n"
" std::memory_order_relaxed);\n"
"$cached_byte_size_field$.Set(cached_size);\n"
"total_size += data_size;\n");
} else {
format("total_size += ($tag_size$UL * count) + data_size;\n");

View File

@ -2101,6 +2101,11 @@ void MessageGenerator::GenerateClassMethods(io::Printer* printer) {
"static constexpr int32_t kHasBitsOffset =\n"
" 8 * PROTOBUF_FIELD_OFFSET($classname$, _impl_._has_bits_);\n");
}
if (descriptor_->real_oneof_decl_count() > 0) {
format(
"static constexpr int32_t kOneofCaseOffset =\n"
" PROTOBUF_FIELD_OFFSET($classtype$, $oneof_case$);\n");
}
for (auto field : FieldRange(descriptor_)) {
field_generators_.get(field).GenerateInternalAccessorDeclarations(printer);
if (IsFieldStripped(field, options_)) {

View File

@ -292,23 +292,11 @@ TailCallTableInfo::TailCallTableInfo(
const std::vector<int>& has_bit_indices,
const std::vector<int>& inlined_string_indices,
MessageSCCAnalyzer* scc_analyzer) {
int oneof_count = descriptor->real_oneof_decl_count();
// If this message has any oneof fields, store the case offset in the first
// auxiliary entry.
if (oneof_count > 0) {
GOOGLE_LOG_IF(DFATAL, ordered_fields.empty())
<< "Invalid message: " << descriptor->full_name() << " has "
<< oneof_count << " oneof declarations, but no fields";
aux_entries.push_back(StrCat("_fl::Offset{offsetof(",
ClassName(descriptor),
", _impl_._oneof_case_)}"));
}
// If this message has any inlined string fields, store the donation state
// offset in the second auxiliary entry.
if (!inlined_string_indices.empty()) {
aux_entries.resize(2); // pad if necessary
aux_entries[1] =
aux_entries.resize(1); // pad if necessary
aux_entries[0] =
StrCat("_fl::Offset{offsetof(", ClassName(descriptor),
", _impl_._inlined_string_donated_)}");
}
@ -1102,7 +1090,7 @@ void ParseFunctionGenerator::GenerateFieldEntries(Formatter& format) {
FieldMemberName(field, /*cold=*/false));
}
if (oneof) {
format("$1$, ", oneof->index());
format("_Internal::kOneofCaseOffset + $1$, ", 4 * oneof->index());
} else if (num_hasbits_ > 0 || IsMapEntryMessage(descriptor_)) {
if (entry.hasbit_idx >= 0) {
format("_Internal::kHasBitsOffset + $1$, ", entry.hasbit_idx);

View File

@ -328,7 +328,9 @@ void RepeatedPrimitiveFieldGenerator::GeneratePrivateMembers(
format("::$proto_ns$::RepeatedField< $type$ > $name$_;\n");
if (descriptor_->is_packed() && FixedSize(descriptor_->type()) == -1 &&
HasGeneratedMethods(descriptor_->file(), options_)) {
format("mutable std::atomic<int> $cached_byte_size_name$;\n");
format(
"mutable ::$proto_ns$::internal::CachedSize "
"$cached_byte_size_name$;\n");
}
}
@ -433,7 +435,7 @@ void RepeatedPrimitiveFieldGenerator::GenerateSerializeWithCachedSizesToArray(
format(
"{\n"
" int byte_size = "
"$cached_byte_size_field$.load(std::memory_order_relaxed);\n"
"$cached_byte_size_field$.Get();\n"
" if (byte_size > 0) {\n"
" target = stream->Write$declared_type$Packed(\n"
" $number$, _internal_$name$(), byte_size, target);\n"
@ -484,8 +486,7 @@ void RepeatedPrimitiveFieldGenerator::GenerateByteSize(
if (FixedSize(descriptor_->type()) == -1) {
format(
"int cached_size = ::_pbi::ToCachedSize(data_size);\n"
"$cached_byte_size_field$.store(cached_size,\n"
" std::memory_order_relaxed);\n");
"$cached_byte_size_field$.Set(cached_size);\n");
}
format("total_size += data_size;\n");
} else {

View File

@ -31,6 +31,7 @@ cc_library(
"message_field.cc",
"message_field_lite.cc",
"message_lite.cc",
"message_serialization.cc",
"name_resolver.cc",
"primitive_field.cc",
"primitive_field_lite.cc",
@ -62,6 +63,7 @@ cc_library(
"message_field.h",
"message_field_lite.h",
"message_lite.h",
"message_serialization.h",
"name_resolver.h",
"names.h",
"options.h",

View File

@ -53,6 +53,7 @@
#include <google/protobuf/compiler/java/helpers.h>
#include <google/protobuf/compiler/java/message_builder.h>
#include <google/protobuf/compiler/java/message_builder_lite.h>
#include <google/protobuf/compiler/java/message_serialization.h>
#include <google/protobuf/compiler/java/name_resolver.h>
#include <google/protobuf/descriptor.pb.h>
@ -587,13 +588,6 @@ void ImmutableMessageGenerator::GenerateMessageSerializationMethods(
std::unique_ptr<const FieldDescriptor*[]> sorted_fields(
SortFieldsByNumber(descriptor_));
std::vector<const Descriptor::ExtensionRange*> sorted_extensions;
sorted_extensions.reserve(descriptor_->extension_range_count());
for (int i = 0; i < descriptor_->extension_range_count(); ++i) {
sorted_extensions.push_back(descriptor_->extension_range(i));
}
std::sort(sorted_extensions.begin(), sorted_extensions.end(),
ExtensionRangeOrdering());
printer->Print(
"@java.lang.Override\n"
"public void writeTo(com.google.protobuf.CodedOutputStream output)\n"
@ -628,19 +622,8 @@ void ImmutableMessageGenerator::GenerateMessageSerializationMethods(
}
}
// Merge the fields and the extension ranges, both sorted by field number.
for (int i = 0, j = 0;
i < descriptor_->field_count() || j < sorted_extensions.size();) {
if (i == descriptor_->field_count()) {
GenerateSerializeOneExtensionRange(printer, sorted_extensions[j++]);
} else if (j == sorted_extensions.size()) {
GenerateSerializeOneField(printer, sorted_fields[i++]);
} else if (sorted_fields[i]->number() < sorted_extensions[j]->start) {
GenerateSerializeOneField(printer, sorted_fields[i++]);
} else {
GenerateSerializeOneExtensionRange(printer, sorted_extensions[j++]);
}
}
GenerateSerializeFieldsAndExtensions(printer, field_generators_, descriptor_,
sorted_fields.get());
if (descriptor_->options().message_set_wire_format()) {
printer->Print("unknownFields.writeAsMessageSetTo(output);\n");
@ -770,17 +753,6 @@ void ImmutableMessageGenerator::GenerateParseFromMethods(io::Printer* printer) {
GeneratedCodeVersionSuffix());
}
void ImmutableMessageGenerator::GenerateSerializeOneField(
io::Printer* printer, const FieldDescriptor* field) {
field_generators_.get(field).GenerateSerializationCode(printer);
}
void ImmutableMessageGenerator::GenerateSerializeOneExtensionRange(
io::Printer* printer, const Descriptor::ExtensionRange* range) {
printer->Print("extensionWriter.writeUntil($end$, output);\n", "end",
StrCat(range->end));
}
// ===================================================================
void ImmutableMessageGenerator::GenerateBuilder(io::Printer* printer) {
@ -1017,8 +989,8 @@ void ImmutableMessageGenerator::GenerateEqualsAndHashCode(
" return true;\n"
"}\n"
"if (!(obj instanceof $classname$)) {\n"
// don't simply return false because mutable and immutable types
// can be equal
// don't simply return false because mutable and immutable types
// can be equal
" return super.equals(obj);\n"
"}\n"
"$classname$ other = ($classname$) obj;\n"

View File

@ -123,10 +123,6 @@ class ImmutableMessageGenerator : public MessageGenerator {
void GenerateMessageSerializationMethods(io::Printer* printer);
void GenerateParseFromMethods(io::Printer* printer);
void GenerateSerializeOneField(io::Printer* printer,
const FieldDescriptor* field);
void GenerateSerializeOneExtensionRange(
io::Printer* printer, const Descriptor::ExtensionRange* range);
void GenerateBuilder(io::Printer* printer);
void GenerateIsInitialized(io::Printer* printer);

View File

@ -0,0 +1,50 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <google/protobuf/compiler/java/message_serialization.h>
#include <google/protobuf/io/printer.h>
#include <google/protobuf/descriptor.h>
namespace google {
namespace protobuf {
namespace compiler {
namespace java {
void GenerateSerializeExtensionRange(io::Printer* printer,
const Descriptor::ExtensionRange* range) {
printer->Print("extensionWriter.writeUntil($end$, output);\n", "end",
StrCat(range->end));
}
} // namespace java
} // namespace compiler
} // namespace protobuf
} // namespace google

View File

@ -0,0 +1,91 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef GOOGLE_PROTOBUF_COMPILER_JAVA_MESSAGE_SERIALIZATION_H__
#define GOOGLE_PROTOBUF_COMPILER_JAVA_MESSAGE_SERIALIZATION_H__
#include <algorithm>
#include <vector>
#include <google/protobuf/io/printer.h>
#include <google/protobuf/descriptor.h>
#include <google/protobuf/compiler/java/field.h>
#include <google/protobuf/compiler/java/helpers.h>
namespace google {
namespace protobuf {
namespace compiler {
namespace java {
// Generates code to serialize a single extension range.
void GenerateSerializeExtensionRange(io::Printer* printer,
const Descriptor::ExtensionRange* range);
// Generates code to serialize all fields and extension ranges for the specified
// message descriptor, sorting serialization calls in increasing order by field
// number.
//
// Templatized to support different field generator implementations.
template <typename FieldGenerator>
void GenerateSerializeFieldsAndExtensions(
io::Printer* printer,
const FieldGeneratorMap<FieldGenerator>& field_generators,
const Descriptor* descriptor, const FieldDescriptor** sorted_fields) {
std::vector<const Descriptor::ExtensionRange*> sorted_extensions;
sorted_extensions.reserve(descriptor->extension_range_count());
for (int i = 0; i < descriptor->extension_range_count(); ++i) {
sorted_extensions.push_back(descriptor->extension_range(i));
}
std::sort(sorted_extensions.begin(), sorted_extensions.end(),
ExtensionRangeOrdering());
// Merge the fields and the extension ranges, both sorted by field number.
for (int i = 0, j = 0;
i < descriptor->field_count() || j < sorted_extensions.size();) {
if (i == descriptor->field_count()) {
GenerateSerializeExtensionRange(printer, sorted_extensions[j++]);
} else if (j == sorted_extensions.size()) {
field_generators.get(sorted_fields[i++])
.GenerateSerializationCode(printer);
} else if (sorted_fields[i]->number() < sorted_extensions[j]->start) {
field_generators.get(sorted_fields[i++])
.GenerateSerializationCode(printer);
} else {
GenerateSerializeExtensionRange(printer, sorted_extensions[j++]);
}
}
}
} // namespace java
} // namespace compiler
} // namespace protobuf
} // namespace google
#endif // GOOGLE_PROTOBUF_COMPILER_JAVA_MESSAGE_SERIALIZATION_H__
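
GenerateSerializeFieldsAndExtensions walks two already-sorted sequences, fields by number and extension ranges by start, with a two-index merge so the emitted serialization calls come out in increasing field-number order. The same merge in isolation, with sorted integers standing in for descriptors; MergeByNumber is an illustrative name, not part of the generator.

```cpp
#include <cstddef>
#include <cstdio>
#include <vector>

void MergeByNumber(const std::vector<int>& field_numbers,
                   const std::vector<int>& extension_starts) {
  std::size_t i = 0, j = 0;
  while (i < field_numbers.size() || j < extension_starts.size()) {
    if (i == field_numbers.size()) {
      std::printf("extension range starting at %d\n", extension_starts[j++]);
    } else if (j == extension_starts.size()) {
      std::printf("field %d\n", field_numbers[i++]);
    } else if (field_numbers[i] < extension_starts[j]) {
      std::printf("field %d\n", field_numbers[i++]);
    } else {
      std::printf("extension range starting at %d\n", extension_starts[j++]);
    }
  }
}

int main() {
  // Fields 1, 2, 7, 20 interleaved with extension ranges starting at 5 and 10.
  MergeByNumber({1, 2, 7, 20}, {5, 10});
}
```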

View File

@ -10437,7 +10437,7 @@ uint8_t* SourceCodeInfo_Location::_InternalSerialize(
// repeated int32 path = 1 [packed = true];
{
int byte_size = _impl_._path_cached_byte_size_.load(std::memory_order_relaxed);
int byte_size = _impl_._path_cached_byte_size_.Get();
if (byte_size > 0) {
target = stream->WriteInt32Packed(
1, _internal_path(), byte_size, target);
@ -10446,7 +10446,7 @@ uint8_t* SourceCodeInfo_Location::_InternalSerialize(
// repeated int32 span = 2 [packed = true];
{
int byte_size = _impl_._span_cached_byte_size_.load(std::memory_order_relaxed);
int byte_size = _impl_._span_cached_byte_size_.Get();
if (byte_size > 0) {
target = stream->WriteInt32Packed(
2, _internal_span(), byte_size, target);
@ -10509,8 +10509,7 @@ size_t SourceCodeInfo_Location::ByteSizeLong() const {
::_pbi::WireFormatLite::Int32Size(static_cast<int32_t>(data_size));
}
int cached_size = ::_pbi::ToCachedSize(data_size);
_impl_._path_cached_byte_size_.store(cached_size,
std::memory_order_relaxed);
_impl_._path_cached_byte_size_.Set(cached_size);
total_size += data_size;
}
@ -10523,8 +10522,7 @@ size_t SourceCodeInfo_Location::ByteSizeLong() const {
::_pbi::WireFormatLite::Int32Size(static_cast<int32_t>(data_size));
}
int cached_size = ::_pbi::ToCachedSize(data_size);
_impl_._span_cached_byte_size_.store(cached_size,
std::memory_order_relaxed);
_impl_._span_cached_byte_size_.Set(cached_size);
total_size += data_size;
}
@ -10996,7 +10994,7 @@ uint8_t* GeneratedCodeInfo_Annotation::_InternalSerialize(
// repeated int32 path = 1 [packed = true];
{
int byte_size = _impl_._path_cached_byte_size_.load(std::memory_order_relaxed);
int byte_size = _impl_._path_cached_byte_size_.Get();
if (byte_size > 0) {
target = stream->WriteInt32Packed(
1, _internal_path(), byte_size, target);
@ -11051,8 +11049,7 @@ size_t GeneratedCodeInfo_Annotation::ByteSizeLong() const {
::_pbi::WireFormatLite::Int32Size(static_cast<int32_t>(data_size));
}
int cached_size = ::_pbi::ToCachedSize(data_size);
_impl_._path_cached_byte_size_.store(cached_size,
std::memory_order_relaxed);
_impl_._path_cached_byte_size_.Set(cached_size);
total_size += data_size;
}

View File

@ -7962,9 +7962,9 @@ class PROTOBUF_EXPORT SourceCodeInfo_Location final :
::PROTOBUF_NAMESPACE_ID::internal::HasBits<1> _has_bits_;
mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
::PROTOBUF_NAMESPACE_ID::RepeatedField< int32_t > path_;
mutable std::atomic<int> _path_cached_byte_size_;
mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _path_cached_byte_size_;
::PROTOBUF_NAMESPACE_ID::RepeatedField< int32_t > span_;
mutable std::atomic<int> _span_cached_byte_size_;
mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _span_cached_byte_size_;
::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string> leading_detached_comments_;
::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr leading_comments_;
::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr trailing_comments_;
@ -8350,7 +8350,7 @@ class PROTOBUF_EXPORT GeneratedCodeInfo_Annotation final :
::PROTOBUF_NAMESPACE_ID::internal::HasBits<1> _has_bits_;
mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
::PROTOBUF_NAMESPACE_ID::RepeatedField< int32_t > path_;
mutable std::atomic<int> _path_cached_byte_size_;
mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _path_cached_byte_size_;
::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr source_file_;
int32_t begin_;
int32_t end_;

View File

@ -454,30 +454,36 @@ inline PROTOBUF_ALWAYS_INLINE const char* TcParser::RepeatedParseMessageAuxImpl(
if (PROTOBUF_PREDICT_FALSE(data.coded_tag<TagType>() != 0)) {
PROTOBUF_MUSTTAIL return MiniParse(PROTOBUF_TC_PARAM_PASS);
}
auto saved_tag = UnalignedLoad<TagType>(ptr);
ptr += sizeof(TagType);
SyncHasbits(msg, hasbits, table);
auto* aux = table->field_aux(data.aux_idx());
if (aux_is_table) {
auto* inner_table = aux->table;
auto& field = RefAt<RepeatedPtrFieldBase>(msg, data.offset());
const auto expected_tag = UnalignedLoad<TagType>(ptr);
const auto aux = *table->field_aux(data.aux_idx());
auto& field = RefAt<RepeatedPtrFieldBase>(msg, data.offset());
do {
ptr += sizeof(TagType);
MessageLite* submsg = field.Add<GenericTypeHandler<MessageLite>>(
inner_table->default_instance);
if (group_coding) {
return ctx->ParseGroup<TcParser>(submsg, ptr, FastDecodeTag(saved_tag),
inner_table);
aux_is_table ? aux.table->default_instance : aux.message_default());
if (aux_is_table) {
if (group_coding) {
ptr = ctx->ParseGroup<TcParser>(submsg, ptr,
FastDecodeTag(expected_tag), aux.table);
} else {
ptr = ctx->ParseMessage<TcParser>(submsg, ptr, aux.table);
}
} else {
if (group_coding) {
ptr = ctx->ParseGroup(submsg, ptr, FastDecodeTag(expected_tag));
} else {
ptr = ctx->ParseMessage(submsg, ptr);
}
}
return ctx->ParseMessage<TcParser>(submsg, ptr, inner_table);
} else {
const MessageLite* default_instance = aux->message_default();
auto& field = RefAt<RepeatedPtrFieldBase>(msg, data.offset());
MessageLite* submsg =
field.Add<GenericTypeHandler<MessageLite>>(default_instance);
if (group_coding) {
return ctx->ParseGroup(submsg, ptr, FastDecodeTag(saved_tag));
if (PROTOBUF_PREDICT_FALSE(ptr == nullptr)) {
PROTOBUF_MUSTTAIL return Error(PROTOBUF_TC_PARAM_PASS);
}
return ctx->ParseMessage(submsg, ptr);
}
if (PROTOBUF_PREDICT_FALSE(!ctx->DataAvailable(ptr))) {
PROTOBUF_MUSTTAIL return ToParseLoop(PROTOBUF_TC_PARAM_PASS);
}
} while (UnalignedLoad<TagType>(ptr) == expected_tag);
PROTOBUF_MUSTTAIL return ToTagDispatch(PROTOBUF_TC_PARAM_PASS);
}
const char* TcParser::FastMdR1(PROTOBUF_TC_PARAM_DECL) {
@ -1351,11 +1357,8 @@ bool TcParser::ChangeOneof(const TcParseTableBase* table,
const TcParseTableBase::FieldEntry& entry,
uint32_t field_num, ParseContext* ctx,
MessageLite* msg) {
// The _oneof_case_ array offset is stored in the first aux entry.
uint32_t oneof_case_offset = table->field_aux(0u)->offset;
// The _oneof_case_ array index is stored in the has-bit index.
uint32_t* oneof_case =
&TcParser::RefAt<uint32_t>(msg, oneof_case_offset) + entry.has_idx;
// The _oneof_case_ value offset is stored in the has-bit index.
uint32_t* oneof_case = &TcParser::RefAt<uint32_t>(msg, entry.has_idx);
uint32_t current_case = *oneof_case;
*oneof_case = field_num;
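
ChangeOneof now locates the _oneof_case_ word through a byte offset baked into the field entry (the generated kOneofCaseOffset plus 4 * the oneof index) rather than going through the first aux entry. A standalone sketch of that RefAt-style offset access, using a hypothetical message layout.

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct FakeMessage {       // hypothetical generated layout
  uint32_t oneof_case[2];  // one 4-byte slot per oneof declaration
  int32_t some_field;
};

template <typename T>
T& RefAt(void* msg, std::size_t offset) {
  return *reinterpret_cast<T*>(static_cast<char*>(msg) + offset);
}

int main() {
  FakeMessage m{};
  const std::size_t kOneofCaseOffset = offsetof(FakeMessage, oneof_case);
  const std::size_t oneof_index = 1;

  // Equivalent of ChangeOneof's "*oneof_case = field_num;".
  RefAt<uint32_t>(&m, kOneofCaseOffset + 4 * oneof_index) = 42;
  std::printf("oneof_case[1] = %u\n", static_cast<unsigned>(m.oneof_case[1]));
}
```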

View File

@ -185,13 +185,58 @@ T* GetOwnedMessage(Arena* message_arena, T* submessage,
// Hide atomic from the public header and allow easy change to regular int
// on platforms where the atomic might have a perf impact.
//
// CachedSize is like std::atomic<int> but with some important changes:
//
// 1) CachedSize uses Get / Set rather than load / store.
// 2) CachedSize always uses relaxed ordering.
// 3) CachedSize is assignable and copy-constructible.
// 4) CachedSize has a constexpr default constructor, and a constexpr
// constructor that takes an int argument.
// 5) If the compiler supports the __atomic_load_n / __atomic_store_n builtins,
// then CachedSize is trivially copyable.
//
// Developed at https://godbolt.org/z/vYcx7zYs1 ; supports gcc, clang, MSVC.
class PROTOBUF_EXPORT CachedSize {
private:
using Scalar = int;
public:
int Get() const { return size_.load(std::memory_order_relaxed); }
void Set(int size) { size_.store(size, std::memory_order_relaxed); }
constexpr CachedSize() noexcept : atom_(Scalar{}) {}
// NOLINTNEXTLINE(google-explicit-constructor)
constexpr CachedSize(Scalar desired) noexcept : atom_(desired) {}
#if PROTOBUF_BUILTIN_ATOMIC
constexpr CachedSize(const CachedSize& other) = default;
Scalar Get() const noexcept {
return __atomic_load_n(&atom_, __ATOMIC_RELAXED);
}
void Set(Scalar desired) noexcept {
__atomic_store_n(&atom_, desired, __ATOMIC_RELAXED);
}
#else
CachedSize(const CachedSize& other) noexcept : atom_(other.Get()) {}
CachedSize& operator=(const CachedSize& other) noexcept {
Set(other.Get());
return *this;
}
Scalar Get() const noexcept { //
return atom_.load(std::memory_order_relaxed);
}
void Set(Scalar desired) noexcept {
atom_.store(desired, std::memory_order_relaxed);
}
#endif
private:
std::atomic<int> size_{0};
#if PROTOBUF_BUILTIN_ATOMIC
Scalar atom_;
#else
std::atomic<Scalar> atom_;
#endif
};
PROTOBUF_EXPORT void DestroyMessage(const void* message);
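
Generated code now stores packed-field byte sizes in CachedSize and calls Get()/Set() instead of spelling out relaxed atomic loads and stores (see the descriptor.pb.cc hunks later in this change). A simplified usage sketch; the PackedInts type, its toy varint sizing, and this stand-in CachedSize (which just mirrors the Get/Set surface defined above) are illustrative.

```cpp
#include <atomic>
#include <cstddef>
#include <cstdio>
#include <vector>

class CachedSize {  // stand-in mirroring protobuf's Get/Set interface
 public:
  int Get() const { return size_.load(std::memory_order_relaxed); }
  void Set(int v) { size_.store(v, std::memory_order_relaxed); }

 private:
  std::atomic<int> size_{0};
};

struct PackedInts {
  std::vector<int> values;
  mutable CachedSize cached_byte_size;

  std::size_t ByteSizeLong() const {
    std::size_t data_size = 0;
    for (int v : values) data_size += v < 128 ? 1 : 2;  // toy varint sizes
    cached_byte_size.Set(static_cast<int>(data_size));  // remember for later
    return data_size + 2;  // plus tag and length-prefix overhead (toy value)
  }

  void Serialize() const {
    int byte_size = cached_byte_size.Get();  // no memory_order spelled out
    std::printf("would write a %d-byte packed payload\n", byte_size);
  }
};

int main() {
  PackedInts msg{{1, 2, 300}};
  msg.ByteSizeLong();
  msg.Serialize();
}
```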

View File

@ -139,6 +139,11 @@
#define PROTOBUF_BUILTIN_BSWAP64(x) __builtin_bswap64(x)
#endif
// Portable check for gcc-style atomic built-ins
#if __has_builtin(__atomic_load_n)
#define PROTOBUF_BUILTIN_ATOMIC 1
#endif
// Portable check for GCC minimum version:
// https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html
#if defined(__GNUC__) && defined(__GNUC_MINOR__) \

View File

@ -38,6 +38,7 @@
#undef PROTOBUF_BUILTIN_BSWAP16
#undef PROTOBUF_BUILTIN_BSWAP32
#undef PROTOBUF_BUILTIN_BSWAP64
#undef PROTOBUF_BUILTIN_ATOMIC
#undef PROTOBUF_GNUC_MIN
#undef PROTOBUF_MSC_VER_MIN
#undef PROTOBUF_CPLUSPLUS_MIN

View File

@ -419,6 +419,8 @@ void Struct::InternalSwap(Struct* other) {
class Value::_Internal {
public:
static constexpr int32_t kOneofCaseOffset =
PROTOBUF_FIELD_OFFSET(::PROTOBUF_NAMESPACE_ID::Value, _impl_._oneof_case_);
static const ::PROTOBUF_NAMESPACE_ID::Struct& struct_value(const Value* msg);
static const ::PROTOBUF_NAMESPACE_ID::ListValue& list_value(const Value* msg);
};

View File

@ -28,9 +28,10 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <google/protobuf/stubs/status.h>
#include <stdio.h>
#include <string.h>
#include <ostream>
#include <stdio.h>
#include <string>
#include <utility>
@ -256,6 +257,12 @@ Status UnknownError(StringPiece message) {
return Status(StatusCode::kUnknown, message);
}
Status ErrnoToStatus(int error_number, StringPiece message) {
// We will take an Abseil dependency soon, so no reason to do anything
// elaborate here.
return InternalError(StrCat(message, ": ", strerror(error_number)));
}
} // namespace status_internal
} // namespace util
} // namespace protobuf
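
The new util::ErrnoToStatus wraps strerror(errno) into an internal-error Status; the conformance harness's ReadFd/WriteFd above are its first callers. A small usage sketch against the header above; OpenForRead and the path are hypothetical.

```cpp
#include <errno.h>
#include <fcntl.h>

#include <iostream>
#include <string>

#include <google/protobuf/stubs/status.h>

namespace util = google::protobuf::util;

util::Status OpenForRead(const char* path, int* fd_out) {
  int fd = open(path, O_RDONLY);
  if (fd < 0) {
    return util::ErrnoToStatus(errno, "error opening file");
  }
  *fd_out = fd;
  return util::OkStatus();
}

int main() {
  int fd = -1;
  util::Status s = OpenForRead("/definitely/not/there", &fd);
  if (!s.ok()) {
    // Prints something like "error opening file: No such file or directory".
    std::cout << std::string(s.message()) << std::endl;
  }
}
```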

View File

@ -31,10 +31,12 @@
#ifndef GOOGLE_PROTOBUF_STUBS_STATUS_H_
#define GOOGLE_PROTOBUF_STUBS_STATUS_H_
#include <google/protobuf/stubs/stringpiece.h>
#include <google/protobuf/stubs/strutil.h>
#include <string>
#include <google/protobuf/stubs/stringpiece.h>
// Must be included last.
#include <google/protobuf/port_def.inc>
namespace google {
@ -147,6 +149,8 @@ PROTOBUF_EXPORT Status UnavailableError(StringPiece message);
PROTOBUF_EXPORT Status UnimplementedError(StringPiece message);
PROTOBUF_EXPORT Status UnknownError(StringPiece message);
PROTOBUF_EXPORT Status ErrnoToStatus(int error_number, StringPiece message);
} // namespace status_internal
using ::google::protobuf::util::status_internal::Status;
@ -187,6 +191,8 @@ using ::google::protobuf::util::status_internal::UnavailableError;
using ::google::protobuf::util::status_internal::UnimplementedError;
using ::google::protobuf::util::status_internal::UnknownError;
using ::google::protobuf::util::status_internal::ErrnoToStatus;
} // namespace util
} // namespace protobuf
} // namespace google

View File

@ -73,7 +73,6 @@ ProtoWriter::ProtoWriter(TypeResolver* type_resolver,
element_(nullptr),
size_insert_(),
output_(output),
buffer_(),
adapter_(&buffer_),
stream_(new CodedOutputStream(&adapter_)),
listener_(listener),
@ -95,7 +94,6 @@ ProtoWriter::ProtoWriter(const TypeInfo* typeinfo,
element_(nullptr),
size_insert_(),
output_(output),
buffer_(),
adapter_(&buffer_),
stream_(new CodedOutputStream(&adapter_)),
listener_(listener),

View File

@ -84,7 +84,7 @@ util::Status GetStatus(const util::StatusOr<T>& s) {
}
MATCHER_P(StatusIs, status,
StrCat(".status() is ", testing::PrintToString(status))) {
StrCat(".status() is ", testing::PrintToString(status))) {
return GetStatus(arg).code() == status;
}
@ -412,6 +412,110 @@ TEST(JsonUtilTest, TestDynamicMessage) {
EXPECT_EQ(*message_json, *generated_json);
}
TEST(JsonUtilTest, TestParsingAny) {
StringPiece input = R"json(
{
"value": {
"@type": "type.googleapis.com/proto3.TestMessage",
"int32_value": 5,
"string_value": "expected_value",
"message_value": {"value": 1}
}
}
)json";
TestAny m;
ASSERT_OK(FromJson(input, &m));
TestMessage t;
EXPECT_TRUE(m.value().UnpackTo(&t));
EXPECT_EQ(t.int32_value(), 5);
EXPECT_EQ(t.string_value(), "expected_value");
EXPECT_EQ(t.message_value().value(), 1);
EXPECT_THAT(
ToJson(m),
IsOkAndHolds(
R"json({"value":{"@type":"type.googleapis.com/proto3.TestMessage","int32Value":5,"stringValue":"expected_value","messageValue":{"value":1}}})json"));
}
TEST(JsonUtilTest, TestParsingAnyMiddleAtType) {
StringPiece input = R"json(
{
"value": {
"int32_value": 5,
"string_value": "expected_value",
"@type": "type.googleapis.com/proto3.TestMessage",
"message_value": {"value": 1}
}
}
)json";
TestAny m;
ASSERT_OK(FromJson(input, &m));
TestMessage t;
EXPECT_TRUE(m.value().UnpackTo(&t));
EXPECT_EQ(t.int32_value(), 5);
EXPECT_EQ(t.string_value(), "expected_value");
EXPECT_EQ(t.message_value().value(), 1);
}
TEST(JsonUtilTest, TestParsingAnyEndAtType) {
StringPiece input = R"json(
{
"value": {
"int32_value": 5,
"string_value": "expected_value",
"message_value": {"value": 1},
"@type": "type.googleapis.com/proto3.TestMessage"
}
}
)json";
TestAny m;
ASSERT_OK(FromJson(input, &m));
TestMessage t;
EXPECT_TRUE(m.value().UnpackTo(&t));
EXPECT_EQ(t.int32_value(), 5);
EXPECT_EQ(t.string_value(), "expected_value");
EXPECT_EQ(t.message_value().value(), 1);
}
TEST(JsonUtilTest, TestParsingNestedAnys) {
StringPiece input = R"json(
{
"value": {
"value": {
"int32_value": 5,
"string_value": "expected_value",
"message_value": {"value": 1},
"@type": "type.googleapis.com/proto3.TestMessage"
},
"@type": "type.googleapis.com/google.protobuf.Any"
}
}
)json";
TestAny m;
ASSERT_OK(FromJson(input, &m));
google::protobuf::Any inner;
EXPECT_TRUE(m.value().UnpackTo(&inner));
TestMessage t;
EXPECT_TRUE(inner.UnpackTo(&t));
EXPECT_EQ(t.int32_value(), 5);
EXPECT_EQ(t.string_value(), "expected_value");
EXPECT_EQ(t.message_value().value(), 1);
EXPECT_THAT(
ToJson(m),
IsOkAndHolds(
R"json({"value":{"@type":"type.googleapis.com/google.protobuf.Any","value":{"@type":"type.googleapis.com/proto3.TestMessage","int32Value":5,"stringValue":"expected_value","messageValue":{"value":1}}}})json"));
}
TEST(JsonUtilTest, TestParsingUnknownAnyFields) {
StringPiece input = R"json(
{
@ -672,9 +776,9 @@ TEST(JsonUtilTest, TestWrongJsonInput) {
auto* resolver = NewTypeResolverForDescriptorPool(
"type.googleapis.com", DescriptorPool::generated_pool());
EXPECT_THAT(JsonToBinaryStream(resolver, message_type, &input_stream,
&output_stream),
StatusIs(util::StatusCode::kInvalidArgument));
EXPECT_THAT(
JsonToBinaryStream(resolver, message_type, &input_stream, &output_stream),
StatusIs(util::StatusCode::kInvalidArgument));
delete resolver;
}